bin/hva in wakame-vdc-agents-10.12.0 vs bin/hva in wakame-vdc-agents-11.06.0
- old
+ new
@@ -1,1377 +1,37 @@
#!/usr/bin/env ruby
# -*- coding: utf-8 -*-
-begin
- require 'rubygems'
- require 'bundler'
- Bundler.setup(:default)
-rescue Exception
-end
+$LOAD_PATH.unshift File.expand_path('../../lib', __FILE__)
-require File.expand_path('../../config/path_resolver', __FILE__)
+require 'dcmgr/rubygems'
+require 'dcmgr'
+require 'isono'
include Isono::Runner::RpcServer
-require 'fileutils'
-require 'ipaddress'
-class ServiceNetfilter < Isono::NodeModules::Base
- include Dcmgr::Logger
-
- initialize_hook do
- @worker_thread = Isono::ThreadPool.new(1)
-
- @worker_thread.pass {
- myinstance.init_netfilter
- }
-
- event = Isono::NodeModules::EventChannel.new(node)
-
- event.subscribe('hva/instance_started', '#') do |args|
- @worker_thread.pass {
- logger.info("refresh on instance_started: #{args.inspect}")
- inst_id = args[0]
- #logger.info("refresh_netfilter_by_friend_instance_id: #{inst_id}")
- #myinstance.refresh_netfilter_by_friend_instance_id(inst_id, 'started')
- myinstance.init_netfilter
- }
- end
-
- event.subscribe('hva/instance_terminated', '#') do |args|
- @worker_thread.pass {
- logger.info("refresh on instance_terminated: #{args.inspect}")
- inst_id = args[0]
- #logger.info("refresh_netfilter_by_friend_instance_id: #{inst_id}")
- #myinstance.refresh_netfilter_by_friend_instance_id(inst_id, 'terminated')
- myinstance.init_netfilter
- }
- end
-
- event.subscribe('hva/netfilter_updated', '#') do |args|
- @worker_thread.pass {
- logger.info("refresh on netfilter_updated: #{args.inspect}")
- netfilter_group_id = args[0]
- #myinstance.refresh_netfilter_by_joined_netfilter_group_id(netfilter_group_id)
- myinstance.init_netfilter
- }
- end
- end
-
- def init_netfilter
- begin
- inst_maps = rpc.request('hva-collector', 'get_alive_instances', node.node_id)
-
- viftable_map = {}
- inst_maps = inst_maps.map { |inst_map|
- viftable_map[ inst_map[:ips].first ] = inst_map[:instance_nics].first[:uuid]
-
-      # Does this HVA host the instance?
- unless inst_map[:host_pool][:node_id] == node.node_id
- logger.warn("no match for the instance: #{inst_map[:uuid]}")
- next
- end
-      # Does the host have the vif?
-      next unless valid_vif?(inst_map[:instance_nics].first[:uuid])
-      inst_map
- }.flatten.uniq.compact
-
- init_iptables(inst_maps) if @node.manifest.config.enable_iptables
- init_ebtables(inst_maps, viftable_map) if @node.manifest.config.enable_ebtables
-      logger.info("initialized netfilter")
- rescue Exception => e
- p e
- end
- end
-
- # from event_subscriber
-# def refresh_netfilter_by_friend_instance_id(inst_id, state = 'started')
-# raise "UnknownInstanceID" if inst_id.nil?
-#
-# begin
-# inst_map = rpc.request('hva-collector', 'get_instance', inst_id)
-# ng = rpc.request('hva-collector', 'get_netfilter_groups_of_instance', inst_map[:uuid])
-#
-# inst_maps = ng.map { |g|
-# rpc.request('hva-collector', 'get_instances_of_netfilter_group', g[:id])
-# }
-#
-# # my instance_id
-# # when terminated?
-# if state == 'terminated'
-# unless inst_map.nil?
-# refresh_iptables(inst_map) if @node.manifest.config.enable_iptables
-# refresh_ebtables(inst_map) if @node.manifest.config.enable_ebtables
-# end
-# end
-#
-# # friend instance(s)
-# if inst_maps.size > 0
-# inst_maps.flatten.uniq.each { |inst_map|
-# unless inst_map.nil?
-# refresh_iptables(inst_map) if @node.manifest.config.enable_iptables
-# refresh_ebtables(inst_map) if @node.manifest.config.enable_ebtables
-# end
-# }
-# end
-# rescue Exception => e
-# p e
-# end
-# end
-
- # from event_subscriber
-# def refresh_netfilter_by_joined_netfilter_group_id(netfilter_group_id)
-# raise "UnknownNetfilterGroupID" if netfilter_group_id.nil?
-#
-# begin
-# inst_maps = rpc.request('hva-collector', 'get_instances_of_netfilter_group', netfilter_group_id)
-# inst_maps.each { |inst_map|
-# unless inst_map.nil?
-# refresh_iptables(inst_map) if @node.manifest.config.enable_iptables
-# refresh_ebtables(inst_map) if @node.manifest.config.enable_ebtables
-# end
-# }
-# rescue Exception => e
-# p e
-# end
-# end
-
- def init_ebtables(inst_maps = [], viftable_map = {})
- cmd = "ebtables --init-table"
- puts cmd
- system(cmd)
-
- basic_cmds = []
- group_cmds = []
- final_cmds = []
-
- inst_maps.each { |inst_map|
- vif_map = {
- :uuid => inst_map[:instance_nics].first[:uuid],
- :mac => inst_map[:instance_nics].first[:mac_addr].unpack('A2'*6).join(':'),
- :ipv4 => inst_map[:ips].first,
- }
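-      # e.g. (illustrative MAC) a raw NIC string "525400123456" becomes
-      #   "525400123456".unpack('A2'*6).join(':')  #=> "52:54:00:12:34:56"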
-
- basic_cmds << build_ebtables_basic_part(vif_map, inst_map)
- group_cmds << build_ebtables_group_part(vif_map, inst_map, viftable_map)
- final_cmds << build_ebtables_final_part(vif_map)
- }
-
- viftable_map.each { |k,v|
- p "#{v} <-> #{k}"
- }
-
- logger.debug("basic_cmds ...")
- basic_cmds.flatten.uniq.each { |cmd|
- system(cmd)
- }
-
- logger.debug("group_cmds ...")
- group_cmds.flatten.uniq.each { |cmd|
- system(cmd)
- }
-
- logger.debug("final_cmds ...")
- final_cmds.flatten.uniq.each { |cmd|
- system(cmd)
- }
- end
-
- def init_iptables(inst_maps = [])
- [ 'nat', 'filter' ].each { |table|
- [ 'F', 'Z', 'X' ].each { |xcmd|
- cmd = "iptables -t #{table} -#{xcmd}"
- puts cmd
- system(cmd)
- }
- }
-
- inst_maps.each { |inst_map|
- refresh_iptables(inst_map, false)
- }
- end
-
- def valid_vif?(vif)
- cmd = "ifconfig #{vif} >/dev/null 2>&1"
- system(cmd)
-
- if $?.exitstatus == 0
- true
- else
- logger.warn("#{vif}: error fetching interface information: Device not found")
- false
- end
- end
-
-# def refresh_ebtables(inst_map = {}, viftable_map = {})
-# logger.debug("refresh_ebtables: #{inst_map[:uuid]} ...")
-#
-# vif_map = {
-# :uuid => inst_map[:instance_nics].first[:uuid],
-# :mac => inst_map[:instance_nics].first[:mac_addr].unpack('A2'*6).join(':'),
-# }
-#
-# # xtables commands
-# basic_cmds = build_ebtables_basic_part(vif_map, inst_map)
-# group_cmds = build_ebtables_group_part(vif_map, inst_map, viftable_map)
-# final_cmds = build_ebtables_final_part(vif_map)
-#
-# logger.debug("refresh_ebtables: #{inst_map[:uuid]} done.")
-# end
-
- def build_ebtables_basic_part(vif_map, inst_map)
- basic_cmds = []
- hva_ipv4 = Isono::Util.default_gw_ipaddr
-
- ################################
- ## 0. chain name
- ################################
-
- # support IP protocol
- protocol_maps = {
- 'ip4' => 'ip4',
- 'arp' => 'arp',
- #'ip6' => 'ip6',
- #'rarp' => '0x8035',
- }
-
- # make chain names.
- chains = []
- chains << "s_#{vif_map[:uuid]}"
- chains << "d_#{vif_map[:uuid]}"
- chains << "s_#{vif_map[:uuid]}_d_hst"
- chains << "d_#{vif_map[:uuid]}_s_hst"
- protocol_maps.each { |k,v|
- chains << "s_#{vif_map[:uuid]}_#{k}"
- chains << "d_#{vif_map[:uuid]}_#{k}"
- chains << "s_#{vif_map[:uuid]}_d_hst_#{k}"
- chains << "d_#{vif_map[:uuid]}_s_hst_#{k}"
- }
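-    # e.g. for a hypothetical vif "vif-xxxxxxxx" this yields per-direction chains
-    #   s_vif-xxxxxxxx, d_vif-xxxxxxxx, s_vif-xxxxxxxx_d_hst, d_vif-xxxxxxxx_s_hst
-    # plus one per protocol key, such as s_vif-xxxxxxxx_ip4 and s_vif-xxxxxxxx_arp.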
-
- ################################
- ## 1. basic part
- ################################
-
-    # create user-defined chains.
- [ 'N' ].each { |xcmd|
- chains.each { |chain|
- basic_cmds << "ebtables -#{xcmd} #{chain}"
- }
- }
-
-    # jump to user-defined chains
- basic_cmds << "ebtables -A FORWARD -i #{vif_map[:uuid]} -j s_#{vif_map[:uuid]}"
- basic_cmds << "ebtables -A FORWARD -o #{vif_map[:uuid]} -j d_#{vif_map[:uuid]}"
- basic_cmds << "ebtables -A INPUT -i #{vif_map[:uuid]} -j s_#{vif_map[:uuid]}_d_hst"
- basic_cmds << "ebtables -A OUTPUT -o #{vif_map[:uuid]} -j d_#{vif_map[:uuid]}_s_hst"
-
- # IP protocol routing
- protocol_maps.each { |k,v|
- basic_cmds << "ebtables -A s_#{vif_map[:uuid]} -p #{v} -j s_#{vif_map[:uuid]}_#{k}"
- basic_cmds << "ebtables -A d_#{vif_map[:uuid]} -p #{v} -j d_#{vif_map[:uuid]}_#{k}"
- basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst -p #{v} -j s_#{vif_map[:uuid]}_d_hst_#{k}"
- basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst -p #{v} -j d_#{vif_map[:uuid]}_s_hst_#{k}"
- }
-
- # default drop
- basic_cmds << "ebtables -A s_#{vif_map[:uuid]} --log-level warning --log-ip --log-arp --log-prefix 'D s_#{vif_map[:uuid]}:' -j CONTINUE"
- basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst --log-level warning --log-ip --log-arp --log-prefix 'D s_#{vif_map[:uuid]}_d_hst:' -j CONTINUE"
- basic_cmds << "ebtables -A s_#{vif_map[:uuid]} -j DROP"
- basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst -j DROP"
-
- # anti spoof: mac
- # guest -> *
- basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-src ! #{vif_map[:mac]} --log-ip --log-arp --log-prefix 'Dmc s_#{vif_map[:uuid]}_arp:' -j CONTINUE"
- basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-mac-src ! #{vif_map[:mac]} --log-ip --log-arp --log-prefix 'Dmc s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE"
- basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-src ! #{vif_map[:mac]} -j DROP"
- basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-mac-src ! #{vif_map[:mac]} -j DROP"
-
- # guest <- * (broadcast)
- basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-dst 00:00:00:00:00:00 --log-ip --log-arp --log-prefix 'Amc d_#{vif_map[:uuid]}_arp:' -j CONTINUE"
- basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-ip-src=#{hva_ipv4} --arp-mac-dst 00:00:00:00:00:00 --log-ip --log-arp --log-prefix 'Amc d_#{vif_map[:uuid]}_hst_arp:' -j CONTINUE"
- basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-dst 00:00:00:00:00:00 -j ACCEPT"
- basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-ip-src=#{hva_ipv4} --arp-mac-dst 00:00:00:00:00:00 -j ACCEPT"
-
- # guest <- *
- basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-dst ! #{vif_map[:mac]} --log-ip --log-arp --log-prefix 'Dmc d_#{vif_map[:uuid]}_arp:' -j CONTINUE"
- basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-mac-dst ! #{vif_map[:mac]} --log-ip --log-arp --log-prefix 'Dmc d_#{vif_map[:uuid]}_s_hst_arp:' -j CONTINUE"
- basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-mac-dst ! #{vif_map[:mac]} -j DROP"
- basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-mac-dst ! #{vif_map[:mac]} -j DROP"
-
- # anti spoof: ipv4
- inst_map[:ips].each { |ipv4|
- # guest -> *
- basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src ! #{ipv4} --log-ip --log-arp --log-prefix 'Dip s_#{vif_map[:uuid]}_arp:' -j CONTINUE"
- basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src ! #{ipv4} --log-ip --log-arp --log-prefix 'Dip s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE"
- basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src ! #{ipv4} -j DROP"
- basic_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src ! #{ipv4} -j DROP"
- # guest <- *
- basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-dst ! #{ipv4} --log-ip --log-arp --log-prefix 'Dip d_#{vif_map[:uuid]}_arp:' -j CONTINUE"
- basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-ip-dst ! #{ipv4} --log-ip --log-arp --log-prefix 'Dip d_#{vif_map[:uuid]}_s_hst_arp:' -j CONTINUE"
- basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-dst ! #{ipv4} -j DROP"
- basic_cmds << "ebtables -A d_#{vif_map[:uuid]}_s_hst_arp --protocol arp --arp-ip-dst ! #{ipv4} -j DROP"
- }
-
- basic_cmds
- end
-
-
- def build_ebtables_group_part(vif_map, inst_map, viftable_map)
- group_cmds = []
- hva_ipv4 = Isono::Util.default_gw_ipaddr
-
- ################################
- ## 2. group part
- ################################
- same_subnet_ipv4s = rpc.request('hva-collector', 'get_group_instance_ipv4s', inst_map[:uuid])
-
-    # detect the network(s) this node has joined.
- network_map = rpc.request('hva-collector', 'get_network', inst_map[:instance_nics].first[:network_id])
- raise "UnknownNetworkId" if network_map.nil?
- joined_network = IPAddress("#{network_map[:ipv4_gw]}/#{network_map[:prefix]}")
- [ network_map[:dns_server], network_map[:dhcp_server] ].each { |ipv4|
- next unless joined_network.include? IPAddress(ipv4)
- same_subnet_ipv4s << ipv4
- }
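-    # a minimal sketch of the inclusion test above, assuming the ipaddress gem
-    # and illustrative addresses:
-    #   IPAddress("10.1.2.1/24").include?(IPAddress("10.1.2.254"))  #=> true
-    # servers outside the joined subnet are skipped.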
-
- # network resource node(s)
- ng_maps = rpc.request('hva-collector', 'get_netfilter_groups_of_instance', inst_map[:uuid])
- rules = ng_maps.map { |ng_map|
- ng_map[:rules].map { |rule| rule[:permission] }
- }.flatten
- build_rule(rules).each do |rule|
- next unless joined_network.include? IPAddress(rule[:ip_source])
- same_subnet_ipv4s << rule[:ip_source]
- end
- same_subnet_ipv4s << network_map[:ipv4_gw]
-
-    # guest node(s) on this hypervisor.
- alive_inst_maps = rpc.request('hva-collector', 'get_alive_instances', node.node_id)
- guest_ipv4s = alive_inst_maps.map { |alive_inst_map|
- alive_inst_map[:ips]
- }.flatten.uniq.compact
-
- same_subnet_ipv4s.uniq.reverse_each do |ipv4|
- next if vif_map[:ipv4] == ipv4
-
- # get_macaddr_by_ipv4, ipv4
- if ipv4 == hva_ipv4
- #p "#{vif_map[:uuid]}(#{vif_map[:ipv4]}) -> [host] ***-****** (#{ipv4})"
- group_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} --log-ip --log-arp --log-prefix 'Afw s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE"
- group_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} -j ACCEPT"
- elsif guest_ipv4s.include?(ipv4)
- #p "#{vif_map[:uuid]}(#{vif_map[:ipv4]}) -> [guest] #{viftable_map[ipv4]}(#{ipv4})"
-
- # guest->guest
- group_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} --log-ip --log-arp --log-prefix 'Afw d_#{vif_map[:uuid]}_arp:' -j CONTINUE"
- group_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} -j ACCEPT"
- # guest->host
- group_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} --log-ip --log-arp --log-prefix 'Afw s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE"
- group_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} -j ACCEPT"
-
- unless viftable_map[ipv4].nil?
- # guest->guest
- group_cmds << "ebtables -A d_#{viftable_map[ipv4]}_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} --log-ip --log-arp --log-prefix 'Arv d_#{viftable_map[ipv4]}_arp:' -j CONTINUE"
- group_cmds << "ebtables -A d_#{viftable_map[ipv4]}_arp --protocol arp --arp-ip-src #{vif_map[:ipv4]} --arp-ip-dst #{ipv4} -j ACCEPT"
-
- # guest->host
- group_cmds << "ebtables -A s_#{viftable_map[ipv4]}_d_hst_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} --log-ip --log-arp --log-prefix 'Arv s_#{viftable_map[ipv4]}_d_hst_arp:' -j CONTINUE"
- group_cmds << "ebtables -A s_#{viftable_map[ipv4]}_d_hst_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} -j ACCEPT"
- end
- else
- #p "#{vif_map[:uuid]}(#{vif_map[:ipv4]}) -> [other] ***-******** (#{ipv4})"
- group_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} --log-ip --log-arp --log-prefix 'Afw :d_#{vif_map[:uuid]}_arp' -j CONTINUE"
- group_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --protocol arp --arp-ip-src #{ipv4} --arp-ip-dst #{vif_map[:ipv4]} -j ACCEPT"
- end
- end
-
- group_cmds
- end
-
-
- def build_ebtables_final_part(vif_map)
- final_cmds = []
-
- ################################
- ## 3. final part
- ################################
- # deny,allow
- final_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp --log-level warning --log-ip --log-arp --log-prefix 'D d_#{vif_map[:uuid]}_arp:' -j CONTINUE"
- final_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp --log-level warning --log-ip --log-arp --log-prefix 'D s_#{vif_map[:uuid]}_d_hst_arp:' -j CONTINUE"
- final_cmds << "ebtables -A d_#{vif_map[:uuid]}_arp -j DROP"
- final_cmds << "ebtables -A s_#{vif_map[:uuid]}_d_hst_arp -j DROP"
-
- final_cmds
- end
-
- def refresh_iptables(inst_map = {}, with_flush = 1)
- logger.debug("refresh_iptables: #{inst_map[:uuid]} ...")
-
-    # Does this HVA host the instance?
- unless inst_map[:host_pool][:node_id] == node.node_id
- logger.warn "no match for the instance: #{inst_map[:uuid]}"
- return
- end
-
- network_map = rpc.request('hva-collector', 'get_network', inst_map[:instance_nics].first[:network_id])
- raise "UnknownNetworkId" if network_map.nil?
-
- vif = inst_map[:instance_nics].first[:uuid]
- vif_mac = inst_map[:instance_nics].first[:mac_addr].unpack('A2'*6).join(':')
-
- if with_flush
- flush_iptables(inst_map)
- end
-
-    # Does the host have the vif?
- unless valid_vif?(vif)
- return
- end
-
-
-
-
-    # IPv4 addresses of instances in the same netfilter group.
- ipv4s = rpc.request('hva-collector', 'get_group_instance_ipv4s', inst_map[:uuid])
-
- ng_maps = rpc.request('hva-collector', 'get_netfilter_groups_of_instance', inst_map[:uuid])
- rules = ng_maps.map { |ng_map|
- ng_map[:rules].map { |rule| rule[:permission] }
- }.flatten
-
-
-
-
- # xtables commands
- cmds = []
-
- # support IP protocol
- protocol_maps = {
- 'tcp' => 'tcp',
- 'udp' => 'udp',
- 'icmp' => 'icmp',
- }
-
- # make chain names.
- chains = []
- protocol_maps.each { |k,v|
- chains << "s_#{vif}_#{k}"
- chains << "d_#{vif}_#{k}"
- }
- chains << "s_#{vif}"
- chains << "d_#{vif}"
-
- # metadata-server
- [ 'A' ].each { |xcmd|
- system("iptables -t nat -#{xcmd} PREROUTING -m physdev --physdev-is-bridged --physdev-in #{vif} -s 0.0.0.0 -d 169.254.169.254 -p tcp --dport 80 -j DNAT --to-destination #{network_map[:metadata_server]}:80")
- }
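-    # this DNAT transparently redirects guest HTTP requests for the well-known
-    # metadata address 169.254.169.254, bridged in from this vif, to the
-    # network's metadata server on port 80.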
-
-    # create user-defined chains.
- [ 'N' ].each { |xcmd|
- chains.each { |chain|
- cmds << "iptables -#{xcmd} #{chain}"
-
- # logger & drop
- cmds << "iptables -N #{chain}_drop"
- cmds << "iptables -A #{chain}_drop -j LOG --log-level 4 --log-prefix 'D #{chain}:'"
- cmds << "iptables -A #{chain}_drop -j DROP"
- }
- }
-
- # group nodes
- ipv4s << network_map[:ipv4_gw]
- ipv4s.uniq.reverse_each { |addr|
- cmds << "iptables -A d_#{vif} -s #{addr} -j ACCEPT"
- }
-
- # IP protocol routing
- [ 's', 'd' ].each do |bound|
- protocol_maps.each { |k,v|
- cmds << "iptables -N #{bound}_#{vif}_#{k}"
-
- case k
- when 'tcp'
- case bound
- when 's'
- cmds << "iptables -A #{bound}_#{vif} -m state --state NEW,ESTABLISHED -p #{k} -j #{bound}_#{vif}_#{k}"
- when 'd'
- #cmds << "iptables -A #{bound}_#{vif} -m state --state ESTABLISHED -p #{k} -j #{bound}_#{vif}_#{k}"
- cmds << "iptables -A #{bound}_#{vif} -m state --state RELATED,ESTABLISHED -p #{k} -j ACCEPT"
- cmds << "iptables -A #{bound}_#{vif} -p #{k} -j #{bound}_#{vif}_#{k}"
- end
- when 'udp'
- case bound
- when 's'
- cmds << "iptables -A #{bound}_#{vif} -m state --state NEW,ESTABLISHED -p #{k} -j #{bound}_#{vif}_#{k}"
- when 'd'
- #cmds << "iptables -A #{bound}_#{vif} -m state --state ESTABLISHED -p #{k} -j #{bound}_#{vif}_#{k}"
- cmds << "iptables -A #{bound}_#{vif} -m state --state ESTABLISHED -p #{k} -j ACCEPT"
- cmds << "iptables -A #{bound}_#{vif} -p #{k} -j #{bound}_#{vif}_#{k}"
- end
- when 'icmp'
- case bound
- when 's'
- cmds << "iptables -A #{bound}_#{vif} -m state --state NEW,ESTABLISHED,RELATED -p #{k} -j #{bound}_#{vif}_#{k}"
- when 'd'
- #cmds << "iptables -A #{bound}_#{vif} -m state --state NEW,ESTABLISHED,RELATED -p #{k} -j #{bound}_#{vif}_#{k}"
- cmds << "iptables -A #{bound}_#{vif} -m state --state ESTABLISHED,RELATED -p #{k} -j ACCEPT"
- cmds << "iptables -A #{bound}_#{vif} -p #{k} -j #{bound}_#{vif}_#{k}"
- end
- end
- }
- end
-
- cmds << "iptables -A FORWARD -m physdev --physdev-is-bridged --physdev-in #{vif} -j s_#{vif}"
- cmds << "iptables -A FORWARD -m physdev --physdev-is-bridged --physdev-out #{vif} -j d_#{vif}"
-
- ##
- ## ACCEPT
- ##
- # DHCP Server
- cmds << "iptables -A d_#{vif}_udp -p udp -s #{network_map[:dhcp_server]} --sport 67 -j ACCEPT"
- cmds << "iptables -A d_#{vif}_udp -p udp -s #{network_map[:dhcp_server]} --sport 68 -j ACCEPT"
-
- #cmds << "iptables -A d_#{vif}_udp -p udp --sport 67 -j d_#{vif}_udp_drop"
- # DNS Server
- cmds << "iptables -A s_#{vif}_udp -p udp -d #{network_map[:dns_server]} --dport 53 -j ACCEPT"
-
- ##
- ## DROP
- ##
- protocol_maps.each { |k,v|
- # DHCP
- cmds << "iptables -A s_#{vif} -d #{network_map[:dhcp_server]} -p #{k} -j s_#{vif}_#{k}_drop"
- # DNS
- cmds << "iptables -A s_#{vif} -d #{network_map[:dns_server]} -p #{k} -j s_#{vif}_#{k}_drop"
- }
-
- # security group
- build_rule(rules).each do |rule|
- case rule[:ip_protocol]
- when 'tcp', 'udp'
- cmds << "iptables -A d_#{vif}_#{rule[:ip_protocol]} -p #{rule[:ip_protocol]} -s #{rule[:ip_source]} --dport #{rule[:ip_dport]} -j ACCEPT"
- when 'icmp'
- # icmp
- # This extension can be used if `--protocol icmp' is specified. It provides the following option:
- # [!] --icmp-type {type[/code]|typename}
- # This allows specification of the ICMP type, which can be a numeric ICMP type, type/code pair, or one of the ICMP type names shown by the command
- # iptables -p icmp -h
- if rule[:icmp_type] == -1 && rule[:icmp_code] == -1
- cmds << "iptables -A d_#{vif}_#{rule[:ip_protocol]} -p #{rule[:ip_protocol]} -s #{rule[:ip_source]} -j ACCEPT"
- else
- cmds << "iptables -A d_#{vif}_#{rule[:ip_protocol]} -p #{rule[:ip_protocol]} -s #{rule[:ip_source]} --icmp-type #{rule[:icmp_type]}/#{rule[:icmp_code]} -j ACCEPT"
- end
- end
- end
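-    # e.g. a parsed rule such as {:ip_protocol=>'tcp', :ip_dport=>22,
-    # :ip_source=>'0.0.0.0/0'} for a hypothetical vif "vif-xxxxxxxx" emits:
-    #   iptables -A d_vif-xxxxxxxx_tcp -p tcp -s 0.0.0.0/0 --dport 22 -j ACCEPT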
-
-    # drop any other traffic
- protocol_maps.each { |k,v|
- cmds << "iptables -A d_#{vif}_#{k} -p #{k} -j d_#{vif}_#{k}_drop"
- }
-
- # IP protocol routing
- [ 'd' ].each do |bound|
- protocol_maps.each { |k,v|
- cmds << "iptables -A #{bound}_#{vif}_#{k} -j #{bound}_#{vif}_#{k}_drop"
- }
- end
-
- cmds.uniq! if cmds.size > 0
- cmds.compact.each { |cmd|
- #puts cmd
- system(cmd)
- }
-
- logger.debug("refresh_iptables: #{inst_map[:uuid]} done.")
- end
-
- def flush_ebtables(inst_map = {})
- logger.debug("flush_ebtables: #{inst_map[:uuid]} ...")
-
-    # Does this HVA host the instance?
- unless inst_map[:host_pool][:node_id] == node.node_id
- logger.warn "no match for the instance: #{inst_map[:uuid]}"
- return
- end
-
- network_map = rpc.request('hva-collector', 'get_network', inst_map[:host_pool][:network_id])
- raise "UnknownNetworkId" if network_map.nil?
-
- vif = inst_map[:instance_nics].first[:vif]
-
- # support IP protocol
- protocol_maps = {
- 'ip4' => 'ip4',
- 'arp' => 'arp',
- #'ip6' => 'ip6',
- #'rarp' => '0x8035',
- }
-
- # make chain names.
- chains = []
- chains << "s_#{vif}"
- chains << "d_#{vif}"
- chains << "s_#{vif}_d_hst"
- chains << "d_#{vif}_s_hst"
- protocol_maps.each { |k,v|
- chains << "s_#{vif}_#{k}"
- chains << "d_#{vif}_#{k}"
- chains << "s_#{vif}_d_hst_#{k}"
- chains << "d_#{vif}_s_hst_#{k}"
- }
-
-    # clear rules if they exist.
- system("ebtables -L s_#{vif} >/dev/null 2>&1")
- if $?.exitstatus == 0
- cmd = "ebtables -D FORWARD -i #{vif} -j s_#{vif}"
- puts cmd
- system(cmd)
- end
-
- system("ebtables -L d_#{vif} >/dev/null 2>&1")
- if $?.exitstatus == 0
- cmd = "ebtables -D FORWARD -o #{vif} -j d_#{vif}"
- puts cmd
- system(cmd)
- end
-
- system("ebtables -L s_#{vif}_d_hst >/dev/null 2>&1")
- if $?.exitstatus == 0
- cmd = "ebtables -D INPUT -i #{vif} -j s_#{vif}_d_hst"
- puts cmd
- system(cmd)
- end
-
- system("ebtables -L d_#{vif}_s_hst >/dev/null 2>&1")
- if $?.exitstatus == 0
- cmd = "ebtables -D OUTPUT -o #{vif} -j d_#{vif}_s_hst"
- puts cmd
- system(cmd)
- end
-
- [ 'F', 'Z', 'X' ].each { |xcmd|
- chains.each { |chain|
- system("ebtables -L #{chain} >/dev/null 2>&1")
- if $?.exitstatus == 0
- cmd = "ebtables -#{xcmd} #{chain}"
- puts cmd
- system(cmd)
- end
- }
- }
-
- logger.debug("flush_ebtables: #{inst_map[:uuid]} #{vif} done.")
- end
-
- def flush_iptables(inst_map = {})
- logger.debug("flush_iptables: #{inst_map[:uuid]} ...")
-
-    # Does this HVA host the instance?
- unless inst_map[:host_pool][:node_id] == node.node_id
- logger.warn "no match for the instance: #{inst_map[:uuid]}"
- return
- end
-
- network_map = rpc.request('hva-collector', 'get_network', inst_map[:host_pool][:network_id])
- raise "UnknownNetworkId" if network_map.nil?
-
- vif = inst_map[:instance_nics].first[:vif]
-
- # support IP protocol
- protocol_maps = {
- 'tcp' => 'tcp',
- 'udp' => 'udp',
- 'icmp' => 'icmp',
- }
-
- # make chain names.
- chains = []
- protocol_maps.each { |k,v|
- chains << "s_#{vif}_#{k}"
- chains << "d_#{vif}_#{k}"
- chains << "s_#{vif}_#{k}_drop"
- chains << "d_#{vif}_#{k}_drop"
- }
- chains << "s_#{vif}"
- chains << "d_#{vif}"
- chains << "s_#{vif}_drop"
- chains << "d_#{vif}_drop"
-
- # metadata-server
- [ 'D' ].each { |xcmd|
- system("iptables -t nat -#{xcmd} PREROUTING -m physdev --physdev-is-bridged --physdev-in #{vif} -s 0.0.0.0 -d 169.254.169.254 -p tcp --dport 80 -j DNAT --to-destination #{network_map[:metadata_server]}:80 >/dev/null 2>&1")
- }
-
-    # clean up rules if they exist.
- system("iptables -nL s_#{vif} >/dev/null 2>&1")
- if $?.exitstatus == 0
- system("iptables -D FORWARD -m physdev --physdev-is-bridged --physdev-in #{vif} -j s_#{vif}")
- end
-
- system("iptables -nL d_#{vif} >/dev/null 2>&1")
- if $?.exitstatus == 0
- system("iptables -D FORWARD -m physdev --physdev-is-bridged --physdev-out #{vif} -j d_#{vif}")
- end
-
- [ 'F', 'Z', 'X' ].each { |xcmd|
- chains.each { |chain|
- system("iptables -nL #{chain} >/dev/null 2>&1")
- if $?.exitstatus == 0
- system("iptables -#{xcmd} #{chain}")
- end
- }
- }
-
- logger.debug("flush_iptables: #{inst_map[:uuid]} #{vif} done.")
- end
-
- def build_rule(rules = [])
- rule_maps = []
-
- rules.each do |rule|
- rule = rule.strip.gsub(/[\s\t]+/, '')
- from_group = false
- ipv4s = []
-
- # ex.
- # "tcp:22,22,ip4:0.0.0.0"
- # "udp:53,53,ip4:0.0.0.0"
- # "icmp:-1,-1,ip4:0.0.0.0"
-
- # 1st phase
- # ip_dport : tcp,udp? 1 - 16bit, icmp: -1
-      # ip_dport has been separated in the first phase.
- from_pair, ip_dport, source_pair = rule.split(',')
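-      # e.g. "tcp:22,22,ip4:0.0.0.0" splits into
-      #   from_pair="tcp:22", ip_dport="22", source_pair="ip4:0.0.0.0"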
-
- # TODO: more strict validations
- next if from_pair.nil?
- next if ip_dport.nil?
- next if source_pair.nil?
-
- # 2nd phase
- # ip_protocol : [ tcp | udp | icmp ]
- # ip_sport : tcp,udp? 1 - 16bit, icmp: -1
- ip_protocol, ip_sport = from_pair.split(':')
-
- # protocol : [ ip4 | ip6 | #{account_id} ]
-      # ip_source : ip4? xxx.xxx.xxx.xxx/[0-32], ip6? (not yet supported), #{netfilter_group_id}
- protocol, ip_source = source_pair.split(':')
-
- begin
- s = StringScanner.new(protocol)
- until s.eos?
- case
- when s.scan(/ip6/)
- # TODO#FUTURE: support IPv6 address format
- next
- when s.scan(/ip4/)
-          # IPAddress doesn't support prefix '0'.
- ip_addr, prefix = ip_source.split('/', 2)
- if prefix.to_i == 0
- ip_source = ip_addr
- end
- when s.scan(/a-\w{8}/)
- from_group = true
- inst_maps = rpc.request('hva-collector', 'get_instances_of_account_netfilter_group', protocol, ip_source)
- inst_maps.each { |inst_map|
- ipv4s << inst_map[:ips]
- }
- else
- raise "unexpected protocol '#{s.peep(20)}'"
- end
- end
- rescue Exception => e
- p e
- next
- end
-
- begin
- if from_group == false
- #p "from_group:(#{from_group}) ip_source -> #{ip_source}"
- ip = IPAddress(ip_source)
- ip_source = case ip.u32
- when 0
- "#{ip.address}/0"
- else
- "#{ip.address}/#{ip.prefix}"
- end
- else
- ipv4s = ipv4s.flatten.uniq
- end
- rescue Exception => e
- p e
- next
- end
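-      # e.g. IPAddress("0.0.0.0") has u32 == 0 and normalizes to "0.0.0.0/0",
-      # while "10.0.0.0/8" keeps its prefix and stays "10.0.0.0/8".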
-
- case ip_protocol
- when 'tcp', 'udp'
- if from_group == false
- rule_maps << {
- :ip_protocol => ip_protocol,
- :ip_sport => ip_sport.to_i,
- :ip_dport => ip_dport.to_i,
- :protocol => protocol,
- :ip_source => ip_source,
- }
- else
- ipv4s.each { |ip|
- rule_maps << {
- :ip_protocol => ip_protocol,
- :ip_sport => ip_sport.to_i,
- :ip_dport => ip_dport.to_i,
- :protocol => 'ip4',
- :ip_source => ip,
- }
- }
- end
- when 'icmp'
- # via http://docs.amazonwebservices.com/AWSEC2/latest/CommandLineReference/
- #
- # For the ICMP protocol, the ICMP type and code must be specified.
- # This must be specified in the format type:code where both are integers.
- # Type, code, or both can be specified as -1, which is a wildcard.
-
- icmp_type = ip_dport.to_i
- icmp_code = ip_sport.to_i
-
- # icmp_type
- case icmp_type
- when -1
- when 0, 3, 5, 8, 11, 12, 13, 14, 15, 16, 17, 18
- else
- next
- end
-
- # icmp_code
- case icmp_code
- when -1
- when 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
-          # when icmp_type is -1, icmp_code must also be -1.
- next if icmp_type == -1
- else
- next
- end
-
- if from_group == false
- rule_maps << {
- :ip_protocol => ip_protocol,
-          :icmp_type => ip_dport.to_i, # -1 or 0, 3, 5, 8, 11, 12, 13, 14, 15, 16, 17, 18
-          :icmp_code => ip_sport.to_i, # -1 or 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
- :protocol => protocol,
- :ip_source => ip_source,
- }
- else
- ipv4s.each { |ip|
- rule_maps << {
- :ip_protocol => ip_protocol,
-              :icmp_type => ip_dport.to_i, # -1 or 0, 3, 5, 8, 11, 12, 13, 14, 15, 16, 17, 18
-              :icmp_code => ip_sport.to_i, # -1 or 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
- :protocol => 'ip4',
- :ip_source => ip,
- }
- }
- end
- end
- end
-
- rule_maps
- end
-
- def rpc
- @rpc ||= Isono::NodeModules::RpcChannel.new(@node)
- end
-
- def event
- @event ||= Isono::NodeModules::EventChannel.new(@node)
- end
-
-end
-
-require 'net/telnet'
-
-module KvmHelper
- # Establish telnet connection to KVM monitor console
- def connect_monitor(port, &blk)
- begin
- telnet = ::Net::Telnet.new("Host" => "localhost",
- "Port"=>port.to_s,
- "Prompt" => /\n\(qemu\) \z/,
- "Timeout" => 60,
- "Waittime" => 0.2)
-
- blk.call(telnet)
- rescue => e
- logger.error(e) if self.respond_to?(:logger)
- raise e
- ensure
- telnet.close
- end
- end
-end
-
-class InstanceMonitor < Isono::NodeModules::Base
- include KvmHelper
- include Dcmgr::Logger
-
- initialize_hook do
- @thread_pool = Isono::ThreadPool.new(1)
- @monitor = EventMachine::PeriodicTimer.new(5) {
- @thread_pool.pass {
- myinstance.check_instance
- }
- }
- end
-
- terminate_hook do
- @monitor.cancel
- @thread_pool.shutdown
- end
-
- def check_instance()
- instlst = rpc.request('hva-collector', 'get_alive_instances', manifest.node_id)
- instlst.find_all{|i| i[:state] == 'running' }.each { |i|
- begin
- check_kvm_process(i)
- rescue Exception => e
- if i[:status] == 'online'
- logger.error("#{e.class}, #{e.message}")
-
- rpc.request('hva-collector', 'update_instance', i[:uuid], {:status=>:offline}) { |req|
- req.oneshot = true
- }
- event.publish('hva/fault_instance', :args=>[i[:uuid]])
- end
- next
- end
-
- if i[:status] != 'online'
- rpc.request('hva-collector', 'update_instance', i[:uuid], {:status=>:online}) { |req|
- req.oneshot = true
- }
- end
- }
- end
-
- private
- def check_kvm_process(i)
- pid = File.read(File.expand_path("#{i[:uuid]}/kvm.pid", node.manifest.config.vm_data_dir)).to_i
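-    # a live process always has an entry under /proc/<pid>, so the check
-    # below detects a KVM process that has exited or been killed.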
- unless File.exists?(File.expand_path(pid.to_s, '/proc'))
- raise "Unable to find the pid of kvm process: #{pid}"
- end
- end
-
- def rpc
- @rpc ||= Isono::NodeModules::RpcChannel.new(@node)
- end
-
- def event
- @event ||= Isono::NodeModules::EventChannel.new(@node)
- end
-end
-
-class KvmHandler < EndpointBuilder
- include Dcmgr::Logger
- include Dcmgr::Helpers::CliHelper
- include KvmHelper
-
- def find_nic(ifindex = 2)
- ifindex_map = {}
- Dir.glob("/sys/class/net/*/ifindex").each do |ifindex_path|
- device_name = File.split(File.split(ifindex_path).first)[1]
- ifindex_num = File.readlines(ifindex_path).first.strip
- ifindex_map[ifindex_num] = device_name
- end
- #p ifindex_map
- ifindex_map[ifindex.to_s]
- end
-
- def nic_state(if_name = 'eth0')
- operstate_path = "/sys/class/net/#{if_name}/operstate"
- if File.exists?(operstate_path)
- File.readlines(operstate_path).first.strip
- end
- end
-
- def run_kvm(os_devpath)
- # run vm
- cmd = "kvm -m %d -smp %d -name vdc-%s -vnc :%d -drive file=%s -pidfile %s -daemonize -monitor telnet::%d,server,nowait"
- args=[@inst[:instance_spec][:memory_size],
- @inst[:instance_spec][:cpu_cores],
- @inst_id,
- @inst[:runtime_config][:vnc_port],
- os_devpath,
- File.expand_path('kvm.pid', @inst_data_dir),
- @inst[:runtime_config][:telnet_port]
- ]
- if vnic = @inst[:instance_nics].first
- cmd += " -net nic,macaddr=%s -net tap,ifname=%s,script=,downscript="
- args << vnic[:mac_addr].unpack('A2'*6).join(':')
- args << vnic[:uuid]
- end
- sh(cmd, args)
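-    # e.g. with illustrative values the command expands to something like:
-    #   kvm -m 512 -smp 1 -name vdc-i-xxxxxxxx -vnc :10 \
-    #       -drive file=/var/lib/vm/i-xxxxxxxx/i-xxxxxxxx \
-    #       -pidfile /var/lib/vm/i-xxxxxxxx/kvm.pid -daemonize \
-    #       -monitor telnet::15010,server,nowait \
-    #       -net nic,macaddr=52:54:00:12:34:56 -net tap,ifname=vif-xxxxxxxx,script=,downscript=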
-
- unless vnic.nil?
- network_map = rpc.request('hva-collector', 'get_network', @inst[:instance_nics].first[:network_id])
-
- # physical interface
- physical_if = find_nic(@node.manifest.config.hv_ifindex)
- raise "UnknownPhysicalNIC" if physical_if.nil?
-
- if network_map[:vlan_id] == 0
- # bridge interface
- p bridge_if = @node.manifest.config.bridge_novlan
- unless FileTest.exist?("/sys/class/net/#{bridge_if}/ifindex")
- sh("/usr/sbin/brctl addbr %s", [bridge_if])
- sh("/usr/sbin/brctl addif %s %s", [bridge_if, physical_if])
- end
- else
- # vlan interface
- vlan_if = "#{physical_if}.#{network_map[:vlan_id]}"
- unless FileTest.exist?("/sys/class/net/#{vlan_if}/ifindex")
- sh("/sbin/vconfig add #{physical_if} #{network_map[:vlan_id]}")
- end
-
- # bridge interface
- bridge_if = "#{@node.manifest.config.bridge_prefix}-#{physical_if}.#{network_map[:vlan_id]}"
- unless FileTest.exist?("/sys/class/net/#{bridge_if}/ifindex")
- sh("/usr/sbin/brctl addbr %s", [bridge_if])
- sh("/usr/sbin/brctl addif %s %s", [bridge_if, vlan_if])
- end
- end
-
-
-      # bring the interfaces up if they are down
- [ vlan_if, bridge_if ].each do |ifname|
- if nic_state(ifname) == "down"
- sh("/sbin/ifconfig #{ifname} 0.0.0.0 up")
- end
- end
-
- sh("/sbin/ifconfig %s 0.0.0.0 up", [vnic[:uuid]])
- sh("/usr/sbin/brctl addif %s %s", [bridge_if, vnic[:uuid]])
- end
- end
-
- def attach_volume_to_host
-    # poll until the device file below is created.
- # /dev/disk/by-path/ip-192.168.1.21:3260-iscsi-iqn.1986-03.com.sun:02:a1024afa-775b-65cf-b5b0-aa17f3476bfc-lun-0
- linux_dev_path = "/dev/disk/by-path/ip-%s-iscsi-%s-lun-%d" % ["#{@vol[:storage_pool][:ipaddr]}:3260",
- @vol[:transport_information][:iqn],
- @vol[:transport_information][:lun]]
-
- tryagain do
- next true if File.exist?(linux_dev_path)
-
- sh("iscsiadm -m discovery -t sendtargets -p %s", [@vol[:storage_pool][:ipaddr]])
- sh("iscsiadm -m node -l -T '%s' --portal '%s'",
- [@vol[:transport_information][:iqn], @vol[:storage_pool][:ipaddr]])
- sleep 1
- end
-
- rpc.request('sta-collector', 'update_volume', {
- :volume_id=>@vol_id,
- :state=>:attaching,
- :host_device_name => linux_dev_path})
- end
-
- def detach_volume_from_host
- # iscsi logout
- sh("iscsiadm -m node -T '%s' --logout", [@vol[:transport_information][:iqn]])
-
- rpc.request('sta-collector', 'update_volume', {
- :volume_id=>@vol_id,
- :state=>:available,
- :host_device_name=>nil,
- :instance_id=>nil,
- })
- event.publish('hva/volume_detached', :args=>[@inst_id, @vol_id])
- end
-
- def terminate_instance
- kvm_pid=`pgrep -u root -f vdc-#{@inst_id}`
- if $?.exitstatus == 0 && kvm_pid.to_s =~ /^\d+$/
- sh("/bin/kill #{kvm_pid}")
- else
-      logger.error("Cannot find the KVM process. Skipping: kvm -name vdc-#{@inst_id}")
- end
- end
-
- def update_instance_state(opts, ev)
- raise "Can't update instance info without setting @inst_id" if @inst_id.nil?
- rpc.request('hva-collector', 'update_instance', @inst_id, opts)
- event.publish(ev, :args=>[@inst_id])
- end
-
- def update_volume_state(opts, ev)
- raise "Can't update volume info without setting @vol_id" if @vol_id.nil?
- rpc.request('sta-collector', 'update_volume', opts.merge(:volume_id=>@vol_id))
- event.publish(ev, :args=>[@vol_id])
- end
-
- job :run_local_store, proc {
- @inst_id = request.args[0]
- logger.info("Booting #{@inst_id}")
-
- @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
- raise "Invalid instance state: #{@inst[:state]}" unless %w(init failingover).member?(@inst[:state].to_s)
-
- rpc.request('hva-collector', 'update_instance', @inst_id, {:state=>:starting})
- # setup vm data folder
- @inst_data_dir = File.expand_path("#{@inst_id}", @node.manifest.config.vm_data_dir)
- FileUtils.mkdir(@inst_data_dir) unless File.exists?(@inst_data_dir)
- # copy image file
- img_src = @inst[:image][:source]
- case img_src[:type].to_sym
- when :http
- img_path = File.expand_path("#{@inst[:uuid]}", @inst_data_dir)
- sh("curl --silent -o '#{img_path}' #{img_src[:uri]}")
- else
- raise "Unknown image source type: #{img_src[:type]}"
- end
-
- run_kvm(img_path)
- update_instance_state({:state=>:running}, 'hva/instance_started')
- }, proc {
- update_instance_state({:state=>:terminated, :terminated_at=>Time.now},
- 'hva/instance_terminated')
- }
-
- job :run_vol_store, proc {
- @inst_id = request.args[0]
- @vol_id = request.args[1]
-
- @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
- @vol = rpc.request('sta-collector', 'get_volume', @vol_id)
- logger.info("Booting #{@inst_id}")
- raise "Invalid instance state: #{@inst[:state]}" unless %w(init failingover).member?(@inst[:state].to_s)
-
- rpc.request('hva-collector', 'update_instance', @inst_id, {:state=>:starting})
-
- # setup vm data folder
- @inst_data_dir = File.expand_path("#{@inst_id}", @node.manifest.config.vm_data_dir)
- FileUtils.mkdir(@inst_data_dir) unless File.exists?(@inst_data_dir)
-
- # create volume from snapshot
- jobreq.run("zfs-handle.#{@vol[:storage_pool][:node_id]}", "create_volume", @vol_id)
-
- logger.debug("volume created on #{@vol[:storage_pool][:node_id]}: #{@vol_id}")
- # reload volume info
- @vol = rpc.request('sta-collector', 'get_volume', @vol_id)
-
- rpc.request('sta-collector', 'update_volume', {:volume_id=>@vol_id, :state=>:attaching})
- logger.info("Attaching #{@vol_id} on #{@inst_id}")
-    # poll until the device file below is created.
- # /dev/disk/by-path/ip-192.168.1.21:3260-iscsi-iqn.1986-03.com.sun:02:a1024afa-775b-65cf-b5b0-aa17f3476bfc-lun-0
- linux_dev_path = "/dev/disk/by-path/ip-%s-iscsi-%s-lun-%d" % ["#{@vol[:storage_pool][:ipaddr]}:3260",
- @vol[:transport_information][:iqn],
- @vol[:transport_information][:lun]]
-
- # attach disk
- attach_volume_to_host
-
- # run vm
- run_kvm(linux_dev_path)
- update_instance_state({:state=>:running}, 'hva/instance_started')
- update_volume_state({:state=>:attached}, 'hva/volume_attached')
- }, proc {
- update_instance_state({:state=>:terminated, :terminated_at=>Time.now},
- 'hva/instance_terminated')
- }
-
- job :terminate do
- @inst_id = request.args[0]
-
- @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
- raise "Invalid instance state: #{@inst[:state]}" unless @inst[:state].to_s == 'running'
-
- begin
- rpc.request('hva-collector', 'update_instance', @inst_id, {:state=>:shuttingdown})
-
- terminate_instance
-
- unless @inst[:volume].nil?
- @inst[:volume].each { |volid, v|
- @vol_id = volid
- @vol = v
-          # keep detaching the remaining volumes even if one fails during termination.
- detach_volume_from_host rescue logger.error($!)
- }
- end
-
- # cleanup vm data folder
- FileUtils.rm_r(File.expand_path("#{@inst_id}", @node.manifest.config.vm_data_dir))
- ensure
- update_instance_state({:state=>:terminated,:terminated_at=>Time.now},
- 'hva/instance_terminated')
- end
- end
-
-  # just terminate the instance and unmount its volumes without touching
-  # state management.
-  # called from HA, where the faulty instance gets cleaned up properly.
- job :cleanup do
- @inst_id = request.args[0]
-
- @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
- raise "Invalid instance state: #{@inst[:state]}" unless @inst[:state].to_s == 'running'
-
- begin
- terminate_instance
-
- unless @inst[:volume].nil?
- @inst[:volume].each { |volid, v|
- @vol_id = volid
- @vol = v
-          # keep detaching the remaining volumes even if one fails during termination.
- detach_volume_from_host rescue logger.error($!)
- }
- end
- end
-
- end
-
- job :attach, proc {
- @inst_id = request.args[0]
- @vol_id = request.args[1]
-
- @job = Dcmgr::Stm::VolumeContext.new(@vol_id)
- @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
- @vol = rpc.request('sta-collector', 'get_volume', @vol_id)
- logger.info("Attaching #{@vol_id}")
- @job.stm.state = @vol[:state].to_sym
- raise "Invalid volume state: #{@vol[:state]}" unless @vol[:state].to_s == 'available'
-
- @job.stm.on_attach
- rpc.request('sta-collector', 'update_volume', {:volume_id=>@vol_id, :state=>:attaching})
-    # poll until the device file below is created.
- # /dev/disk/by-path/ip-192.168.1.21:3260-iscsi-iqn.1986-03.com.sun:02:a1024afa-775b-65cf-b5b0-aa17f3476bfc-lun-0
- linux_dev_path = "/dev/disk/by-path/ip-%s-iscsi-%s-lun-%d" % ["#{@vol[:storage_pool][:ipaddr]}:3260",
- @vol[:transport_information][:iqn],
- @vol[:transport_information][:lun]]
-
- # attach disk on host os
- attach_volume_to_host
-
- logger.info("Attaching #{@vol_id} on #{@inst_id}")
- @job.stm.on_attach
- @job.on_attach
-
- # attach disk on guest os
-
-    # pci_devaddr consists of three hex numbers with colon separators.
- # dom <= 0xffff && bus <= 0xff && val <= 0x1f
- # see: qemu-0.12.5/hw/pci.c
- # /*
- # * Parse [[<domain>:]<bus>:]<slot>, return -1 on error
- # */
- # static int pci_parse_devaddr(const char *addr, int *domp, int *busp, unsigned *slotp)
- pci_devaddr = nil
-
- sddev = File.expand_path(File.readlink(linux_dev_path), '/dev/disk/by-path')
- connect_monitor(@inst[:runtime_config][:telnet_port]) { |t|
- # success message:
- # OK domain 0, bus 0, slot 4, function 0
- # error message:
- # failed to add file=/dev/xxxx,if=virtio
- c = t.cmd("pci_add auto storage file=#{sddev},if=scsi")
-      # Note: pci_parse_devaddr(), called by "pci_add", uses strtoul()
-      # with base 16, so the input is expected in hex. However, when
-      # displaying the result, pci_device_hot_add_print() uses %d for
-      # the bus and slot addresses. Keep the values in hex so they stay
-      # consistent.
- if c =~ /\nOK domain ([0-9a-fA-F]+), bus ([0-9a-fA-F]+), slot ([0-9a-fA-F]+), function/m
-      # numbers in the OK result are decimal; convert them to hex.
- pci_devaddr = [$1, $2, $3].map{|i| i.to_i.to_s(16) }
- else
- raise "Error in qemu console: #{c}"
- end
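-      # e.g. a reply of "OK domain 0, bus 0, slot 18, function 0" yields
-      #   pci_devaddr == ["0", "0", "12"]   (decimal 18 => hex 12)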
-
- # double check the pci address.
- c = t.cmd("info pci")
-
-      # static void pci_info_device(PCIBus *bus, PCIDevice *d),
-      # called by "info pci", prints the PCI bus info with %d.
- if c.split(/\n/).grep(/^\s+Bus\s+#{pci_devaddr[1].to_i(16)}, device\s+#{pci_devaddr[2].to_i(16)}, function/).empty?
- raise "Could not find new disk device attached to qemu-kvm: #{pci_devaddr.join(':')}"
- end
- }
-
- rpc.request('sta-collector', 'update_volume', @job.to_hash(:guest_device_name=>pci_devaddr.join(':')))
- event.publish('hva/volume_attached', :args=>[@inst_id, @vol_id])
- logger.info("Attached #{@vol_id} on #{@inst_id}")
- }
-
- job :detach do
- @inst_id = request.args[0]
- @vol_id = request.args[1]
-
- @job = Dcmgr::Stm::VolumeContext.new(@vol_id)
- @inst = rpc.request('hva-collector', 'get_instance', @inst_id)
- @vol = rpc.request('sta-collector', 'get_volume', @vol_id)
- logger.info("Detaching #{@vol_id} on #{@inst_id}")
- @job.stm.state = @vol[:state].to_sym
- raise "Invalid volume state: #{@vol[:state]}" unless @vol[:state].to_s == 'attached'
-
- @job.stm.on_detach
- rpc.request('sta-collector', 'update_volume', @job.to_hash)
- # detach disk on guest os
- pci_devaddr = @vol[:guest_device_name]
-
- connect_monitor(@inst[:runtime_config][:telnet_port]) { |t|
- t.cmd("pci_del #{pci_devaddr}")
- #
- # Bus 0, device 4, function 0:
- # SCSI controller: PCI device 1af4:1001
- # IRQ 0.
- # BAR0: I/O at 0x1000 [0x103f].
- # BAR1: 32 bit memory at 0x08000000 [0x08000fff].
- # id ""
- c = t.cmd("info pci")
- pci_devaddr = pci_devaddr.split(':')
- unless c.split(/\n/).grep(/\s+Bus\s+#{pci_devaddr[1].to_i(16)}, device\s+#{pci_devaddr[2].to_i(16)}, function/).empty?
-        raise "Detached disk device is still attached in qemu-kvm: #{pci_devaddr.join(':')}"
- end
- }
-
- detach_volume_from_host
-
- @job.stm.on_detach
- @job.on_detach
- end
-
- def rpc
- @rpc ||= Isono::NodeModules::RpcChannel.new(@node)
- end
-
- def jobreq
- @jobreq ||= Isono::NodeModules::JobChannel.new(@node)
- end
-
- def event
- @event ||= Isono::NodeModules::EventChannel.new(@node)
- end
-end
-
-
manifest = DEFAULT_MANIFEST.dup
manifest.instance_eval do
node_name 'hva'
node_instance_id "#{Isono::Util.default_gw_ipaddr}"
load_module Isono::NodeModules::NodeHeartbeat
- load_module ServiceNetfilter
- load_module InstanceMonitor
+ load_module Dcmgr::NodeModules::ServiceNetfilter
+ load_module Dcmgr::NodeModules::InstanceMonitor
config do |c|
c.vm_data_dir = '/var/lib/vm'
c.enable_ebtables = true
c.enable_iptables = true
c.hv_ifindex = 2 # ex. /sys/class/net/eth0/ifindex => 2
c.bridge_prefix = 'br'
c.bridge_novlan = 'br0'
+ c.verbose_netfilter = false
+ c.packet_drop_log = false
+ c.debug_iptables = false
end
- config_path File.expand_path('config/hva.conf', app_root)
- load_config
+ load_config File.expand_path('config/hva.conf', app_root)
end
start(manifest) do
- endpoint "kvm-handle.#{@node.node_id}", KvmHandler
+ endpoint "kvm-handle.#{@node.node_id}", Dcmgr::Rpc::HvaHandler
end