lib/judo/server.rb in judo-0.1.0 vs lib/judo/server.rb in judo-0.1.4
- old
+ new
@@ -1,68 +1,32 @@
-### NEEDED for new gem launch
-
-### [ ] return right away.. (1 hr)
-### [ ] two phase delete (1 hr)
-### [-] refactor availability_zone (2 hrs)
-### [ ] pick availability zone from config "X":"Y" or "X":["Y","Z"]
-### [ ] assign to state on creation ( could delay till volume creation )
-### [ ] implement auto security_group creation and setup (6 hrs)
-### [ ] write some examples - simple postgres/redis/couchdb server (5hrs)
-### [ ] write new README (4 hrs)
-### [ ] bind kuzushi gem version
-### [ ] release new gem! (1 hr)
-
-### [ ] should be able to do ALL actions except commit without the repo!
-### [ ] store the git commit hash with each commit to block a judo commit if newer material is already stored
-### [ ] remove the tarball - store files as sha hashes in the bucket - makes for faster commits if the files have not changed
-
-### [ ] use a logger service (1 hr)
-### [ ] write specs (5 hr)
-
-### Error Handling
-### [ ] no availability zone before making disks
-### [ ] security group does not exist
-
-### Do Later
-### [ ] use amazon's new conditional write tools so we never have problems from concurrent updates
-### [ ] is thor really what we want to use here?
-### [ ] need to be able to pin a config to a version of kuzushi - gem updates can/will break a lot of things
-### [ ] I want a "judo monitor" command that will start servers if they go down and poke a listed port to make sure a service is listening - would be cool if it also detected a wrong ami, wrong security group, missing/extra volumes, or a missing/extra elastic_ip - might not want to force a reboot quite yet in these cases
-### [ ] Implement "judo snapshot [NAME]" to take a snapshot of the server's EBS volumes
-### [ ] ruby 1.9.1 support
-### [ ] find a good way to set the hostname or shell prompt to :name
-### [ ] remove the fog/s3 dependency
-### [ ] enforce that template files end in .erb to make room for other template types as defined by their extensions
-### [ ] zerigo integration for automatic DNS setup
-### [ ] How cool would it be if this were all reimplemented in eventmachine and could start lots of boxes in parallel? Would need evented AWS api calls... Never seen a library that does that - we would have to write our own... "Fog Machine?"
-
module Judo
class Server
attr_accessor :name
- def initialize(base, name, group)
+ def initialize(base, name, group, version = nil)
@base = base
@name = name
@group_name = group
end
- def create
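+ ## version pins which committed config revision the server runs;
+ ## snapshots, a hash of device => snapshot_id, clones existing EBS
+ ## snapshots instead of creating blank volumes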
+ def create(version = group.version, snapshots = nil)
raise JudoError, "no group specified" unless @group_name
if @name.nil?
index = @base.servers.map { |s| (s.name =~ /^#{s.group.name}[.](\d*)$/); $1.to_i }.sort.last.to_i + 1
@name = "#{group.name}.#{index}"
end
raise JudoError, "there is already a server named #{name}" if @base.servers.detect { |s| s.name == @name and s != self }
task("Creating server #{name}") do
- update "name" => name, "group" => @group_name, "virgin" => true, "secret" => rand(2 ** 128).to_s(36)
+ update "name" => name, "group" => @group_name, "virgin" => true, "secret" => rand(2 ** 128).to_s(36), "version" => version
@base.sdb.put_attributes("judo_config", "groups", @group_name => name)
end
- allocate_resources
+ allocate_disk(snapshots)
+ allocate_ip
self
end
def group
@@ -96,30 +60,53 @@
"#{ec2_instance_type}/#{instance_size}"
end
end
def version_desc
- return "" unless running?
- if version == group.version
- "v#{version}"
- else
- "v#{version}/#{group.version}"
- end
+ group.version_desc(version)
end
def version
get("version").to_i
end
+ def update_version(new_version)
+ update "version" => new_version
+ end
+
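+ ## kuzushi is the bootstrap gem run on the instance: a brand new server
+ ## gets "init", but a virgin server built from cloned snapshots already
+ ## has data on disk, so it gets "start" like any rebooted server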
+ def kuzushi_action
+ if virgin?
+ if cloned?
+ "start"
+ else
+ "init"
+ end
+ else
+ "start"
+ end
+ end
+
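+ ## "clone", when present, marks a server built from another server's
+ ## snapshots; cloned? gates the kuzushi_action choice above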
+ def clone
+ get("clone")
+ end
+
+ def cloned?
+ !!clone
+ end
+
def virgin?
get("virgin").to_s == "true" ## I'm going to set it to true and it will come back from the db as "true" -> could be "false" or false or nil also
end
def secret
get "secret"
end
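+ ## every snapshot in the base that belongs to this server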
+ def snapshots
+ base.snapshots.select { |s| s.server == self }
+ end
+
def volumes
Hash[ (state["volumes"] || []).map { |a| a.split(":") } ]
end
def self.domain
@@ -145,11 +132,11 @@
state.delete(key)
end
end
def delete
- group.delete_server(self)
+ group.delete_server(self) if group
@base.sdb.delete_attributes(self.class.domain, name)
end
######## end simple DB access #######
@@ -163,11 +150,28 @@
def to_s
"#{name}:#{@group_name}"
end
- def allocate_resources
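+ ## allocate_resources split in two: disks either come from donor
+ ## snapshots (a clone) or are created fresh from the group config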
+ def allocate_disk(snapshots)
+ if snapshots
+ clone_snapshots(snapshots)
+ else
+ create_volumes
+ end
+ end
+
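+ ## each snapshots entry maps a device (e.g. /dev/sdf) to a donor
+ ## snapshot id; the volume is created in this server's availability zone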
+ def clone_snapshots(snapshots)
+ snapshots.each do |device,snap_id|
+ task("Creating EC2 Volume #{device} from #{snap_id}") do
+ volume_id = @base.ec2.create_volume(snap_id, nil, config["availability_zone"])[:aws_id]
+ add_volume(volume_id, device)
+ end
+ end
+ end
+
+ def create_volumes
if config["volumes"]
[config["volumes"]].flatten.each do |volume_config|
device = volume_config["device"]
if volume_config["media"] == "ebs"
size = volume_config["size"]
@@ -183,11 +187,13 @@
else
puts "device #{device || volume_config["mount"]} is not of media type 'ebs', skipping..."
end
end
end
+ end
+ def allocate_ip
begin
if config["elastic_ip"] and not elastic_ip
### EC2 allocate_address
task("Adding an elastic ip") do
ip = @base.ec2.allocate_address
@@ -257,21 +263,22 @@
def running?
## other possible states are "terminated" and nil
["pending", "running", "shutting_down", "degraded"].include?(ec2_state)
end
- def start
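+ ## an explicit new_version lets start boot the server at a specific
+ ## committed config revision instead of the one it was created with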
+ def start(new_version = nil)
invalid "Already running" if running?
invalid "No config has been commited yet, type 'judo commit'" unless group.version > 0
+ task("Updating server version") { update_version(new_version) } if new_version
task("Starting server #{name}") { launch_ec2 }
task("Wait for server") { wait_for_running } if elastic_ip or has_volumes?
task("Attaching ip") { attach_ip } if elastic_ip
task("Attaching volumes") { attach_volumes } if has_volumes?
end
- def restart
- stop if running?
+ def restart(force = false)
+ stop(force) if running?
start
end
def generic_name?
name =~ /^#{group}[.]\d*$/
@@ -283,14 +290,23 @@
def invalid(str)
raise JudoInvalid, str
end
- def stop
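+ ## the trailing true asks EC2 to force-detach even if the instance has
+ ## not released the device; used by stop(force) and the detach timeout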
+ def force_detach_volumes
+ volumes.each do |device,volume_id|
+ task("Force detaching #{volume_id}") do
+ @base.ec2.detach_volume(volume_id, instance_id, device, true)
+ end
+ end
+ end
+
+ def stop(force = false)
invalid "not running" unless running?
## EC2 terminate_instances
task("Terminating instance") { @base.ec2.terminate_instances([ instance_id ]) }
+ force_detach_volumes if force
task("Wait for volumes to detach") { wait_for_volumes_detached } if volumes.size > 0
remove "instance_id"
end
def launch_ec2
@@ -303,12 +319,11 @@
:instance_type => config["instance_size"],
:availability_zone => config["availability_zone"],
:key_name => config["key_name"],
:group_ids => security_groups,
:user_data => ud).first
-
- update "instance_id" => result[:aws_instance_id], "virgin" => false, "version" => group.version
+ update "instance_id" => result[:aws_instance_id], "virgin" => false
end
def debug(str)
return unless ENV['JUDO_DEBUG'] == "1"
puts "<JUDO_DEBUG>#{str}</JUDO_DEBUG>"
@@ -354,14 +369,19 @@
sleep 1
end
end
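+ ## give volumes up to 30 seconds to detach cleanly, then fall back to a
+ ## forced detach (Timeout from ruby's stdlib, assumed required elsewhere)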
def wait_for_volumes_detached
- ## FIXME - force if it takes too long
- loop do
- break if ec2_volumes.reject { |v| v[:aws_status] == "available" }.empty?
- sleep 2
+ begin
+ Timeout::timeout(30) do
+ loop do
+ break if ec2_volumes.reject { |v| v[:aws_status] == "available" }.empty?
+ sleep 2
+ end
+ end
+ rescue Timeout::Error
+ force_detach_volumes
end
end
def wait_for_termination
loop do
@@ -501,9 +521,14 @@
@base.create_keypair
else
raise "cannot use key_pair #{config["key_name"]} b/c it does not exist"
end
end
+ end
+
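+ ## create a named snapshot of this server through the base registry;
+ ## snap.create presumably snapshots each attached EBS volume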
+ def snapshot(name)
+ snap = @base.new_snapshot(name, self.name)
+ snap.create
end
def <=>(s)
[group.name, name] <=> [s.group.name, s.name]
end