generated/google/apis/remotebuildexecution_v1/classes.rb in google-api-client-0.37.3 vs generated/google/apis/remotebuildexecution_v1/classes.rb in google-api-client-0.38.0

- old
+ new

@@ -43,12 +43,11 @@ # correct.
class BuildBazelRemoteExecutionV2Action
include Google::Apis::Core::Hashable
# A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server, but servers
- # SHOULD use SHA-256.
+ # and its hash. The hash algorithm to use is defined by the server.
# The size is considered to be an integral part of the digest and cannot be
# separated. That is, even if the `hash` field is correctly specified but
# `size_bytes` is not, the server MUST reject the request.
# The reason for including the size in the digest is as follows: in a great
# many cases, the server needs to know the size of the blob it is about to work
@@ -81,12 +80,11 @@
# @return [Boolean]
attr_accessor :do_not_cache
alias_method :do_not_cache?, :do_not_cache
# A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server, but servers
- # SHOULD use SHA-256.
+ # and its hash. The hash algorithm to use is defined by the server.
# The size is considered to be an integral part of the digest and cannot be
# separated. That is, even if the `hash` field is correctly specified but
# `size_bytes` is not, the server MUST reject the request.
# The reason for including the size in the digest is as follows: in a great
# many cases, the server needs to know the size of the blob it is about to work
@@ -111,10 +109,21 @@
# concatenating two messages to merge them may produce duplicate fields.
# Corresponds to the JSON property `inputRootDigest`
# @return [Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2Digest]
attr_accessor :input_root_digest
+ # List of required supported NodeProperty
+ # keys. In order to ensure that equivalent `Action`s always hash to the same
+ # value, the supported node properties MUST be lexicographically sorted by name.
+ # Sorting of strings is done by code point, equivalently, by the UTF-8 bytes.
+ # The interpretation of these properties is server-dependent. If a property is
+ # not recognized by the server, the server will return an `INVALID_ARGUMENT`
+ # error.
+ # Corresponds to the JSON property `outputNodeProperties`
+ # @return [Array<String>]
+ attr_accessor :output_node_properties
+
# A timeout after which the execution should be killed. If the timeout is
# absent, then the client is specifying that the execution should continue
# as long as the server will let it. The server SHOULD impose a timeout if
# the client does not specify one, however, if the client does specify a
# timeout that is longer than the server's maximum timeout, the server MUST
@@ -139,10 +148,11 @@
# Update properties of this object
def update!(**args)
@command_digest = args[:command_digest] if args.key?(:command_digest)
@do_not_cache = args[:do_not_cache] if args.key?(:do_not_cache)
@input_root_digest = args[:input_root_digest] if args.key?(:input_root_digest)
+ @output_node_properties = args[:output_node_properties] if args.key?(:output_node_properties)
@timeout = args[:timeout] if args.key?(:timeout)
end
end
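The new `outputNodeProperties` field on the Action is just an array of property-name strings. A minimal usage sketch against these generated classes (not part of the diff; the values are illustrative, and the string form of the `timeout` Duration is an assumption):

    require 'google/apis/remotebuildexecution_v1'

    RBE = Google::Apis::RemotebuildexecutionV1

    # The property keys must be deduplicated and sorted lexicographically by
    # code point so that equivalent Actions always hash to the same value.
    action = RBE::BuildBazelRemoteExecutionV2Action.new(
      do_not_cache: false,
      timeout: '600s',   # proto Duration surfaced as a String (assumed)
      output_node_properties: %w[MTime UnixMode].uniq.sort
    )
    # command_digest and input_root_digest would be set to
    # BuildBazelRemoteExecutionV2Digest instances computed from the serialized
    # Command and input-root Directory; the Digest accessors are outside this excerpt.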
# An ActionResult represents the result of an
@@ -159,13 +169,13 @@
# Corresponds to the JSON property `exitCode`
# @return [Fixnum]
attr_accessor :exit_code
# The output directories of the action. For each output directory requested
- # in the `output_directories` field of the Action, if the corresponding
- # directory existed after the action completed, a single entry will be
- # present in the output list, which will contain the digest of a
+ # in the `output_directories` or `output_paths` field of the Action, if the
+ # corresponding directory existed after the action completed, a single entry
+ # will be present in the output list, which will contain the digest of a
# Tree message containing the
# directory tree, and the path equal exactly to the corresponding Action
# output_directories member.
# As an example, suppose the Action had an output directory `a/b/dir` and the
# execution produced the following contents in `a/b/dir`: a file named `bar`
@@ -215,11 +225,12 @@
# `
# ]
# `
# `
# ```
- # If an output of the same name was found, but was not a directory, the
+ # If an output of the same name as listed in `output_files` of
+ # the Command was found in `output_directories`, but was not a directory, the
# server will return a FAILED_PRECONDITION.
# Corresponds to the JSON property `outputDirectories`
# @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2OutputDirectory>]
attr_accessor :output_directories
@@ -235,48 +246,69 @@
# If an output of the same name was found, but was a symbolic link to a file
# instead of a directory, the server will return a FAILED_PRECONDITION.
# If the action does not produce the requested output, then that output
# will be omitted from the list. The server is free to arrange the output
# list as desired; clients MUST NOT assume that the output list is sorted.
+ # DEPRECATED as of v2.1. Servers that wish to be compatible with v2.0 API
+ # should still populate this field in addition to `output_symlinks`.
# Corresponds to the JSON property `outputDirectorySymlinks`
# @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2OutputSymlink>]
attr_accessor :output_directory_symlinks
# The output files of the action that are symbolic links to other files. Those
# may be links to other output files, or input files, or even absolute paths
# outside of the working directory, if the server supports
# SymlinkAbsolutePathStrategy.ALLOWED.
- # For each output file requested in the `output_files` field of the Action,
- # if the corresponding file existed after
+ # For each output file requested in the `output_files` or `output_paths`
+ # field of the Action, if the corresponding file existed after
# the action completed, a single entry will be present either in this field,
# or in the `output_files` field, if the file was not a symbolic link.
- # If an output symbolic link of the same name was found, but its target
- # type was not a regular file, the server will return a FAILED_PRECONDITION.
+ # If an output symbolic link of the same name as listed in `output_files` of
+ # the Command was found, but its target type was not a regular file, the
+ # server will return a FAILED_PRECONDITION.
# If the action does not produce the requested output, then that output
# will be omitted from the list. The server is free to arrange the output
# list as desired; clients MUST NOT assume that the output list is sorted.
+ # DEPRECATED as of v2.1. Servers that wish to be compatible with v2.0 API
+ # should still populate this field in addition to `output_symlinks`.
# Corresponds to the JSON property `outputFileSymlinks`
# @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2OutputSymlink>]
attr_accessor :output_file_symlinks
# The output files of the action. For each output file requested in the
- # `output_files` field of the Action, if the corresponding file existed after
- # the action completed, a single entry will be present either in this field,
- # or the `output_file_symlinks` field if the file was a symbolic link to
- # another file.
- # If an output of the same name was found, but was a directory rather
+ # `output_files` or `output_paths` field of the Action, if the corresponding
+ # file existed after the action completed, a single entry will be present
+ # either in this field, or the `output_file_symlinks` field if the file was
+ # a symbolic link to another file (`output_symlinks` field after v2.1).
+ # If an output listed in `output_files` was found, but was a directory rather
# than a regular file, the server will return a FAILED_PRECONDITION.
# If the action does not produce the requested output, then that output
# will be omitted from the list. The server is free to arrange the output
# list as desired; clients MUST NOT assume that the output list is sorted.
# Corresponds to the JSON property `outputFiles`
# @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2OutputFile>]
attr_accessor :output_files
+ # New in v2.1: this field will only be populated if the command
+ # `output_paths` field was used, and not the pre v2.1 `output_files` or
+ # `output_directories` fields.
+ # The output paths of the action that are symbolic links to other paths. Those
+ # may be links to other outputs, or inputs, or even absolute paths
+ # outside of the working directory, if the server supports
+ # SymlinkAbsolutePathStrategy.ALLOWED.
+ # A single entry for each output requested in `output_paths`
+ # field of the Action, if the corresponding path existed after
+ # the action completed and was a symbolic link.
+ # If the action does not produce a requested output, then that output
+ # will be omitted from the list. The server is free to arrange the output
+ # list as desired; clients MUST NOT assume that the output list is sorted.
+ # Corresponds to the JSON property `outputSymlinks`
+ # @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2OutputSymlink>]
+ attr_accessor :output_symlinks
+
# A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server, but servers
- # SHOULD use SHA-256.
+ # and its hash. The hash algorithm to use is defined by the server.
# The size is considered to be an integral part of the digest and cannot be
# separated. That is, even if the `hash` field is correctly specified but
# `size_bytes` is not, the server MUST reject the request.
# The reason for including the size in the digest is as follows: in a great
# many cases, the server needs to know the size of the blob it is about to work
@@ -313,12 +345,11 @@
# NOTE: Values are automatically base64 encoded/decoded in the client library.
# @return [String]
attr_accessor :stderr_raw
# A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server, but servers
- # SHOULD use SHA-256.
+ # and its hash. The hash algorithm to use is defined by the server.
# The size is considered to be an integral part of the digest and cannot be
# separated. That is, even if the `hash` field is correctly specified but
# `size_bytes` is not, the server MUST reject the request.
# The reason for including the size in the digest is as follows: in a great
# many cases, the server needs to know the size of the blob it is about to work
@@ -366,10 +397,11 @@
@exit_code = args[:exit_code] if args.key?(:exit_code)
@output_directories = args[:output_directories] if args.key?(:output_directories)
@output_directory_symlinks = args[:output_directory_symlinks] if args.key?(:output_directory_symlinks)
@output_file_symlinks = args[:output_file_symlinks] if args.key?(:output_file_symlinks)
@output_files = args[:output_files] if args.key?(:output_files)
+ @output_symlinks = args[:output_symlinks] if args.key?(:output_symlinks)
@stderr_digest = args[:stderr_digest] if args.key?(:stderr_digest)
@stderr_raw = args[:stderr_raw] if args.key?(:stderr_raw)
@stdout_digest = args[:stdout_digest] if args.key?(:stdout_digest)
@stdout_raw = args[:stdout_raw] if args.key?(:stdout_raw)
end
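Because `output_symlinks` is only populated by v2.1 servers for requests that used `output_paths`, while pre-2.1 servers keep using the two deprecated per-kind fields, a consumer of an `ActionResult` built from these classes might merge them along these lines (a sketch; `result` is assumed to be an already-populated `BuildBazelRemoteExecutionV2ActionResult`):

    # Prefer the v2.1 field, falling back to the deprecated per-kind fields
    # that v2.0-compatible servers still populate.
    symlinks = Array(result.output_symlinks)
    if symlinks.empty?
      symlinks = Array(result.output_file_symlinks) +
                 Array(result.output_directory_symlinks)
    end

    Array(result.output_files).each { |f| puts "#{f.path} (executable=#{f.is_executable?})" }
    symlinks.each { |s| puts "#{s.path} -> #{s.target}" }
    Array(result.output_directories).each { |d| puts "#{d.path}/ (directory; contents in its Tree digest)" }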
@@ -423,10 +455,11 @@
# the listed output files. An output directory is allowed to be a parent of
# another output directory.
# Directories leading up to the output directories (but not the output
# directories themselves) are created by the worker prior to execution, even
# if they are not explicitly part of the input root.
+ # DEPRECATED since 2.1: Use `output_paths` instead.
# Corresponds to the JSON property `outputDirectories`
# @return [Array<String>]
attr_accessor :output_directories
# A list of the output files that the client expects to retrieve from the
@@ -444,14 +477,43 @@
# bytes).
# An output file cannot be duplicated, be a parent of another output file, or
# have the same path as any of the listed output directories.
# Directories leading up to the output files are created by the worker prior
# to execution, even if they are not explicitly part of the input root.
+ # DEPRECATED since v2.1: Use `output_paths` instead.
# Corresponds to the JSON property `outputFiles`
# @return [Array<String>]
attr_accessor :output_files
+ # A list of the output paths that the client expects to retrieve from the
+ # action. Only the listed paths will be returned to the client as output.
+ # The type of the output (file or directory) is not specified, and will be
+ # determined by the server after action execution. If the resulting path is
+ # a file, it will be returned in an
+ # OutputFile) typed field.
+ # If the path is a directory, the entire directory structure will be returned
+ # as a Tree message digest, see
+ # OutputDirectory)
+ # Other files or directories that may be created during command execution
+ # are discarded.
+ # The paths are relative to the working directory of the action execution.
+ # The paths are specified using a single forward slash (`/`) as a path
+ # separator, even if the execution platform natively uses a different
+ # separator. The path MUST NOT include a trailing slash, nor a leading slash,
+ # being a relative path.
+ # In order to ensure consistent hashing of the same Action, the output paths
+ # MUST be deduplicated and sorted lexicographically by code point (or,
+ # equivalently, by UTF-8 bytes).
+ # Directories leading up to the output paths are created by the worker prior
+ # to execution, even if they are not explicitly part of the input root.
+ # New in v2.1: this field supersedes the DEPRECATED `output_files` and
+ # `output_directories` fields. If `output_paths` is used, `output_files` and
+ # `output_directories` will be ignored!
+ # Corresponds to the JSON property `outputPaths`
+ # @return [Array<String>]
+ attr_accessor :output_paths
+
# A `Platform` is a set of requirements, such as hardware, operating system, or
# compiler toolchain, for an
# Action's execution
# environment. A `Platform` is represented as a series of key-value pairs
# representing the properties that are required of the platform.
@@ -474,10 +536,11 @@
def update!(**args)
@arguments = args[:arguments] if args.key?(:arguments)
@environment_variables = args[:environment_variables] if args.key?(:environment_variables)
@output_directories = args[:output_directories] if args.key?(:output_directories)
@output_files = args[:output_files] if args.key?(:output_files)
+ @output_paths = args[:output_paths] if args.key?(:output_paths)
@platform = args[:platform] if args.key?(:platform)
@working_directory = args[:working_directory] if args.key?(:working_directory)
end
end
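A sketch of a v2.1-style client building the Command with the new `output_paths` field instead of the deprecated per-kind lists (paths and values are illustrative; the `...CommandEnvironmentVariable` class name follows the generator's usual flattening of `Command.EnvironmentVariable` and is assumed here, since its declaration is outside this excerpt):

    require 'google/apis/remotebuildexecution_v1'

    RBE = Google::Apis::RemotebuildexecutionV1

    command = RBE::BuildBazelRemoteExecutionV2Command.new(
      arguments: ['/bin/sh', '-c', 'make all'],
      environment_variables: [
        RBE::BuildBazelRemoteExecutionV2CommandEnvironmentVariable.new(
          name: 'PATH', value: '/usr/bin:/bin')
      ],
      # output_paths supersedes output_files/output_directories; entries may be
      # files or directories (the server decides after execution) and MUST be
      # deduplicated and sorted by code point.
      output_paths: ['out/bin/app', 'out/logs'].uniq.sort,
      working_directory: ''
    )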
@@ -506,12 +569,11 @@
@value = args[:value] if args.key?(:value)
end
end
# A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server, but servers
- # SHOULD use SHA-256.
+ # and its hash. The hash algorithm to use is defined by the server.
# The size is considered to be an integral part of the digest and cannot be
# separated. That is, even if the `hash` field is correctly specified but
# `size_bytes` is not, the server MUST reject the request.
# The reason for including the size in the digest is as follows: in a great
# many cases, the server needs to know the size of the blob it is about to work
@@ -578,10 +640,13 @@
# children, but the Action may be rejected by the remote system upon
# execution.
# * The files, directories and symlinks in the directory must each be sorted
# in lexicographical order by path. The path strings must be sorted by code
# point, equivalently, by UTF-8 bytes.
+ # * The NodeProperties of files,
+ # directories, and symlinks must be sorted in lexicographical order by
+ # property name.
# A `Directory` that obeys the restrictions is said to be in canonical form.
# As an example, the following could be used for a file named `bar` and a
# directory named `foo` with an executable file named `baz` (hashes shortened
# for readability):
# ```json
@@ -591,12 +656,18 @@
# `
# name: "bar",
# digest: `
# hash: "4a73bc9d03...",
# size: 65534
+ # `,
+ # node_properties: [
# `
+ # "name": "MTime",
+ # "value": "2017-01-15T01:30:15.01Z"
# `
+ # ]
+ # `
# ],
# directories: [
# `
# name: "foo",
# digest: `
@@ -631,10 +702,15 @@
# The files in the directory.
# Corresponds to the JSON property `files`
# @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2FileNode>]
attr_accessor :files
+ # The node properties of the Directory.
+ # Corresponds to the JSON property `nodeProperties`
+ # @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2NodeProperty>]
+ attr_accessor :node_properties
+
# The symlinks in the directory.
# Corresponds to the JSON property `symlinks`
# @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2SymlinkNode>]
attr_accessor :symlinks
@@ -644,10 +720,11 @@
# Update properties of this object
def update!(**args)
@directories = args[:directories] if args.key?(:directories)
@files = args[:files] if args.key?(:files)
+ @node_properties = args[:node_properties] if args.key?(:node_properties)
@symlinks = args[:symlinks] if args.key?(:symlinks)
end
end
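The canonical-form rules now also require `node_properties` to be sorted by property name. A small sketch of a directory built from these classes, with one file carrying the `MTime` property from the comment's example (digests are omitted, as in the Action sketch above; the `...V2Directory` class name follows this file's naming pattern but its declaration is outside the excerpt):

    require 'google/apis/remotebuildexecution_v1'

    RBE = Google::Apis::RemotebuildexecutionV1

    mtime = RBE::BuildBazelRemoteExecutionV2NodeProperty.new(
      name: 'MTime', value: '2017-01-15T01:30:15.01Z')

    bar = RBE::BuildBazelRemoteExecutionV2FileNode.new(
      name: 'bar',
      is_executable: false,
      node_properties: [mtime].sort_by(&:name)   # sorted by property name
    )

    dir = RBE::BuildBazelRemoteExecutionV2Directory.new(
      files: [bar].sort_by(&:name),              # files/directories/symlinks sorted by path
      directories: [],
      symlinks: [],
      node_properties: []
    )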
# A `DirectoryNode` represents a child of a
@@ -655,12 +732,11 @@
# a `Directory` and its associated metadata.
class BuildBazelRemoteExecutionV2DirectoryNode
include Google::Apis::Core::Hashable
# A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server, but servers
- # SHOULD use SHA-256.
+ # and its hash. The hash algorithm to use is defined by the server.
# The size is considered to be an integral part of the digest and cannot be
# separated. That is, even if the `hash` field is correctly specified but
# `size_bytes` is not, the server MUST reject the request.
# The reason for including the size in the digest is as follows: in a great
# many cases, the server needs to know the size of the blob it is about to work
@@ -710,12 +786,11 @@
# Operation.
class BuildBazelRemoteExecutionV2ExecuteOperationMetadata
include Google::Apis::Core::Hashable
# A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server, but servers
- # SHOULD use SHA-256.
+ # and its hash. The hash algorithm to use is defined by the server.
# The size is considered to be an integral part of the digest and cannot be
# separated. That is, even if the `hash` field is correctly specified but
# `size_bytes` is not, the server MUST reject the request.
# The reason for including the size in the digest is as follows: in a great
# many cases, the server needs to know the size of the blob it is about to work
@@ -911,12 +986,11 @@
# A `FileNode` represents a single file and associated metadata.
class BuildBazelRemoteExecutionV2FileNode
include Google::Apis::Core::Hashable
# A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server, but servers
- # SHOULD use SHA-256.
+ # and its hash. The hash algorithm to use is defined by the server.
# The size is considered to be an integral part of the digest and cannot be
# separated. That is, even if the `hash` field is correctly specified but
# `size_bytes` is not, the server MUST reject the request.
# The reason for including the size in the digest is as follows: in a great
# many cases, the server needs to know the size of the blob it is about to work
@@ -952,29 +1026,34 @@
# The name of the file.
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name
+ # The node properties of the FileNode.
+ # Corresponds to the JSON property `nodeProperties`
+ # @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2NodeProperty>]
+ attr_accessor :node_properties
+
def initialize(**args)
update!(**args)
end
# Update properties of this object
def update!(**args)
@digest = args[:digest] if args.key?(:digest)
@is_executable = args[:is_executable] if args.key?(:is_executable)
@name = args[:name] if args.key?(:name)
+ @node_properties = args[:node_properties] if args.key?(:node_properties)
end
end
# A `LogFile` is a log stored in the CAS.
class BuildBazelRemoteExecutionV2LogFile
include Google::Apis::Core::Hashable
# A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server, but servers
- # SHOULD use SHA-256.
+ # and its hash. The hash algorithm to use is defined by the server.
# The size is considered to be an integral part of the digest and cannot be
# separated. That is, even if the `hash` field is correctly specified but
# `size_bytes` is not, the server MUST reject the request.
# The reason for including the size in the digest is as follows: in a great
# many cases, the server needs to know the size of the blob it is about to work
@@ -1020,10 +1099,39 @@
@digest = args[:digest] if args.key?(:digest)
@human_readable = args[:human_readable] if args.key?(:human_readable)
end
end
+ # A single property for FileNodes,
+ # DirectoryNodes, and
+ # SymlinkNodes. The server is
+ # responsible for specifying the property `name`s that it accepts. If
+ # permitted by the server, the same `name` may occur multiple times.
+ class BuildBazelRemoteExecutionV2NodeProperty
+ include Google::Apis::Core::Hashable
+
+ # The property name.
+ # Corresponds to the JSON property `name`
+ # @return [String]
+ attr_accessor :name
+
+ # The property value.
+ # Corresponds to the JSON property `value`
+ # @return [String]
+ attr_accessor :value
+
+ def initialize(**args)
+ update!(**args)
+ end
+
+ # Update properties of this object
+ def update!(**args)
+ @name = args[:name] if args.key?(:name)
+ @value = args[:value] if args.key?(:value)
+ end
+ end
+
# An `OutputDirectory` is the output in an `ActionResult` corresponding to a
# directory's full contents rather than a single file.
class BuildBazelRemoteExecutionV2OutputDirectory
include Google::Apis::Core::Hashable
@@ -1034,12 +1142,11 @@
# Corresponds to the JSON property `path`
# @return [String]
attr_accessor :path
# A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server, but servers
- # SHOULD use SHA-256.
+ # and its hash. The hash algorithm to use is defined by the server.
# The size is considered to be an integral part of the digest and cannot be
# separated. That is, even if the `hash` field is correctly specified but
# `size_bytes` is not, the server MUST reject the request.
# The reason for including the size in the digest is as follows: in a great
# many cases, the server needs to know the size of the blob it is about to work
@@ -1095,12 +1202,11 @@
# NOTE: Values are automatically base64 encoded/decoded in the client library.
# @return [String]
attr_accessor :contents
# A content digest. A digest for a given blob consists of the size of the blob
- # and its hash. The hash algorithm to use is defined by the server, but servers
- # SHOULD use SHA-256.
+ # and its hash. The hash algorithm to use is defined by the server.
# The size is considered to be an integral part of the digest and cannot be
# separated. That is, even if the `hash` field is correctly specified but
# `size_bytes` is not, the server MUST reject the request.
# The reason for including the size in the digest is as follows: in a great
# many cases, the server needs to know the size of the blob it is about to work
@@ -1131,10 +1237,15 @@
# Corresponds to the JSON property `isExecutable`
# @return [Boolean]
attr_accessor :is_executable
alias_method :is_executable?, :is_executable
+ # The supported node properties of the OutputFile, if requested by the Action.
+ # Corresponds to the JSON property `nodeProperties`
+ # @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2NodeProperty>]
+ attr_accessor :node_properties
+
# The full path of the file relative to the working directory, including the
# filename. The path separator is a forward slash `/`. Since this is a
# relative path, it MUST NOT begin with a leading forward slash.
# Corresponds to the JSON property `path`
# @return [String]
@@ -1147,10 +1258,11 @@
# Update properties of this object
def update!(**args)
@contents = args[:contents] if args.key?(:contents)
@digest = args[:digest] if args.key?(:digest)
@is_executable = args[:is_executable] if args.key?(:is_executable)
+ @node_properties = args[:node_properties] if args.key?(:node_properties)
@path = args[:path] if args.key?(:path)
end
end
# An `OutputSymlink` is similar to a
@@ -1158,10 +1270,16 @@
# output in an `ActionResult`.
# `OutputSymlink` is binary-compatible with `SymlinkNode`.
class BuildBazelRemoteExecutionV2OutputSymlink
include Google::Apis::Core::Hashable
+ # The supported node properties of the OutputSymlink, if requested by the
+ # Action.
+ # Corresponds to the JSON property `nodeProperties`
+ # @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2NodeProperty>]
+ attr_accessor :node_properties
+
# The full path of the symlink relative to the working directory, including the
# filename. The path separator is a forward slash `/`. Since this is a
# relative path, it MUST NOT begin with a leading forward slash.
# Corresponds to the JSON property `path`
# @return [String]
@@ -1181,10 +1299,11 @@
update!(**args)
end
# Update properties of this object
def update!(**args)
+ @node_properties = args[:node_properties] if args.key?(:node_properties)
@path = args[:path] if args.key?(:path)
@target = args[:target] if args.key?(:target)
end
end
@@ -1312,10 +1431,15 @@
# The name of the symlink.
# Corresponds to the JSON property `name`
# @return [String]
attr_accessor :name
+ # The node properties of the SymlinkNode.
+ # Corresponds to the JSON property `nodeProperties`
+ # @return [Array<Google::Apis::RemotebuildexecutionV1::BuildBazelRemoteExecutionV2NodeProperty>]
+ attr_accessor :node_properties
+
# The target path of the symlink. The path separator is a forward slash `/`.
# The target path can be relative to the parent directory of the symlink or
# it can be an absolute path starting with `/`. Support for absolute paths
# can be checked using the Capabilities
# API. The canonical form forbids the substrings `/./` and `//` in the target
@@ -1329,10 +1453,11 @@
end
# Update properties of this object
def update!(**args)
@name = args[:name] if args.key?(:name)
+ @node_properties = args[:node_properties] if args.key?(:node_properties)
@target = args[:target] if args.key?(:target)
end
end
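`nodeProperties` on OutputFile and OutputSymlink is only filled in when the Action asked for those keys via `output_node_properties`. Reading them back might look like this (a sketch; `result` is a populated ActionResult as in the earlier example, and `MTime` is just an illustrative property name):

    # Report the MTime property, if the server returned it, for every output file.
    Array(result.output_files).each do |file|
      mtime = Array(file.node_properties).find { |prop| prop.name == 'MTime' }
      puts "#{file.path}: mtime=#{mtime ? mtime.value : 'not reported'}"
    end

    # Symlink outputs carry their properties the same way.
    Array(result.output_symlinks).each do |link|
      props = Array(link.node_properties).map { |prop| "#{prop.name}=#{prop.value}" }
      puts "#{link.path} -> #{link.target} [#{props.join(', ')}]"
    end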
# Details for the tool used to call the API.
@@ -1393,10 +1518,13 @@
# children, but the Action may be rejected by the remote system upon
# execution.
# * The files, directories and symlinks in the directory must each be sorted
# in lexicographical order by path. The path strings must be sorted by code
# point, equivalently, by UTF-8 bytes.
+ # * The NodeProperties of files,
+ # directories, and symlinks must be sorted in lexicographical order by
+ # property name.
# A `Directory` that obeys the restrictions is said to be in canonical form.
# As an example, the following could be used for a file named `bar` and a
# directory named `foo` with an executable file named `baz` (hashes shortened
# for readability):
# ```json
@@ -1406,12 +1534,18 @@
# `
# name: "bar",
# digest: `
# hash: "4a73bc9d03...",
# size: 65534
+ # `,
+ # node_properties: [
# `
+ # "name": "MTime",
+ # "value": "2017-01-15T01:30:15.01Z"
# `
+ # ]
+ # `
# ],
# directories: [
# `
# name: "foo",
# digest: `
@@ -1478,11 +1612,11 @@
# (includes pulling the Docker image, if necessary).
# Corresponds to the JSON property `dockerPrep`
# @return [String]
attr_accessor :docker_prep
- # The timestamp when docker prepartion begins.
+ # The timestamp when docker preparation begins.
# Corresponds to the JSON property `dockerPrepStartTime`
# @return [String]
attr_accessor :docker_prep_start_time
# The time spent downloading the input files and constructing the working
@@ -2184,10 +2318,10 @@
# Corresponds to the JSON property `workerConfig`
# @return [Google::Apis::RemotebuildexecutionV1::GoogleDevtoolsRemotebuildexecutionAdminV1alphaWorkerConfig]
attr_accessor :worker_config
# The desired number of workers in the worker pool. Must be a value between
- # 0 and 1000.
+ # 0 and 15000.
# Corresponds to the JSON property `workerCount`
# @return [Fixnum]
attr_accessor :worker_count
def initialize(**args)