syntax = "proto3";
package gateway_protocol;

option java_multiple_files = false;
option java_package = "io.zeebe.gateway.protocol";
option go_package = "pb";

// For more complete documentation, refer to the Zeebe documentation at:
// https://docs.zeebe.io/grpc/reference.html

message ActivateJobsRequest {
  // the job type, as defined in the BPMN process (e.g. <zeebe:taskDefinition type="payment-service" />)
  string type = 1;
  // the name of the worker activating the jobs, mostly used for logging purposes
  string worker = 2;
  // a job returned after this call will not be activated by another call until the
  // timeout (in milliseconds) has been reached
  int64 timeout = 3;
  // the maximum number of jobs to activate by this request
  int32 maxJobsToActivate = 4;
  // a list of variables to fetch as the job variables; if empty, all visible variables at
  // the time of activation for the scope of the job will be returned
  repeated string fetchVariable = 5;
  // The request will be completed when at least one job is activated or after the requestTimeout.
  // If the requestTimeout = 0, a default timeout is used.
  // If the requestTimeout < 0, long polling is disabled and the request is completed immediately,
  // even when no job is activated.
  int64 requestTimeout = 6;
}

message ActivateJobsResponse {
  // list of activated jobs
  repeated ActivatedJob jobs = 1;
}

message ActivatedJob {
  // the key, a unique identifier for the job
  int64 key = 1;
  // the type of the job (should match what was requested)
  string type = 2;
  // the job's workflow instance key
  int64 workflowInstanceKey = 3;
  // the bpmn process ID of the job workflow definition
  string bpmnProcessId = 4;
  // the version of the job workflow definition
  int32 workflowDefinitionVersion = 5;
  // the key of the job workflow definition
  int64 workflowKey = 6;
  // the associated task element ID
  string elementId = 7;
  // the unique key identifying the associated task, unique within the scope of the
  // workflow instance
  int64 elementInstanceKey = 8;
  // a set of custom headers defined during modelling; returned as a serialized
  // JSON document
  string customHeaders = 9;
  // the name of the worker which activated this job
  string worker = 10;
  // the number of retries left for this job (should always be positive)
  int32 retries = 11;
  // when the job can be activated again, sent as a UNIX epoch timestamp
  int64 deadline = 12;
  // JSON document, computed at activation time, consisting of all variables visible to
  // the task scope
  string variables = 13;
}

message CancelWorkflowInstanceRequest {
  // the workflow instance key (as, for example, obtained from
  // CreateWorkflowInstanceResponse)
  int64 workflowInstanceKey = 1;
}

message CancelWorkflowInstanceResponse {
}

message CompleteJobRequest {
  // the unique job identifier, as obtained from ActivateJobsResponse
  int64 jobKey = 1;
  // a JSON document representing the variables in the current task scope
  string variables = 2;
}

message CompleteJobResponse {
}
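// Illustrative sketch (not part of the protocol definition; all values are
// hypothetical): a CompleteJobRequest in protobuf text format, completing a job
// previously returned by ActivateJobs. The variables field carries a JSON
// document serialized as a string.
//
//   jobKey: 2251799813685249
//   variables: "{ \"orderId\": \"order-123\", \"paid\": true }"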
message CreateWorkflowInstanceRequest {
  // the unique key identifying the workflow definition (e.g. returned from a workflow
  // in the DeployWorkflowResponse message)
  int64 workflowKey = 1;
  // the BPMN process ID of the workflow definition
  string bpmnProcessId = 2;
  // the version of the process; set to -1 to use the latest version
  int32 version = 3;
  // JSON document that will instantiate the variables for the root variable scope of the
  // workflow instance; it must be a JSON object, as variables will be mapped in a
  // key-value fashion. e.g. { "a": 1, "b": 2 } will create two variables, named "a" and
  // "b" respectively, with their associated values. [{ "a": 1, "b": 2 }] would not be a
  // valid argument, as the root of the JSON document is an array and not an object.
  string variables = 4;
}

message CreateWorkflowInstanceResponse {
  // the key of the workflow definition which was used to create the workflow instance
  int64 workflowKey = 1;
  // the BPMN process ID of the workflow definition which was used to create the workflow
  // instance
  string bpmnProcessId = 2;
  // the version of the workflow definition which was used to create the workflow instance
  int32 version = 3;
  // the unique identifier of the created workflow instance; to be used wherever a request
  // needs a workflow instance key (e.g. CancelWorkflowInstanceRequest)
  int64 workflowInstanceKey = 4;
}

message CreateWorkflowInstanceWithResultRequest {
  CreateWorkflowInstanceRequest request = 1;
  // timeout in milliseconds. The request will be closed if the workflow is not completed
  // before the requestTimeout.
  // If requestTimeout = 0, the generic requestTimeout configured in the gateway is used.
  int64 requestTimeout = 2;
  // list of names of variables to be included in `CreateWorkflowInstanceWithResultResponse.variables`;
  // if empty, all visible variables in the root scope will be returned.
  repeated string fetchVariables = 3;
}

message CreateWorkflowInstanceWithResultResponse {
  // the key of the workflow definition which was used to create the workflow instance
  int64 workflowKey = 1;
  // the BPMN process ID of the workflow definition which was used to create the workflow
  // instance
  string bpmnProcessId = 2;
  // the version of the workflow definition which was used to create the workflow instance
  int32 version = 3;
  // the unique identifier of the created workflow instance; to be used wherever a request
  // needs a workflow instance key (e.g. CancelWorkflowInstanceRequest)
  int64 workflowInstanceKey = 4;
  // JSON document consisting of the visible variables in the root scope
  string variables = 5;
}

message DeployWorkflowRequest {
  // List of workflow resources to deploy
  repeated WorkflowRequestObject workflows = 1;
}
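// Illustrative sketch (not part of the protocol definition; all values are
// hypothetical): a DeployWorkflowRequest deploying a single BPMN resource, shown
// in protobuf text format. The definition field carries the raw XML of the
// process, abbreviated here.
//
//   workflows {
//     name: "order-process.bpmn"
//     type: BPMN
//     definition: "<?xml version=\"1.0\" encoding=\"UTF-8\"?>..."
//   }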
message WorkflowRequestObject {
  enum ResourceType {
    // FILE type means the gateway will try to detect the resource type
    // using the file extension of the name field
    FILE = 0;
    BPMN = 1; // extension 'bpmn'
    YAML = 2; // extension 'yaml'
  }

  // the resource basename, e.g. myProcess.bpmn
  string name = 1;
  // the resource type; if set to BPMN or YAML then the file extension
  // is ignored
  ResourceType type = 2;
  // the process definition as a UTF-8-encoded string
  bytes definition = 3;
}

message DeployWorkflowResponse {
  // the unique key identifying the deployment
  int64 key = 1;
  // a list of deployed workflows
  repeated WorkflowMetadata workflows = 2;
}

message WorkflowMetadata {
  // the bpmn process ID, as parsed during deployment; together with the version forms a
  // unique identifier for a specific workflow definition
  string bpmnProcessId = 1;
  // the assigned process version
  int32 version = 2;
  // the assigned key, which acts as a unique identifier for this workflow
  int64 workflowKey = 3;
  // the resource name (see: WorkflowRequestObject.name) from which this workflow was
  // parsed
  string resourceName = 4;
}

message FailJobRequest {
  // the unique job identifier, as obtained when activating the job
  int64 jobKey = 1;
  // the number of retries the job should have left
  int32 retries = 2;
  // an optional message describing why the job failed;
  // this is particularly useful if a job runs out of retries and an incident is raised,
  // as this message can help explain why the incident was raised
  string errorMessage = 3;
}

message FailJobResponse {
}

message ThrowErrorRequest {
  // the unique job identifier, as obtained when activating the job
  int64 jobKey = 1;
  // the error code that will be matched with an error catch event
  string errorCode = 2;
  // an optional error message that provides additional context
  string errorMessage = 3;
}

message ThrowErrorResponse {
}

message PublishMessageRequest {
  // the name of the message
  string name = 1;
  // the correlation key of the message
  string correlationKey = 2;
  // how long the message should be buffered on the broker, in milliseconds
  int64 timeToLive = 3;
  // the unique ID of the message; can be omitted. Only useful to ensure that only one message
  // with the given ID will ever be published (during its lifetime)
  string messageId = 4;
  // the message variables as a JSON document; to be valid, the root of the document must be an
  // object, e.g. { "a": "foo" }. [ "foo" ] would not be valid.
  string variables = 5;
}
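// Illustrative sketch (not part of the protocol definition; all values are
// hypothetical): a PublishMessageRequest in protobuf text format. The message is
// buffered for one hour, and the messageId guards against publishing the same
// message twice while it is still buffered.
//
//   name: "payment-received"
//   correlationKey: "order-123"
//   timeToLive: 3600000
//   messageId: "payment-received-order-123"
//   variables: "{ \"amount\": 100 }"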
message PublishMessageResponse {
}

message ResolveIncidentRequest {
  // the unique ID of the incident to resolve
  int64 incidentKey = 1;
}

message ResolveIncidentResponse {
}

message TopologyRequest {
}

message TopologyResponse {
  // list of brokers that are part of this cluster
  repeated BrokerInfo brokers = 1;
  // how many nodes are in the cluster
  int32 clusterSize = 2;
  // how many partitions are spread across the cluster
  int32 partitionsCount = 3;
  // configured replication factor for this cluster
  int32 replicationFactor = 4;
  // gateway version
  string gatewayVersion = 5;
}

message BrokerInfo {
  // unique (within a cluster) node ID for the broker
  int32 nodeId = 1;
  // hostname of the broker
  string host = 2;
  // port for the broker
  int32 port = 3;
  // list of partitions managed or replicated on this broker
  repeated Partition partitions = 4;
  // broker version
  string version = 5;
}

message Partition {
  // Describes the Raft role of the broker for a given partition
  enum PartitionBrokerRole {
    LEADER = 0;
    FOLLOWER = 1;
  }

  // the unique ID of this partition
  int32 partitionId = 1;
  // the role of the broker for this partition
  PartitionBrokerRole role = 2;
}

message UpdateJobRetriesRequest {
  // the unique job identifier, as obtained through ActivateJobs
  int64 jobKey = 1;
  // the new number of retries for the job; must be positive
  int32 retries = 2;
}

message UpdateJobRetriesResponse {
}

message SetVariablesRequest {
  // the unique identifier of a particular element; can be the workflow instance key (as
  // obtained during instance creation), or a given element, such as a service task (see
  // elementInstanceKey on the job message)
  int64 elementInstanceKey = 1;
  // a JSON serialized document describing variables as key value pairs; the root of the document
  // must be an object
  string variables = 2;
  // if true, the variables will be merged strictly into the local scope (as indicated by
  // elementInstanceKey); this means the variables are not propagated to upper scopes.
  // for example, let's say we have two scopes, '1' and '2', with each having effective variables as:
  // 1 => `{ "foo" : 2 }`, and 2 => `{ "bar" : 1 }`. if we send an update request with
  // elementInstanceKey = 2, variables `{ "foo" : 5 }`, and local is true, then scope 1 will
  // be unchanged, and scope 2 will now be `{ "bar" : 1, "foo" : 5 }`. if local were false, however,
  // then scope 1 would be `{ "foo": 5 }`, and scope 2 would be `{ "bar" : 1 }`.
  bool local = 3;
}

message SetVariablesResponse {
  // the unique key of the set variables command
  int64 key = 1;
}

service Gateway {
  /*
    Iterates through all known partitions round-robin, activates up to the requested
    maximum number of jobs, and streams them back to the client as they are activated.

    Errors:
      INVALID_ARGUMENT:
        - type is blank (empty string, null)
        - worker is blank (empty string, null)
        - timeout less than 1
        - maxJobsToActivate is less than 1
  */
  rpc ActivateJobs (ActivateJobsRequest) returns (stream ActivateJobsResponse) {
  }
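  // Illustrative sketch (not part of the protocol definition; all values are
  // hypothetical): an ActivateJobsRequest in protobuf text format. With
  // requestTimeout = 10000, the call long-polls for up to 10 seconds if no job is
  // immediately available.
  //
  //   type: "payment-service"
  //   worker: "payment-worker-1"
  //   timeout: 300000
  //   maxJobsToActivate: 32
  //   fetchVariable: "orderId"
  //   fetchVariable: "amount"
  //   requestTimeout: 10000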
  /*
    Cancels a running workflow instance

    Errors:
      NOT_FOUND:
        - no workflow instance exists with the given key
  */
  rpc CancelWorkflowInstance (CancelWorkflowInstanceRequest) returns (CancelWorkflowInstanceResponse) {
  }

  /*
    Completes a job with the given variables, which allows completing the associated service task.

    Errors:
      NOT_FOUND:
        - no job exists with the given job key. Note that since jobs are removed once completed,
          it could be that this job did exist at some point.
      FAILED_PRECONDITION:
        - the job was marked as failed. In that case, the related incident must be resolved
          before the job can be activated again and completed.
  */
  rpc CompleteJob (CompleteJobRequest) returns (CompleteJobResponse) {
  }

  /*
    Creates and starts an instance of the specified workflow. The workflow definition to use
    to create the instance can be specified either using its unique key (as returned by
    DeployWorkflow), or using the BPMN process ID and a version. Pass -1 as the version to
    use the latest deployed version. Note that only workflows with none start events can be
    started through this command.

    Errors:
      NOT_FOUND:
        - no workflow with the given key exists (if workflowKey was given)
        - no workflow with the given process ID exists (if bpmnProcessId was given but version was -1)
        - no workflow with the given process ID and version exists (if both bpmnProcessId and version were given)
      FAILED_PRECONDITION:
        - the workflow definition does not contain a none start event; only workflows with none
          start event can be started manually.
      INVALID_ARGUMENT:
        - the given variables argument is not a valid JSON document; it is expected to be a valid
          JSON document where the root node is an object.
  */
  rpc CreateWorkflowInstance (CreateWorkflowInstanceRequest) returns (CreateWorkflowInstanceResponse) {
  }

  /*
    Behaves similarly to `rpc CreateWorkflowInstance`, except that a successful response is
    received when the workflow completes successfully.
  */
  rpc CreateWorkflowInstanceWithResult (CreateWorkflowInstanceWithResultRequest) returns (CreateWorkflowInstanceWithResultResponse) {
  }

  /*
    Deploys one or more workflows to Zeebe. Note that this is an atomic call,
    i.e. either all workflows are deployed, or none of them are.

    Errors:
      INVALID_ARGUMENT:
        - no resources given.
        - if at least one resource is invalid. A resource is considered invalid if:
          - it is not a BPMN or YAML file (currently detected through the file extension)
          - the resource data is not deserializable (e.g. detected as BPMN, but it's broken XML)
          - the workflow is invalid (e.g. an event-based gateway has an outgoing sequence flow to a task)
  */
  rpc DeployWorkflow (DeployWorkflowRequest) returns (DeployWorkflowResponse) {
  }

  /*
    Marks the job as failed; if the retries argument is positive, then the job will be immediately
    activatable again, and a worker could try again to process it. If it is zero or negative, however,
    an incident will be raised, tagged with the given errorMessage, and the job will not be
    activatable until the incident is resolved.

    Errors:
      NOT_FOUND:
        - no job was found with the given key
      FAILED_PRECONDITION:
        - the job was not activated
        - the job is already in a failed state, i.e. ran out of retries
  */
  rpc FailJob (FailJobRequest) returns (FailJobResponse) {
  }

  /*
    Reports a business error (i.e. non-technical) that occurs while processing a job. The error
    is handled in the workflow by an error catch event. If there is no error catch event with the
    specified errorCode then an incident will be raised instead.

    Errors:
      NOT_FOUND:
        - no job was found with the given key
      FAILED_PRECONDITION:
        - the job is not in an activated state
  */
  rpc ThrowError (ThrowErrorRequest) returns (ThrowErrorResponse) {
  }
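  // Illustrative sketch (not part of the protocol definition; all values are
  // hypothetical): a ThrowErrorRequest in protobuf text format, reporting a business
  // error that is expected to be caught by an error catch event with the matching
  // error code.
  //
  //   jobKey: 2251799813685249
  //   errorCode: "insufficient-funds"
  //   errorMessage: "account balance does not cover the order amount"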
  /*
    Publishes a single message. Messages are published to specific partitions computed from
    their correlation keys.

    Errors:
      ALREADY_EXISTS:
        - a message with the same ID was previously published (and is still alive)
  */
  rpc PublishMessage (PublishMessageRequest) returns (PublishMessageResponse) {
  }

  /*
    Resolves a given incident. This simply marks the incident as resolved; most likely a call to
    UpdateJobRetries or SetVariables will be necessary to actually resolve the problem, followed
    by this call.

    Errors:
      NOT_FOUND:
        - no incident with the given key exists
  */
  rpc ResolveIncident (ResolveIncidentRequest) returns (ResolveIncidentResponse) {
  }

  /*
    Updates all the variables of a particular scope (e.g. workflow instance, flow element instance)
    from the given JSON document.

    Errors:
      NOT_FOUND:
        - no element with the given elementInstanceKey exists
      INVALID_ARGUMENT:
        - the given variables document is not a valid JSON document; valid documents are expected
          to be JSON documents where the root node is an object.
  */
  rpc SetVariables (SetVariablesRequest) returns (SetVariablesResponse) {
  }

  /*
    Obtains the current topology of the cluster the gateway is part of.
  */
  rpc Topology (TopologyRequest) returns (TopologyResponse) {
  }

  /*
    Updates the number of retries a job has left. This is mostly useful for jobs that have run
    out of retries, should the underlying problem be solved.

    Errors:
      NOT_FOUND:
        - no job exists with the given key
      INVALID_ARGUMENT:
        - retries is not greater than 0
  */
  rpc UpdateJobRetries (UpdateJobRetriesRequest) returns (UpdateJobRetriesResponse) {
  }
}
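// Illustrative sketch (not part of the protocol definition; all values are
// hypothetical): a typical incident-resolution sequence after a job has run out of
// retries, as suggested in the ResolveIncident documentation above. Each step names
// the rpc and shows its request in protobuf text format.
//
//   1. UpdateJobRetries:  jobKey: 2251799813685249  retries: 3
//   2. ResolveIncident:   incidentKey: 2251799813685251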