# frozen_string_literal: true

# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Auto-generated by gapic-generator-ruby. DO NOT EDIT!

module Google
  module Cloud
    module Notebooks
      module V1
        # The description of a notebook execution workload.
        # @!attribute [rw] scale_tier
        #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::ScaleTier]
        #     Required. Scale tier of the hardware used for notebook execution.
        #     DEPRECATED Will be discontinued. As right now only CUSTOM is supported.
        # @!attribute [rw] master_type
        #   @return [::String]
        #     Specifies the type of virtual machine to use for your training
        #     job's master worker. You must specify this field when `scaleTier` is set to
        #     `CUSTOM`.
        #
        #     You can use certain Compute Engine machine types directly in this field.
        #     The following types are supported:
        #
        #     - `n1-standard-4`
        #     - `n1-standard-8`
        #     - `n1-standard-16`
        #     - `n1-standard-32`
        #     - `n1-standard-64`
        #     - `n1-standard-96`
        #     - `n1-highmem-2`
        #     - `n1-highmem-4`
        #     - `n1-highmem-8`
        #     - `n1-highmem-16`
        #     - `n1-highmem-32`
        #     - `n1-highmem-64`
        #     - `n1-highmem-96`
        #     - `n1-highcpu-16`
        #     - `n1-highcpu-32`
        #     - `n1-highcpu-64`
        #     - `n1-highcpu-96`
        #
        #
        #     Alternatively, you can use the following legacy machine types:
        #
        #     - `standard`
        #     - `large_model`
        #     - `complex_model_s`
        #     - `complex_model_m`
        #     - `complex_model_l`
        #     - `standard_gpu`
        #     - `complex_model_m_gpu`
        #     - `complex_model_l_gpu`
        #     - `standard_p100`
        #     - `complex_model_m_p100`
        #     - `standard_v100`
        #     - `large_model_v100`
        #     - `complex_model_m_v100`
        #     - `complex_model_l_v100`
        #
        #
        #     Finally, if you want to use a TPU for training, specify `cloud_tpu` in this
        #     field. Learn more about the [special configuration options for training
        #     with
        #     TPU](https://cloud.google.com/ai-platform/training/docs/using-tpus#configuring_a_custom_tpu_machine).
        # @!attribute [rw] accelerator_config
        #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorConfig]
        #     Configuration (count and accelerator type) for hardware running notebook
        #     execution.
        # @!attribute [rw] labels
        #   @return [::Google::Protobuf::Map{::String => ::String}]
        #     Labels for execution.
        #     If execution is scheduled, a field included will be 'nbs-scheduled'.
        #     Otherwise, it is an immediate execution, and an included field will be
        #     'nbs-immediate'. Use fields to efficiently index between various types of
        #     executions.
        # @!attribute [rw] input_notebook_file
        #   @return [::String]
        #     Path to the notebook file to execute.
        #     Must be in a Google Cloud Storage bucket.
        #     Format: `gs://{bucket_name}/{folder}/{notebook_file_name}`
        #     Ex: `gs://notebook_user/scheduled_notebooks/sentiment_notebook.ipynb`
        # @!attribute [rw] container_image_uri
        #   @return [::String]
        #     Container Image URI to a DLVM
        #     Example: 'gcr.io/deeplearning-platform-release/base-cu100'
        #     More examples can be found at:
        #     https://cloud.google.com/ai-platform/deep-learning-containers/docs/choosing-container
        # @!attribute [rw] output_notebook_folder
        #   @return [::String]
        #     Path to the notebook folder to write to.
        #     Must be in a Google Cloud Storage bucket path.
        #     Format: `gs://{bucket_name}/{folder}`
        #     Ex: `gs://notebook_user/scheduled_notebooks`
        # @!attribute [rw] params_yaml_file
        #   @return [::String]
        #     Parameters to be overridden in the notebook during execution.
        #     Ref https://papermill.readthedocs.io/en/latest/usage-parameterize.html on
        #     how to specify parameters in the input notebook and pass them here
        #     in a YAML file.
        #     Ex: `gs://notebook_user/scheduled_notebooks/sentiment_notebook_params.yaml`
        # @!attribute [rw] parameters
        #   @return [::String]
        #     Parameters used within the 'input_notebook_file' notebook.
        # @!attribute [rw] service_account
        #   @return [::String]
        #     The email address of a service account to use when running the execution.
        #     You must have the `iam.serviceAccounts.actAs` permission for the specified
        #     service account.
        # @!attribute [rw] job_type
        #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::JobType]
        #     The type of Job to be used on this execution.
        # @!attribute [rw] dataproc_parameters
        #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::DataprocParameters]
        #     Parameters used in Dataproc JobType executions.
        # @!attribute [rw] vertex_ai_parameters
        #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::VertexAIParameters]
        #     Parameters used in Vertex AI JobType executions.
        # @!attribute [rw] kernel_spec
        #   @return [::String]
        #     Name of the kernel spec to use. This must be specified if the
        #     kernel spec name on the execution target does not match the name in the
        #     input notebook file.
        # @!attribute [rw] tensorboard
        #   @return [::String]
        #     The name of a Vertex AI [Tensorboard] resource to which this execution
        #     will upload Tensorboard logs.
        #     Format:
        #     `projects/{project}/locations/{location}/tensorboards/{tensorboard}`
        class ExecutionTemplate
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods

          # Definition of a hardware accelerator. Note that not all combinations
          # of `type` and `core_count` are valid. Check [GPUs on
          # Compute Engine](https://cloud.google.com/compute/docs/gpus) to find a valid
          # combination. TPUs are not supported.
          # @!attribute [rw] type
          #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate::SchedulerAcceleratorType]
          #     Type of this accelerator.
          # @!attribute [rw] core_count
          #   @return [::Integer]
          #     Count of cores of this accelerator.
          class SchedulerAcceleratorConfig
            include ::Google::Protobuf::MessageExts
            extend ::Google::Protobuf::MessageExts::ClassMethods
          end

          # Parameters used in Dataproc JobType executions.
          # @!attribute [rw] cluster
          #   @return [::String]
          #     URI for cluster used to run Dataproc execution.
          #     Format: `projects/{PROJECT_ID}/regions/{REGION}/clusters/{CLUSTER_NAME}`
          class DataprocParameters
            include ::Google::Protobuf::MessageExts
            extend ::Google::Protobuf::MessageExts::ClassMethods
          end

          # Parameters used in Vertex AI JobType executions.
          # @!attribute [rw] network
          #   @return [::String]
          #     The full name of the Compute Engine
          #     [network](/compute/docs/networks-and-firewalls#networks) to which the Job
          #     should be peered. For example, `projects/12345/global/networks/myVPC`.
          #     [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
          #     is of the form `projects/{project}/global/networks/{network}`.
          #     Where \\{project} is a project number, as in `12345`, and \\{network} is a
          #     network name.
          #
          #     Private services access must already be configured for the network. If
          #     left unspecified, the job is not peered with any network.
          # @!attribute [rw] env
          #   @return [::Google::Protobuf::Map{::String => ::String}]
          #     Environment variables.
          #     At most 100 environment variables can be specified and unique.
          #     Example: GCP_BUCKET=gs://my-bucket/samples/
          class VertexAIParameters
            include ::Google::Protobuf::MessageExts
            extend ::Google::Protobuf::MessageExts::ClassMethods

            # Map entry type backing the `env` field (one key/value pair each).
            # @!attribute [rw] key
            #   @return [::String]
            # @!attribute [rw] value
            #   @return [::String]
            class EnvEntry
              include ::Google::Protobuf::MessageExts
              extend ::Google::Protobuf::MessageExts::ClassMethods
            end
          end

          # Map entry type backing the `labels` field (one key/value pair each).
          # @!attribute [rw] key
          #   @return [::String]
          # @!attribute [rw] value
          #   @return [::String]
          class LabelsEntry
            include ::Google::Protobuf::MessageExts
            extend ::Google::Protobuf::MessageExts::ClassMethods
          end

          # Required. Specifies the machine types, the number of replicas for workers
          # and parameter servers.
          module ScaleTier
            # Unspecified Scale Tier.
            SCALE_TIER_UNSPECIFIED = 0

            # A single worker instance. This tier is suitable for learning how to use
            # Cloud ML, and for experimenting with new models using small datasets.
            BASIC = 1

            # Many workers and a few parameter servers.
            STANDARD_1 = 2

            # A large number of workers with many parameter servers.
            PREMIUM_1 = 3

            # A single worker instance with a K80 GPU.
            BASIC_GPU = 4

            # A single worker instance with a Cloud TPU.
            BASIC_TPU = 5

            # The CUSTOM tier is not a set tier, but rather enables you to use your
            # own cluster specification. When you use this tier, set values to
            # configure your processing cluster according to these guidelines:
            #
            # * You _must_ set `ExecutionTemplate.masterType` to specify the type
            #   of machine to use for your master node. This is the only required
            #   setting.
            CUSTOM = 6
          end

          # Hardware accelerator types for AI Platform Training jobs.
          module SchedulerAcceleratorType
            # Unspecified accelerator type. Default to no GPU.
            SCHEDULER_ACCELERATOR_TYPE_UNSPECIFIED = 0

            # Nvidia Tesla K80 GPU.
            NVIDIA_TESLA_K80 = 1

            # Nvidia Tesla P100 GPU.
            NVIDIA_TESLA_P100 = 2

            # Nvidia Tesla V100 GPU.
            NVIDIA_TESLA_V100 = 3

            # Nvidia Tesla P4 GPU.
            NVIDIA_TESLA_P4 = 4

            # Nvidia Tesla T4 GPU.
            NVIDIA_TESLA_T4 = 5

            # Nvidia Tesla A100 GPU.
            NVIDIA_TESLA_A100 = 10

            # TPU v2.
            TPU_V2 = 6

            # TPU v3.
            TPU_V3 = 7
          end

          # The backend used for this execution.
          module JobType
            # No type specified.
            JOB_TYPE_UNSPECIFIED = 0

            # Custom Job in `aiplatform.googleapis.com`.
            # Default value for an execution.
            VERTEX_AI = 1

            # Run execution on a cluster with Dataproc as a job.
            # https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs
            DATAPROC = 2
          end
        end

        # The definition of a single executed notebook.
        # @!attribute [rw] execution_template
        #   @return [::Google::Cloud::Notebooks::V1::ExecutionTemplate]
        #     execute metadata including name, hardware spec, region, labels, etc.
        # @!attribute [r] name
        #   @return [::String]
        #     Output only. The resource name of the execute. Format:
        #     `projects/{project_id}/locations/{location}/executions/{execution_id}`
        # @!attribute [r] display_name
        #   @return [::String]
        #     Output only. Name used for UI purposes.
        #     Name can only contain alphanumeric characters and underscores '_'.
        # @!attribute [rw] description
        #   @return [::String]
        #     A brief description of this execution.
        # @!attribute [r] create_time
        #   @return [::Google::Protobuf::Timestamp]
        #     Output only. Time the Execution was instantiated.
        # @!attribute [r] update_time
        #   @return [::Google::Protobuf::Timestamp]
        #     Output only. Time the Execution was last updated.
        # @!attribute [r] state
        #   @return [::Google::Cloud::Notebooks::V1::Execution::State]
        #     Output only. State of the underlying AI Platform job.
        # @!attribute [rw] output_notebook_file
        #   @return [::String]
        #     Output notebook file generated by this execution
        # @!attribute [r] job_uri
        #   @return [::String]
        #     Output only. The URI of the external job used to execute the notebook.
        class Execution
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods

          # Enum description of the state of the underlying AIP job.
          module State
            # The job state is unspecified.
            STATE_UNSPECIFIED = 0

            # The job has been just created and processing has not yet begun.
            QUEUED = 1

            # The service is preparing to execute the job.
            PREPARING = 2

            # The job is in progress.
            RUNNING = 3

            # The job completed successfully.
            SUCCEEDED = 4

            # The job failed.
            # `error_message` should contain the details of the failure.
            FAILED = 5

            # The job is being cancelled.
            # `error_message` should describe the reason for the cancellation.
            CANCELLING = 6

            # The job has been cancelled.
            # `error_message` should describe the reason for the cancellation.
            CANCELLED = 7

            # The job has become expired (relevant to Vertex AI jobs)
            # https://cloud.google.com/vertex-ai/docs/reference/rest/v1/JobState
            EXPIRED = 9

            # The Execution is being created.
            INITIALIZING = 10
          end
        end
      end
    end
  end
end