Sha256: 8b70b73c5774eaf22783fcef149fece4c96071ebe6643cd85db4ec9b76f770d4
Contents?: true
Size: 1.67 KB
Versions: 4
Compression:
Stored size: 1.67 KB
Contents
# Registry of Groq-hosted models plus helpers for listing them and for
# fetching the live model catalogue from the Groq API.
class Groq::Model
  # Known models, in preference order; the first entry is the default.
  # Frozen so the shared registry cannot be mutated by callers.
  MODELS = [
    {
      name: "LLaMA3 8b",
      model_id: "llama3-8b-8192",
      developer: "Meta",
      context_window: 8192,
      model_card: "https://huggingface.co/meta-llama/Meta-Llama-3-8B"
    }.freeze,
    {
      name: "LLaMA3 70b",
      model_id: "llama3-70b-8192",
      developer: "Meta",
      context_window: 8192,
      model_card: "https://huggingface.co/meta-llama/Meta-Llama-3-70B"
    }.freeze,
    {
      name: "LLaMA2 70b",
      model_id: "llama2-70b-4096",
      developer: "Meta",
      context_window: 4096,
      model_card: "https://huggingface.co/meta-llama/Llama-2-70b"
    }.freeze,
    {
      name: "Mixtral 8x7b",
      model_id: "mixtral-8x7b-32768",
      developer: "Mistral",
      context_window: 32768,
      model_card: "https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1"
    }.freeze,
    {
      name: "Gemma 7b",
      model_id: "gemma-7b-it",
      developer: "Google",
      context_window: 8192,
      model_card: "https://huggingface.co/google/gemma-1.1-7b-it"
    }.freeze
  ].freeze

  class << self
    # @return [Array<String>] the :model_id of every entry in MODELS
    def model_ids
      MODELS.map { |m| m[:model_id] }
    end

    # @return [Hash] the default model entry (first in MODELS)
    def default_model
      MODELS.first
    end

    # @return [String] the :model_id of the default model
    def default_model_id
      default_model[:model_id]
    end

    # Fetches the live model list from the Groq API.
    # Endpoint: https://api.groq.com/openai/v1/models
    # Example response body:
    # {"object": "list",
    #  "data": [
    #    {
    #      "id": "gemma-7b-it",
    #      "object": "model",
    #      "created": 1693721698,
    #      "owned_by": "Google",
    #      "active": true,
    #      "context_window": 8192
    #    },
    #
    # @param client [Groq::Client, nil] API client to use; when omitted or nil
    #   a fresh Groq::Client is constructed. (Previously the keyword was
    #   required, which made the `||=` fallback unreachable unless a caller
    #   explicitly passed `client: nil`.)
    # @return [Object] the parsed response body as returned by the client
    def load_models(client: nil)
      client ||= Groq::Client.new
      response = client.get(path: "/openai/v1/models")
      response.body
    end
  end
end
Version data entries
4 entries across 4 versions & 1 rubygems
Version | Path |
---|---|
groq-0.3.2 | lib/groq/model.rb |
groq-0.3.1 | lib/groq/model.rb |
groq-0.3.0 | lib/groq/model.rb |
groq-0.2.0 | lib/groq/model.rb |