Commit 3618ec2
Changed files (4):
- lib/net/llm/openai.rb (added)
- lib/net/llm.rb (modified)
- spec/net/llm/openai_spec.rb (added)
- spec/net/llm_spec.rb (modified)
lib/net/llm/openai.rb
@@ -0,0 +1,59 @@
# frozen_string_literal: true

module Net
  module Llm
    # Minimal HTTP client for the OpenAI REST API, built on Net::Hippie.
    # Covers chat completions, model listing, and text embeddings; every
    # successful response is returned as parsed JSON (a Hash).
    class OpenAI
      attr_reader :api_key, :base_url, :model

      # @param api_key  [String] bearer token sent on every request
      # @param base_url [String] API root; point at a proxy or any
      #   OpenAI-compatible server to redirect traffic
      # @param model    [String] default model used by #chat
      def initialize(api_key:, base_url: "https://api.openai.com/v1", model: "gpt-4o-mini")
        @api_key = api_key
        @base_url = base_url
        @model = model
      end

      # POSTs a chat-completion request with tool calling enabled
      # (tool_choice is always "auto").
      #
      # @param messages [Array<Hash>] conversation history
      # @param tools [Array<Hash>] tool/function definitions
      # @param timeout [Integer] read/open timeout in seconds
      # @return [Hash] parsed completion payload
      # @raise [RuntimeError] on any non-2xx response
      def chat(messages, tools, timeout: DEFAULT_TIMEOUT)
        payload = { model: model, messages: messages, tools: tools, tool_choice: "auto" }
        response = http(timeout).post("#{base_url}/chat/completions", headers: auth_headers, body: payload)
        parse_response(response)
      end

      # GETs the list of models visible to this API key.
      #
      # @return [Hash] parsed models payload
      def models(timeout: DEFAULT_TIMEOUT)
        parse_response(http(timeout).get("#{base_url}/models", headers: auth_headers))
      end

      # POSTs an embeddings request for +input+.
      #
      # @param input [String, Array<String>] text to embed
      # @param model [String] embedding model (overridable per call)
      # @return [Hash] parsed embeddings payload
      def embeddings(input, model: "text-embedding-ada-002", timeout: DEFAULT_TIMEOUT)
        payload = { model: model, input: input }
        response = http(timeout).post("#{base_url}/embeddings", headers: auth_headers, body: payload)
        parse_response(response)
      end

      private

      # Builds a fresh client per call so each request honours its own timeout.
      def http(timeout)
        Net::Hippie::Client.new(read_timeout: timeout, open_timeout: timeout)
      end

      def auth_headers
        { "Authorization" => Net::Hippie.bearer_auth(api_key) }
      end

      # Parses a successful response body as JSON; raises with the HTTP
      # status code and raw body otherwise.
      def parse_response(response)
        return JSON.parse(response.body) if response.is_a?(Net::HTTPSuccess)

        raise "HTTP #{response.code}: #{response.body}"
      end
    end
  end
end
lib/net/llm.rb
@@ -1,6 +1,7 @@
# frozen_string_literal: true
require_relative "llm/version"
+require_relative "llm/openai"
require_relative "llm/ollama"
require_relative "llm/anthropic"
require "net/hippie"
@@ -10,59 +11,5 @@ module Net
module Llm
class Error < StandardError; end
DEFAULT_TIMEOUT = 60 * 2
-
- class OpenAI
- attr_reader :api_key, :base_url, :model
-
- def initialize(api_key:, base_url: "https://api.openai.com/v1", model: "gpt-4o-mini")
- @api_key = api_key
- @base_url = base_url
- @model = model
- end
-
- def chat(messages, tools, timeout: DEFAULT_TIMEOUT)
- response = client(timeout).post(
- "#{base_url}/chat/completions",
- headers: auth_headers,
- body: { model: model, messages: messages, tools: tools, tool_choice: "auto" }
- )
- handle_response(response)
- end
-
- def models(timeout: DEFAULT_TIMEOUT)
- response = client(timeout).get(
- "#{base_url}/models",
- headers: auth_headers
- )
- handle_response(response)
- end
-
- def embeddings(input, model: "text-embedding-ada-002", timeout: DEFAULT_TIMEOUT)
- response = client(timeout).post(
- "#{base_url}/embeddings",
- headers: auth_headers,
- body: { model: model, input: input }
- )
- handle_response(response)
- end
-
- private
-
- def client(timeout)
- Net::Hippie::Client.new(
- read_timeout: timeout,
- open_timeout: timeout
- )
- end
-
- def auth_headers
- { "Authorization" => Net::Hippie.bearer_auth(api_key) }
- end
-
- def handle_response(response)
- raise "HTTP #{response.code}: #{response.body}" unless response.is_a?(Net::HTTPSuccess)
- JSON.parse(response.body)
- end
- end
end
end
spec/net/llm/openai_spec.rb
@@ -0,0 +1,118 @@
# frozen_string_literal: true

# Unit specs for Net::Llm::OpenAI. All HTTP traffic is intercepted with
# WebMock's stub_request, so no network access is required.
RSpec.describe Net::Llm::OpenAI do
  subject(:client) { described_class.new(api_key: api_key) }

  let(:api_key) { "test-key" }

  describe "#initialize" do
    it "sets default base_url" do
      expect(client.base_url).to eq("https://api.openai.com/v1")
    end

    it "sets default model" do
      expect(client.model).to eq("gpt-4o-mini")
    end

    it "allows custom base_url" do
      custom_client = described_class.new(api_key: api_key, base_url: "https://custom.com/v1")
      expect(custom_client.base_url).to eq("https://custom.com/v1")
    end

    it "allows custom model" do
      custom_client = described_class.new(api_key: api_key, model: "gpt-4")
      expect(custom_client.model).to eq("gpt-4")
    end
  end

  describe "#chat" do
    let(:messages) { [{ role: "user", content: "Hello" }] }
    let(:tools) { [] }
    let(:response_body) { { choices: [{ message: { content: "Hi" } }] }.to_json }

    it "makes a POST request to chat/completions" do
      expected_headers = {
        "Authorization" => "Bearer #{api_key}",
        "Content-Type" => "application/json"
      }
      expected_body = hash_including(
        model: "gpt-4o-mini", messages: messages, tools: tools, tool_choice: "auto"
      )
      stub_request(:post, "https://api.openai.com/v1/chat/completions")
        .with(headers: expected_headers, body: expected_body)
        .to_return(status: 200, body: response_body)

      result = client.chat(messages, tools)
      expect(result.dig("choices", 0, "message", "content")).to eq("Hi")
    end

    it "raises on HTTP error" do
      stub_request(:post, "https://api.openai.com/v1/chat/completions")
        .to_return(status: 401, body: "Unauthorized")

      expect { client.chat(messages, tools) }.to raise_error(/HTTP 401/)
    end
  end

  describe "#models" do
    let(:response_body) { { data: [{ id: "gpt-4o-mini" }] }.to_json }

    it "makes a GET request to models" do
      stub_request(:get, "https://api.openai.com/v1/models")
        .with(headers: { "Authorization" => "Bearer #{api_key}" })
        .to_return(status: 200, body: response_body)

      result = client.models
      expect(result["data"]).to be_an(Array)
      expect(result.dig("data", 0, "id")).to eq("gpt-4o-mini")
    end

    it "raises on HTTP error" do
      stub_request(:get, "https://api.openai.com/v1/models")
        .to_return(status: 500, body: "Server error")

      expect { client.models }.to raise_error(/HTTP 500/)
    end
  end

  describe "#embeddings" do
    let(:input) { "Hello world" }
    let(:response_body) { { data: [{ embedding: [0.1, 0.2, 0.3] }] }.to_json }

    it "makes a POST request to embeddings" do
      expected_headers = {
        "Authorization" => "Bearer #{api_key}",
        "Content-Type" => "application/json"
      }
      stub_request(:post, "https://api.openai.com/v1/embeddings")
        .with(
          headers: expected_headers,
          body: hash_including(model: "text-embedding-ada-002", input: input)
        )
        .to_return(status: 200, body: response_body)

      result = client.embeddings(input)
      expect(result.dig("data", 0, "embedding")).to eq([0.1, 0.2, 0.3])
    end

    it "allows custom model" do
      stub_request(:post, "https://api.openai.com/v1/embeddings")
        .with(body: hash_including(model: "text-embedding-3-small"))
        .to_return(status: 200, body: response_body)

      client.embeddings(input, model: "text-embedding-3-small")
    end

    it "raises on HTTP error" do
      stub_request(:post, "https://api.openai.com/v1/embeddings")
        .to_return(status: 400, body: "Bad request")

      expect { client.embeddings(input) }.to raise_error(/HTTP 400/)
    end
  end
end
spec/net/llm_spec.rb
@@ -5,120 +5,3 @@ RSpec.describe Net::Llm do
expect(Net::Llm::VERSION).not_to be nil
end
end
-
-RSpec.describe Net::Llm::OpenAI do
- let(:api_key) { "test-key" }
- let(:client) { described_class.new(api_key: api_key) }
-
- describe "#initialize" do
- it "sets default base_url" do
- expect(client.base_url).to eq("https://api.openai.com/v1")
- end
-
- it "sets default model" do
- expect(client.model).to eq("gpt-4o-mini")
- end
-
- it "allows custom base_url" do
- custom_client = described_class.new(api_key: api_key, base_url: "https://custom.com/v1")
- expect(custom_client.base_url).to eq("https://custom.com/v1")
- end
-
- it "allows custom model" do
- custom_client = described_class.new(api_key: api_key, model: "gpt-4")
- expect(custom_client.model).to eq("gpt-4")
- end
- end
-
- describe "#chat" do
- let(:messages) { [{ role: "user", content: "Hello" }] }
- let(:tools) { [] }
- let(:response_body) { { choices: [{ message: { content: "Hi" } }] }.to_json }
-
- it "makes a POST request to chat/completions" do
- stub_request(:post, "https://api.openai.com/v1/chat/completions")
- .with(
- headers: {
- "Authorization" => "Bearer #{api_key}",
- "Content-Type" => "application/json"
- },
- body: hash_including(
- model: "gpt-4o-mini",
- messages: messages,
- tools: tools,
- tool_choice: "auto"
- )
- )
- .to_return(status: 200, body: response_body)
-
- result = client.chat(messages, tools)
- expect(result["choices"][0]["message"]["content"]).to eq("Hi")
- end
-
- it "raises on HTTP error" do
- stub_request(:post, "https://api.openai.com/v1/chat/completions")
- .to_return(status: 401, body: "Unauthorized")
-
- expect { client.chat(messages, tools) }.to raise_error(/HTTP 401/)
- end
- end
-
- describe "#models" do
- let(:response_body) { { data: [{ id: "gpt-4o-mini" }] }.to_json }
-
- it "makes a GET request to models" do
- stub_request(:get, "https://api.openai.com/v1/models")
- .with(headers: { "Authorization" => "Bearer #{api_key}" })
- .to_return(status: 200, body: response_body)
-
- result = client.models
- expect(result["data"]).to be_an(Array)
- expect(result["data"][0]["id"]).to eq("gpt-4o-mini")
- end
-
- it "raises on HTTP error" do
- stub_request(:get, "https://api.openai.com/v1/models")
- .to_return(status: 500, body: "Server error")
-
- expect { client.models }.to raise_error(/HTTP 500/)
- end
- end
-
- describe "#embeddings" do
- let(:input) { "Hello world" }
- let(:response_body) { { data: [{ embedding: [0.1, 0.2, 0.3] }] }.to_json }
-
- it "makes a POST request to embeddings" do
- stub_request(:post, "https://api.openai.com/v1/embeddings")
- .with(
- headers: {
- "Authorization" => "Bearer #{api_key}",
- "Content-Type" => "application/json"
- },
- body: hash_including(
- model: "text-embedding-ada-002",
- input: input
- )
- )
- .to_return(status: 200, body: response_body)
-
- result = client.embeddings(input)
- expect(result["data"][0]["embedding"]).to eq([0.1, 0.2, 0.3])
- end
-
- it "allows custom model" do
- stub_request(:post, "https://api.openai.com/v1/embeddings")
- .with(body: hash_including(model: "text-embedding-3-small"))
- .to_return(status: 200, body: response_body)
-
- client.embeddings(input, model: "text-embedding-3-small")
- end
-
- it "raises on HTTP error" do
- stub_request(:post, "https://api.openai.com/v1/embeddings")
- .to_return(status: 400, body: "Bad request")
-
- expect { client.embeddings(input) }.to raise_error(/HTTP 400/)
- end
- end
-end