# frozen_string_literal: true
2
3RSpec.describe Net::Llm::OpenAI do
4 let(:api_key) { "test-key" }
5 let(:client) { described_class.new(api_key: api_key) }
6
7 describe "#initialize" do
8 it "sets default base_url" do
9 expect(client.base_url).to eq("https://api.openai.com/v1")
10 end
11
12 it "sets default model" do
13 expect(client.model).to eq("gpt-4o-mini")
14 end
15
16 it "allows custom base_url" do
17 custom_client = described_class.new(api_key: api_key, base_url: "https://custom.com/v1")
18 expect(custom_client.base_url).to eq("https://custom.com/v1")
19 end
20
21 it "allows custom model" do
22 custom_client = described_class.new(api_key: api_key, model: "gpt-4")
23 expect(custom_client.model).to eq("gpt-4")
24 end
25 end
26
27 describe "#chat" do
28 let(:messages) { [{ role: "user", content: "Hello" }] }
29 let(:tools) { [] }
30 let(:response_body) { { choices: [{ message: { content: "Hi" } }] }.to_json }
31
32 it "makes a POST request to chat/completions" do
33 stub_request(:post, "https://api.openai.com/v1/chat/completions")
34 .with(
35 headers: {
36 "Authorization" => "Bearer #{api_key}",
37 "Content-Type" => "application/json"
38 },
39 body: hash_including(
40 model: "gpt-4o-mini",
41 messages: messages,
42 tools: tools,
43 tool_choice: "auto"
44 )
45 )
46 .to_return(status: 200, body: response_body)
47
48 result = client.chat(messages, tools)
49 expect(result["choices"][0]["message"]["content"]).to eq("Hi")
50 end
51
52 it "raises on HTTP error" do
53 stub_request(:post, "https://api.openai.com/v1/chat/completions")
54 .to_return(status: 401, body: "Unauthorized")
55
56 result = client.chat(messages, tools)
57 expect(result["code"]).to eq("401")
58 expect(result["body"]).to eq("Unauthorized")
59 end
60 end
61
62 describe "#models" do
63 let(:response_body) { { data: [{ id: "gpt-4o-mini" }] }.to_json }
64
65 it "makes a GET request to models" do
66 stub_request(:get, "https://api.openai.com/v1/models")
67 .with(headers: { "Authorization" => "Bearer #{api_key}" })
68 .to_return(status: 200, body: response_body)
69
70 result = client.models
71 expect(result["data"]).to be_an(Array)
72 expect(result["data"][0]["id"]).to eq("gpt-4o-mini")
73 end
74
75 it "raises on HTTP error" do
76 stub_request(:get, "https://api.openai.com/v1/models")
77 .to_return(status: 500, body: "Server error")
78
79 result = client.models
80 expect(result["code"]).to eq("500")
81 expect(result["body"]).to eq("Server error")
82 end
83 end
84
85 describe "#embeddings" do
86 let(:input) { "Hello world" }
87 let(:response_body) { { data: [{ embedding: [0.1, 0.2, 0.3] }] }.to_json }
88
89 it "makes a POST request to embeddings" do
90 stub_request(:post, "https://api.openai.com/v1/embeddings")
91 .with(
92 headers: {
93 "Authorization" => "Bearer #{api_key}",
94 "Content-Type" => "application/json"
95 },
96 body: hash_including(
97 model: "text-embedding-ada-002",
98 input: input
99 )
100 )
101 .to_return(status: 200, body: response_body)
102
103 result = client.embeddings(input)
104 expect(result["data"][0]["embedding"]).to eq([0.1, 0.2, 0.3])
105 end
106
107 it "allows custom model" do
108 stub_request(:post, "https://api.openai.com/v1/embeddings")
109 .with(body: hash_including(model: "text-embedding-3-small"))
110 .to_return(status: 200, body: response_body)
111
112 client.embeddings(input, model: "text-embedding-3-small")
113 end
114
115 it "raises on HTTP error" do
116 stub_request(:post, "https://api.openai.com/v1/embeddings")
117 .to_return(status: 400, body: "Bad request")
118
119 result = client.embeddings(input)
120 expect(result["code"]).to eq("400")
121 expect(result["body"]).to eq("Bad request")
122 end
123 end
124end