Comparing changes

v0.4.2 v0.5.0
5 commits 8 files changed

Commits

8558ee3 chore: prep v0.5.0 release mo khan 2026-01-08 05:42:09
b31fe50 feat: add tool aliases mo khan 2025-12-03 21:38:33
ec29950 feat: print thinking text mo khan 2025-12-02 00:00:06
lib/elelem/agent.rb
@@ -74,6 +74,21 @@ module Elelem
       ""
     end
 
+    def format_tool_calls_for_api(tool_calls)
+      tool_calls.map do |tc|
+        args = openai_client? ? JSON.dump(tc[:arguments]) : tc[:arguments]
+        {
+          id: tc[:id],
+          type: "function",
+          function: { name: tc[:name], arguments: args }
+        }
+      end
+    end
+
+    def openai_client?
+      client.is_a?(Net::Llm::OpenAI)
+    end
+
     def execute_turn(messages, tools:)
       turn_context = []
 
@@ -81,32 +96,31 @@ module Elelem
         content = ""
         tool_calls = []
 
-        print "Assistant> Thinking..."
-        client.chat(messages + turn_context, tools) do |chunk|
-          msg = chunk["message"]
-          if msg
-            if msg["content"] && !msg["content"].empty?
-              print "\r\e[K" if content.empty?
-              print msg["content"]
-              content += msg["content"]
-            end
-
-            tool_calls += msg["tool_calls"] if msg["tool_calls"]
+        print "Thinking> "
+        client.fetch(messages + turn_context, tools) do |chunk|
+          case chunk[:type]
+          when :delta
+            print chunk[:thinking] if chunk[:thinking]
+            content += chunk[:content] if chunk[:content]
+          when :complete
+            content = chunk[:content] if chunk[:content]
+            tool_calls = chunk[:tool_calls] || []
           end
         end
 
-        puts
-        turn_context << { role: "assistant", content: content, tool_calls: tool_calls }.compact
+        puts "\nAssistant> #{content}" unless content.to_s.empty?
+        api_tool_calls = tool_calls.any? ? format_tool_calls_for_api(tool_calls) : nil
+        turn_context << { role: "assistant", content: content, tool_calls: api_tool_calls }.compact
 
         if tool_calls.any?
           tool_calls.each do |call|
-            name = call.dig("function", "name")
-            args = call.dig("function", "arguments")
+            name = call[:name]
+            args = call[:arguments]
 
-            puts "Tool> #{name}(#{args})"
+            puts "\nTool> #{name}(#{args})"
             result = toolbox.run_tool(name, args)
             puts format_tool_call_result(result)
-            turn_context << { role: "tool", content: JSON.dump(result) }
+            turn_context << { role: "tool", tool_call_id: call[:id], content: JSON.dump(result) }
           end
 
           tool_calls = []
lib/elelem/application.rb
@@ -2,27 +2,40 @@
 
 module Elelem
   class Application < Thor
+    PROVIDERS = %w[ollama anthropic openai vertex-ai].freeze
+
     desc "chat", "Start the REPL"
-    method_option :host,
-                  aliases: "--host",
+    method_option :provider,
+                  aliases: "-p",
                   type: :string,
-                  desc: "Ollama host",
-                  default: ENV.fetch("OLLAMA_HOST", "localhost:11434")
+                  desc: "LLM provider (#{PROVIDERS.join(', ')})",
+                  default: ENV.fetch("ELELEM_PROVIDER", "ollama")
     method_option :model,
-                  aliases: "--model",
+                  aliases: "-m",
                   type: :string,
-                  desc: "Ollama model",
-                  default: ENV.fetch("OLLAMA_MODEL", "gpt-oss")
+                  desc: "Model name (uses provider default if not specified)"
     def chat(*)
-      client = Net::Llm::Ollama.new(
-        host: options[:host],
-        model: options[:model],
-      )
-      say "Agent (#{options[:model]})", :green
+      client = build_client
+      say "Agent (#{options[:provider]}/#{client.model})", :green
       agent = Agent.new(client, Toolbox.new)
       agent.repl
     end
 
+    private
+
+    def build_client
+      model_opts = options[:model] ? { model: options[:model] } : {}
+
+      case options[:provider]
+      when "ollama"     then Net::Llm::Ollama.new(**model_opts)
+      when "anthropic"  then Net::Llm::Anthropic.new(**model_opts)
+      when "openai"     then Net::Llm::OpenAI.new(**model_opts)
+      when "vertex-ai"  then Net::Llm::VertexAI.new(**model_opts)
+      else
+        raise Error, "Unknown provider: #{options[:provider]}. Use: #{PROVIDERS.join(', ')}"
+      end
+    end
+
     desc "files", "Generate CXML of the files"
     def files
       puts '<documents>'
lib/elelem/toolbox.rb
@@ -2,6 +2,7 @@
 
 module Elelem
   class Toolbox
+
     READ_TOOL = Tool.build("read", "Read complete contents of a file. Requires exact file path.", { path: { type: "string" } }, ["path"]) do |args|
       path = args["path"]
       full_path = Pathname.new(path).expand_path
@@ -36,6 +37,13 @@ module Elelem
       { bytes_written: full_path.write(args["content"]) }
     end
 
+    TOOL_ALIASES = {
+      "exec" => "bash",
+      "execute" => "bash",
+      "open" => "read",
+      "search" => "grep",
+    }
+
     attr_reader :tools
 
     def initialize
@@ -64,7 +72,8 @@ module Elelem
     end
 
     def run_tool(name, args)
-      @tools_by_name[name]&.call(args) || { error: "Unknown tool", name: name, args: args }
+      resolved_name = TOOL_ALIASES.fetch(name, name)
+      @tools_by_name[resolved_name]&.call(args) || { error: "Unknown tool", name: name, args: args }
     rescue => error
       { error: error.message, name: name, args: args, backtrace: error.backtrace.first(5) }
     end
lib/elelem/version.rb
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module Elelem
-  VERSION = "0.4.2"
+  VERSION = "0.5.0"
 end
CHANGELOG.md
@@ -1,5 +1,18 @@
 ## [Unreleased]
 
+## [0.5.0] - 2026-01-08
+
+### Added
+- Multi-provider support: Ollama, Anthropic, OpenAI, and VertexAI
+- `--provider` CLI option to select LLM provider (default: ollama)
+- `--model` CLI option to override default model
+- Tool aliases (`exec`/`execute` → `bash`, `open` → `read`, `search` → `grep`)
+- Thinking text output for models that support extended thinking
+
+### Changed
+- Requires net-llm >= 0.5.0 with unified fetch interface
+- Updated gem description to reflect multi-provider support
+
 ## [0.4.2] - 2025-12-01
 
 ### Changed
elelem.gemspec
@@ -8,25 +8,17 @@ Gem::Specification.new do |spec|
   spec.authors = ["mo khan"]
   spec.email = ["mo@mokhan.ca"]
 
-  spec.summary = "A REPL for Ollama."
-  spec.description = "A REPL for Ollama."
-  spec.homepage = "https://github.com/xlgmokha/elelem"
+  spec.summary = "A minimal coding agent for LLMs."
+  spec.description = "A minimal coding agent supporting Ollama, Anthropic, OpenAI, and VertexAI."
+  spec.homepage = "https://src.mokhan.ca/xlgmokha/elelem"
   spec.license = "MIT"
   spec.required_ruby_version = ">= 3.4.0"
   spec.required_rubygems_version = ">= 3.3.11"
   spec.metadata["allowed_push_host"] = "https://rubygems.org"
   spec.metadata["homepage_uri"] = spec.homepage
-  spec.metadata["source_code_uri"] = "https://github.com/xlgmokha/elelem"
-  spec.metadata["changelog_uri"] = "https://github.com/xlgmokha/elelem/blob/main/CHANGELOG.md"
+  spec.metadata["source_code_uri"] = "https://src.mokhan.ca/xlgmokha/elelem"
+  spec.metadata["changelog_uri"] = "https://src.mokhan.ca/xlgmokha/elelem/blob/main/CHANGELOG.md.html"
 
-  # Specify which files should be added to the gem when it is released.
-  # The `git ls-files -z` loads the files in the RubyGem that have been added into git.
-  # gemspec = File.basename(__FILE__)
-  # spec.files = IO.popen(%w[git ls-files -z], chdir: __dir__, err: IO::NULL) do |ls|
-  #   ls.readlines("\x0", chomp: true).reject do |f|
-  #     (f == gemspec) || f.start_with?(*%w[bin/ test/ spec/ features/ .git Gemfile])
-  #   end
-  # end
   spec.files = [
     "CHANGELOG.md",
     "LICENSE.txt",
@@ -46,16 +38,16 @@ Gem::Specification.new do |spec|
   spec.executables = spec.files.grep(%r{\Aexe/}) { |f| File.basename(f) }
   spec.require_paths = ["lib"]
 
-  spec.add_dependency "erb"
-  spec.add_dependency "fileutils"
-  spec.add_dependency "json"
-  spec.add_dependency "json-schema"
-  spec.add_dependency "logger"
-  spec.add_dependency "net-llm"
-  spec.add_dependency "open3"
-  spec.add_dependency "pathname"
-  spec.add_dependency "reline"
-  spec.add_dependency "set"
-  spec.add_dependency "thor"
-  spec.add_dependency "timeout"
+  spec.add_dependency "erb", "~> 6.0"
+  spec.add_dependency "fileutils", "~> 1.0"
+  spec.add_dependency "json", "~> 2.0"
+  spec.add_dependency "json-schema", "~> 6.0"
+  spec.add_dependency "logger", "~> 1.0"
+  spec.add_dependency "net-llm", "~> 0.5", ">= 0.5.0"
+  spec.add_dependency "open3", "~> 0.1"
+  spec.add_dependency "pathname", "~> 0.1"
+  spec.add_dependency "reline", "~> 0.6"
+  spec.add_dependency "set", "~> 1.0"
+  spec.add_dependency "thor", "~> 1.0"
+  spec.add_dependency "timeout", "~> 0.1"
 end
Gemfile.lock
@@ -1,19 +1,19 @@
 PATH
   remote: .
   specs:
-    elelem (0.4.2)
-      erb
-      fileutils
-      json
-      json-schema
-      logger
-      net-llm
-      open3
-      pathname
-      reline
-      set
-      thor
-      timeout
+    elelem (0.5.0)
+      erb (~> 6.0)
+      fileutils (~> 1.0)
+      json (~> 2.0)
+      json-schema (~> 6.0)
+      logger (~> 1.0)
+      net-llm (~> 0.5, >= 0.5.0)
+      open3 (~> 0.1)
+      pathname (~> 0.1)
+      reline (~> 0.6)
+      set (~> 1.0)
+      thor (~> 1.0)
+      timeout (~> 0.1)
 
 GEM
   remote: https://rubygems.org/
@@ -24,7 +24,7 @@ GEM
     bigdecimal (3.2.2)
     date (3.4.1)
     diff-lcs (1.6.2)
-    erb (5.0.2)
+    erb (6.0.1)
     fileutils (1.8.0)
     io-console (0.8.1)
     irb (1.15.2)
@@ -44,7 +44,7 @@ GEM
       openssl (~> 3.0)
     net-http (0.6.0)
       uri
-    net-llm (0.4.0)
+    net-llm (0.5.0)
       json (~> 2.0)
       net-hippie (~> 1.0)
       uri (~> 1.0)
README.md
@@ -63,7 +63,7 @@ gem install elelem
 
 ## Usage
 
-Start an interactive chat session with an Ollama model:
+Start an interactive chat session:
 
 ```bash
 elelem chat
@@ -71,20 +71,36 @@ elelem chat
 
 ### Options
 
-* `--host` – Ollama host (default: `localhost:11434`).
-* `--model` – Ollama model (default: `gpt-oss`).
-* `--token` – Authentication token.
+* `--provider` – LLM provider: `ollama`, `anthropic`, `openai`, or `vertex-ai` (default: `ollama`).
+* `--model` – Override the default model for the selected provider.
 
 ### Examples
 
 ```bash
-# Default model
+# Default (Ollama)
 elelem chat
 
-# Specific model and host
-elelem chat --model llama2 --host remote-host:11434
+# Anthropic Claude
+ANTHROPIC_API_KEY=sk-... elelem chat --provider anthropic
+
+# OpenAI
+OPENAI_API_KEY=sk-... elelem chat --provider openai
+
+# VertexAI (uses gcloud ADC)
+elelem chat --provider vertex-ai --model claude-sonnet-4@20250514
 ```
 
+### Provider Configuration
+
+Each provider reads its configuration from environment variables:
+
+| Provider    | Environment Variables                              |
+|-------------|---------------------------------------------------|
+| ollama      | `OLLAMA_HOST` (default: localhost:11434)          |
+| anthropic   | `ANTHROPIC_API_KEY`                               |
+| openai      | `OPENAI_API_KEY`, `OPENAI_BASE_URL`               |
+| vertex-ai   | `GOOGLE_CLOUD_PROJECT`, `GOOGLE_CLOUD_REGION`     |
+
 ## Mode System
 
 The agent exposes seven built‑in tools. You can switch which ones are
@@ -148,8 +164,7 @@ arguments as a hash.
 
 ## Contributing
 
-Feel free to open issues or pull requests. The repository follows the
-GitHub Flow.
+Send me an email. For instructions see https://git-send-email.io/.
 
 ## License