Onboard vercel AI SDK by rahulkarajgikar · Pull Request #2 · truffle-ai/saiki
Merged · 13 commits · Apr 3, 2025
5 changes: 1 addition & 4 deletions .env.example
@@ -1,14 +1,11 @@
 # Provider-specific API keys
 # You only need to set the API key for the provider you're using

 # OpenAI API key (required when using OpenAI models)
-# Must start with 'sk-'
 OPENAI_API_KEY=your_openai_api_key_here

 # Anthropic API key (required when using Anthropic Claude models)
-# Must start with 'sk-ant-'
 ANTHROPIC_API_KEY=your_anthropic_api_key_here

-# Optional: Set the log level to debug or silly to see more verbose output
+# Set the log level to debug or silly to see more verbose output. Default is info.
 # LOG_LEVEL=debug
 # LOG_LEVEL=silly
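The updated example drops the key-format hints and documents an 'info' default for LOG_LEVEL. A minimal sketch of how these variables might be read at startup (illustrative only; the PR itself changes nothing but the comments):

import dotenv from 'dotenv';

// Make OPENAI_API_KEY, ANTHROPIC_API_KEY, and LOG_LEVEL visible on process.env.
dotenv.config();

// Only the key for the provider actually in use needs to be present.
const openaiKey = process.env.OPENAI_API_KEY;
const anthropicKey = process.env.ANTHROPIC_API_KEY;

// Fall back to 'info' when LOG_LEVEL is unset, as the comment describes.
const logLevel = process.env.LOG_LEVEL ?? 'info';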
52 changes: 43 additions & 9 deletions app/cli.ts
@@ -2,9 +2,10 @@ import readline from 'readline';
 import chalk from 'chalk';
 import { ClientManager } from '../src/client/manager.js';
 import { logger } from '../src/utils/logger.js';
-import { LLMCallbacks, LLMService } from '../src/ai/llm/types.js';
+import { LLMCallbacks, ILLMService } from '../src/ai/llm/types.js';
 import { AgentConfig } from '../src/config/types.js';
 import { initializeServices } from '../src/utils/service-initializer.js';
+import boxen from 'boxen';

 /**
  * Start AI-powered CLI with unified configuration
@@ -34,11 +35,15 @@ export async function initializeAiCli(
  */
 export async function runAiCli(
     clientManager: ClientManager,
-    llmService: LLMService
+    llmService: ILLMService
 ) {
     // Get model and provider info directly from the LLM service
-    const { provider, model } = llmService.getConfig();
-    logger.info(`Using ${provider} model: ${model}`, null, 'yellow');
+    // const { provider, model } = llmService.getConfig();
+    logger.info(
+        `Using model config:${JSON.stringify(llmService.getConfig(), null, 2)}`,
+        null,
+        'yellow'
+    );

     logger.debug(`Log level: ${logger.getLevel()}`);
     logger.info(`Connected servers: ${clientManager.getClients().size}`, null, 'green');
@@ -52,18 +57,18 @@ export async function runAiCli(
         // Get available tools from all connected servers
         logger.info('Loading available tools...');

-        // Get all tools from the manager
+        // Get all tools from the LLM service
         const tools = await clientManager.getAllTools();

         logger.debug(`Received tools: ${tools.map((t) => t.name)}`);

-        // Update system context with available tools
+        // Update the system context with the available tools
         logger.info('Updating system context...');
         llmService.updateSystemContext(tools);

         logger.info(
-            `Loaded ${tools.length} tools from ${clientManager.getClients().size} tool providers\n`
+            `Loaded ${Object.keys(tools).length} tools from ${clientManager.getClients().size} MCP servers\n`
         );
         logger.info('AI Agent initialized successfully!', null, 'green');

         // Create readline interface
         const rl = readline.createInterface({
             input: process.stdin,
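One detail worth noting in the hunk above: `tools.length` became `Object.keys(tools).length`, which suggests getAllTools() now returns tools keyed by name (the shape the Vercel AI SDK expects) rather than an array; if so, the unchanged `tools.map((t) => t.name)` debug line would also need updating. A sketch of the implied shape; the type names and sample entries are assumptions, not code from the PR:

// Assumed shape: a record of tool definitions keyed by tool name.
type ToolDefinition = { description?: string; parameters?: unknown };
type ToolSet = Record<string, ToolDefinition>;

const tools: ToolSet = {
    readFile: { description: 'Read a file from disk' },
    runCommand: { description: 'Execute a shell command' },
};

console.log(`Loaded ${Object.keys(tools).length} tools: ${Object.keys(tools).join(', ')}`);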
@@ -103,8 +108,37 @@ export async function runAiCli(
             }

             try {
+                let accumulatedResponse = '';
+                let currentLines = 0;
                 // Create callbacks for progress indication (without spinner)
                 const callbacks: LLMCallbacks = {
+                    onChunk: (text: string) => {
+                        // Append the new chunk to the accumulated response
+                        accumulatedResponse += text;
+
+                        // Generate the new box
+                        const box = boxen(chalk.white(accumulatedResponse), {
+                            padding: 1,
+                            borderColor: 'yellow',
+                            title: '🤖 AI Response',
+                            titleAlignment: 'center',
+                        });
+                        const newLines = box.split('\n').length;
+
+                        // Move cursor up to the start of the previous box (if it exists)
+                        if (currentLines > 0) {
+                            process.stdout.write(`\x1b[${currentLines}A`); // Move up currentLines
+                        }
+
+                        // Print the new box (this overwrites the old one)
+                        process.stdout.write(box);
+
+                        // Update the line count
+                        currentLines = newLines;
+
+                        // Move cursor to the end of the box to allow logs below
+                        process.stdout.write('\n');
+                    },
                     onThinking: () => {
                         logger.info('AI thinking...');
                     },
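The onChunk handler re-renders the response box in place: it moves the cursor up over the previously printed box with the ANSI escape `\x1b[nA`, then overwrites it and leaves the cursor below for subsequent output. A self-contained sketch of the same trick (renderBox and the simulated chunks are illustrative, not code from the PR):

import boxen from 'boxen';
import chalk from 'chalk';

let previousLines = 0;

function renderBox(text: string): void {
    const box = boxen(chalk.white(text), { padding: 1, borderColor: 'yellow' });
    // Move the cursor up over the previous box so this write overwrites it.
    if (previousLines > 0) {
        process.stdout.write(`\x1b[${previousLines}A`);
    }
    process.stdout.write(box + '\n');
    previousLines = box.split('\n').length;
}

// Simulate streamed chunks arriving over time.
let accumulated = '';
['Streaming ', 'responses ', 'render ', 'in place.'].forEach((chunk, i) => {
    setTimeout(() => renderBox((accumulated += chunk)), i * 300);
});

One caveat of this approach: if a re-render produces a shorter box than the previous one, stale lines linger below it; writing `\x1b[0J` (clear to end of screen) after moving the cursor up avoids that.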
62 changes: 37 additions & 25 deletions app/index.ts
@@ -6,7 +6,7 @@ import dotenv from 'dotenv';
 import { logger } from '../src/utils/logger.js';
 import { initializeAiCli } from './cli.js';
 import { loadConfigFile } from '../src/config/loader.js';
-
+import { AgentConfig } from '../src/config/types.js';
 // Load environment variables
 dotenv.config();
@@ -44,7 +44,6 @@ program.parse();
 const options = program.opts();
 const configFile = options.configFile;
 const connectionMode = options.strict ? 'strict' : ('lenient' as 'strict' | 'lenient');
-const verbose = options.verbose !== false;

 // Platform-independent path handling
 const normalizedConfigPath = path.normalize(configFile);
@@ -75,37 +74,23 @@ logger.info('');
 async function startAiClient() {
     try {
         // Load the agent configuration
-        const config = await loadConfigFile(normalizedConfigPath);
-
-        // Validate MCP servers section exists
-        if (!config.mcpServers || Object.keys(config.mcpServers).length === 0) {
-            logger.error('Error: No MCP server configurations found in the provided file');
-            process.exit(1);
-        }
-        logger.info(
-            `Found ${Object.keys(config.mcpServers).length} server configurations in ${normalizedConfigPath}`,
-            null,
-            'green'
-        );
+        const config: AgentConfig = await loadConfigFile(normalizedConfigPath);

-        // Validate LLM section exists, use defaults if not
-        if (!config.llm) {
-            logger.info('No LLM configuration found, using defaults', null, 'yellow');
-            config.llm = {
-                provider: 'openai',
-                model: 'gpt-4o-mini',
-                apiKey: 'env:OPENAI_API_KEY',
-            };
-        }
+        validateAgentConfig(config);

         logger.info('===============================================');
         logger.info('Starting AI-powered MCP client...', null, 'cyanBright');
         logger.info('===============================================\n');

         await initializeAiCli(config, connectionMode);
     } catch (error) {
-        logger.error('Error: Failed to load configuration from file');
-        logger.error(error);
+        logger.error(
+            `Error: Failed to initialize AI CLI from config file${normalizedConfigPath}: ${JSON.stringify(
+                error,
+                null,
+                2
+            )}`
+        );
         process.exit(1);
     }
 }
@@ -116,3 +101,30 @@ startAiClient().catch((error) => {
     logger.error(error);
     process.exit(1);
 });
+
+function validateAgentConfig(config: AgentConfig): void {
+    logger.info('Validating agent config', null, 'cyanBright');
+    if (!config.mcpServers || Object.keys(config.mcpServers).length === 0) {
+        throw new Error('No MCP server configurations provided');
+    }
+
+    // Validate LLM section exists, use defaults if not
+    if (!config.llm) {
+        logger.info('No LLM configuration found, using defaults', null, 'yellow');
+        config.llm = {
+            provider: 'openai',
+            model: 'gpt-4o-mini',
+            apiKey: 'env:OPENAI_API_KEY',
+        };
+    }
+
+    if (!config.llm.provider || !config.llm.model) {
+        throw new Error('LLM configuration must specify provider and model');
+    }
+
+    logger.info(
+        `Found ${Object.keys(config.mcpServers).length} server configurations`,
+        null,
+        'green'
+    );
+}
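The defaults above reference the OpenAI key indirectly via `apiKey: 'env:OPENAI_API_KEY'`. A minimal sketch of how such an `env:` reference might be resolved (a hypothetical helper; the resolution code is not part of this diff):

// Hypothetical: turn 'env:VAR_NAME' into the value of process.env.VAR_NAME.
function resolveApiKey(raw: string): string {
    if (raw.startsWith('env:')) {
        const varName = raw.slice('env:'.length);
        const value = process.env[varName];
        if (!value) {
            throw new Error(`Environment variable ${varName} is not set`);
        }
        return value;
    }
    return raw; // treat anything else as a literal key
}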
3 changes: 3 additions & 0 deletions configuration/mcp.json
@@ -1,14 +1,17 @@
 {
     "mcpServers": {
         "desktopCommander": {
+            "type": "stdio",
             "command": "npx",
             "args": ["-y", "@wonderwhy-er/desktop-commander"]
         },
         "puppeteer": {
+            "type": "stdio",
             "command": "node",
             "args": ["--loader", "ts-node/esm", "src/servers/puppeteerServer.ts"]
         },
         "dockerServer": {
+            "type": "stdio",
             "command": "node",
             "args": ["--loader", "ts-node/esm", "src/servers/dockerServer.ts"]
         }
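The new `"type": "stdio"` field marks each entry as a server the client launches itself and talks to over stdin/stdout. A rough sketch of consuming one of these entries (the spawn wiring is an assumption; ClientManager's internals are not shown in this PR):

import { spawn } from 'node:child_process';

// Shape of one mcpServers entry from the JSON above.
interface StdioServerConfig {
    type: 'stdio';
    command: string;
    args: string[];
}

function launchStdioServer(name: string, cfg: StdioServerConfig) {
    // stdin/stdout carry the MCP protocol; stderr passes through for logs.
    const child = spawn(cfg.command, cfg.args, { stdio: ['pipe', 'pipe', 'inherit'] });
    child.on('exit', (code) => console.error(`${name} exited with code ${code}`));
    return child;
}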