Complete Integration Examples

Full working examples for different platforms and use cases.

Claude Desktop Integration

Complete Setup & Usage

Step 1: Configuration

Edit ~/Library/Application Support/Claude/claude_desktop_config.json (macOS; on Windows, %APPDATA%\Claude\claude_desktop_config.json):
{
  "mcpServers": {
    "myweave": {
      "command": "npx",
      "args": ["-y", "@mintlify/mcp-server", "https://docs.myweave.ai"]
    }
  },
  "environmentVariables": {
    "MYWEAVE_API_KEY": "your_api_key_here"
  }
}
Step 2: Example Prompts

Once configured, try these in Claude:

Query Documentation:
"Using myweave docs, show me how to start a chat with an expert"

"What parameters does the myweave /chat endpoint accept?"

"How do I handle streaming responses from myweave?"

Generate Integration Code:
"Write a TypeScript function that:
1. Starts a chat with a myweave expert
2. Handles the streaming response
3. Extracts the thread ID
4. Includes proper error handling"

Execute API Calls (if API key configured):
"Start a conversation with a leadership coach about team management 
using myweave API"

Custom MCP Client - Full Implementation

Complete TypeScript MCP Client

// NOTE: simplified for illustration - the MCPClient surface shown here is a
// sketch, not the exact @modelcontextprotocol/sdk API. See the SDK docs for
// the real client and transport setup.
import { MCPClient } from '@modelcontextprotocol/sdk';

class MyWeaveMCPClient {
  private mcp: MCPClient;
  private apiKey: string;
  private baseUrl: string = 'https://api.myweave.ai/functions/v1';
  
  constructor(apiKey: string) {
    this.apiKey = apiKey;
    this.mcp = new MCPClient({
      serverUrl: 'https://docs.myweave.ai/mcp'
    });
  }
  
  // Query documentation
  async queryDocs(question: string): Promise<string> {
    const response = await this.mcp.queryDocs({
      query: question,
      context: "myweave API documentation"
    });
    
    return response.answer;
  }
  
  // Start expert consultation
  async startChat(message: string, coachId: string, userId?: string): Promise<{
    threadId: string;
    response: string;
  }> {
    const response = await fetch(`${this.baseUrl}/chat`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'X-API-Key': this.apiKey
      },
      body: JSON.stringify({
        message,
        context: {
          coachId,
          userId,
          isAuthenticated: !!userId
        }
      })
    });
    
    if (!response.ok) {
      throw new Error(`Chat request failed: ${response.status}`);
    }
    
    const threadId = response.headers.get('X-Thread-Id') || '';
    
    // Handle the server-sent-events stream
    const reader = response.body?.getReader();
    if (!reader) throw new Error('Response has no readable body');
    const decoder = new TextDecoder();
    let fullResponse = '';
    
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      
      // { stream: true } keeps multi-byte characters intact across chunks
      const chunk = decoder.decode(value, { stream: true });
      const lines = chunk.split('\n');
      
      for (const line of lines) {
        if (line.startsWith('data: ')) {
          fullResponse += line.slice(6);
        }
      }
    }
    
    return { threadId, response: fullResponse };
  }
  
  // Get user's conversation threads
  async getUserThreads(userId: string, coachId?: string): Promise<any[]> {
    const url = new URL(`${this.baseUrl}/users/${userId}/threads`);
    if (coachId) url.searchParams.set('coachId', coachId);
    
    const response = await fetch(url.toString(), {
      headers: { 'X-API-Key': this.apiKey }
    });
    
    const { data } = await response.json();
    return data;
  }
  
  // Get messages from thread
  async getThreadMessages(threadId: string): Promise<any[]> {
    const response = await fetch(
      `${this.baseUrl}/threads/${threadId}/messages`,
      {
        headers: { 'X-API-Key': this.apiKey }
      }
    );
    
    const { data } = await response.json();
    return data;
  }
  
  // Upload knowledge
  async uploadKnowledge(coachId: string, files: File[]): Promise<any> {
    const formData = new FormData();
    files.forEach(file => formData.append('files', file));
    
    const response = await fetch(
      `${this.baseUrl}/knowledge-handler?coachId=${coachId}`,
      {
        method: 'POST',
        headers: { 'X-API-Key': this.apiKey },
        body: formData
      }
    );
    
    return await response.json();
  }
}

// Usage Example
const client = new MyWeaveMCPClient(process.env.MYWEAVE_API_KEY!);

// Query docs via MCP
const authInfo = await client.queryDocs("How do I authenticate?");
console.log(authInfo);

// Start a chat
const chat = await client.startChat(
  "I need help with leadership development",
  "coach_123",
  "user_456"
);
console.log("Thread:", chat.threadId);
console.log("Response:", chat.response);

// Get user's threads
const threads = await client.getUserThreads("user_456");
console.log("User has", threads.length, "conversations");

Hybrid AI App - Complete Example

AI Assistant with Human Expert Fallback

class HybridIntelligenceApp {
  private llm: any; // Your LLM client
  private myweave: MyWeaveMCPClient;
  
  constructor(llmClient: any, myweaveApiKey: string) {
    this.llm = llmClient;
    this.myweave = new MyWeaveMCPClient(myweaveApiKey);
  }
  
  async respond(userMessage: string, context: any): Promise<{
    response: string;
    source: 'ai' | 'human' | 'hybrid';
    confidence: number;
  }> {
    // Step 1: AI attempts to respond
    const aiResponse = await this.llm.generate({
      messages: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: userMessage }
      ]
    });
    
    // Step 2: Assess AI confidence
    const confidence = this.assessConfidence(aiResponse, userMessage);
    
    // Step 3: Determine if human expertise needed
    if (confidence < 0.7 || this.requiresHumanJudgment(userMessage)) {
      console.log('Low confidence or complex query - consulting human expert');
      
      // Step 4: Consult human expert via myweave
      const expertType = this.determineExpertType(userMessage);
      const humanResponse = await this.myweave.startChat(
        `User question: ${userMessage}\nAI's attempt: ${aiResponse}\nPlease provide expert guidance.`,
        expertType.coachId,
        context.userId
      );
      
      // Step 5: AI synthesizes both perspectives
      const synthesis = await this.llm.generate({
        messages: [
          { 
            role: 'system', 
            content: 'Combine AI and human expert perspectives into a cohesive answer.' 
          },
          { 
            role: 'user', 
            content: `AI: ${aiResponse}\nHuman Expert: ${humanResponse.response}\nSynthesize:` 
          }
        ]
      });
      
      return {
        response: synthesis,
        source: 'hybrid',
        confidence: 0.95
      };
    }
    
    // High confidence - return AI response
    return {
      response: aiResponse,
      source: 'ai',
      confidence
    };
  }
  
  private assessConfidence(response: string, question: string): number {
    // Implement your confidence scoring logic
    // Consider: response length, uncertainty markers, question complexity
    return 0.8; // Example
  }
  
  private requiresHumanJudgment(message: string): boolean {
    const humanNeededKeywords = [
      'ethical dilemma', 'career decision', 'relationship advice',
      'strategic', 'complex situation', 'not sure what to do'
    ];
    
    return humanNeededKeywords.some(keyword => 
      message.toLowerCase().includes(keyword)
    );
  }
  
  private determineExpertType(message: string): { coachId: string; mode: string } {
    // Map question type to appropriate expert
    if (message.includes('career') || message.includes('job')) {
      return { coachId: 'career_coach_123', mode: 'career_consulting' };
    }
    if (message.includes('leadership') || message.includes('team')) {
      return { coachId: 'leadership_coach_456', mode: 'leadership' };
    }
    if (message.includes('strategy') || message.includes('business')) {
      return { coachId: 'strategy_consultant_789', mode: 'strategic_consulting' };
    }
    
    // Default to general coaching
    return { coachId: 'general_coach_001', mode: 'general' };
  }
}

// Usage
const app = new HybridIntelligenceApp(
  myLLMClient,
  process.env.MYWEAVE_API_KEY!
);

const result = await app.respond(
  "I'm facing a difficult decision about whether to take a new job offer",
  { userId: "user_123" }
);

console.log('Source:', result.source); // 'ai', 'human', or 'hybrid'
console.log('Response:', result.response);
console.log('Confidence:', result.confidence);
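
assessConfidence above is deliberately left as a stub. One simple heuristic, sketched below, penalizes hedging language and suspiciously short answers; the markers, lengths, and weights are illustrative and should be tuned against your own traffic:

// Illustrative confidence heuristic - not a calibrated model.
function assessConfidence(response: string, question: string): number {
  const uncertaintyMarkers = [
    "i'm not sure", 'it depends', 'i cannot', 'consult a professional'
  ];
  const lower = response.toLowerCase();
  
  let score = 0.9;
  // Penalize explicit hedging
  for (const marker of uncertaintyMarkers) {
    if (lower.includes(marker)) score -= 0.2;
  }
  // A very short answer to a long question is suspect
  if (response.length < 80 && question.length > 200) score -= 0.2;
  
  return Math.max(0, Math.min(1, score));
}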

Python Implementation

Full Python MCP Client

import requests
from typing import Optional, List, Dict, Any

class MyWeaveMCPClient:
    def __init__(self, api_key: str):
        self.api_key = api_key
        self.base_url = "https://api.myweave.ai/functions/v1"
        self.mcp_url = "https://docs.myweave.ai/mcp"
    
    def query_docs(self, question: str) -> str:
        """Query myweave documentation via MCP"""
        # MCP client implementation
        # (Simplified - use actual MCP SDK)
        response = requests.get(
            f"{self.mcp_url}/query",
            params={"q": question}
        )
        return response.json()['answer']
    
    def start_chat(
        self, 
        message: str, 
        coach_id: str, 
        user_id: Optional[str] = None
    ) -> Dict[str, Any]:
        """Start expert consultation"""
        url = f"{self.base_url}/chat"
        headers = {
            "Content-Type": "application/json",
            "X-API-Key": self.api_key
        }
        data = {
            "message": message,
            "context": {
                "coachId": coach_id,
                "userId": user_id,
                "isAuthenticated": bool(user_id)
            }
        }
        
        response = requests.post(url, headers=headers, json=data, stream=True)
        response.raise_for_status()
        thread_id = response.headers.get('X-Thread-Id')
        
        # Collect streaming response
        full_response = ""
        for line in response.iter_lines():
            if line:
                decoded = line.decode('utf-8')
                if decoded.startswith('data: '):
                    full_response += decoded[6:]
        
        return {
            "thread_id": thread_id,
            "response": full_response
        }
    
    def get_user_threads(
        self, 
        user_id: str, 
        coach_id: Optional[str] = None
    ) -> List[Dict]:
        """Get user's conversation threads"""
        url = f"{self.base_url}/users/{user_id}/threads"
        if coach_id:
            url += f"?coachId={coach_id}"
        
        response = requests.get(url, headers={"X-API-Key": self.api_key})
        return response.json()['data']
    
    def get_thread_messages(self, thread_id: str) -> List[Dict]:
        """Get messages from a thread"""
        url = f"{self.base_url}/threads/{thread_id}/messages"
        response = requests.get(url, headers={"X-API-Key": self.api_key})
        return response.json()['data']


# Example: Hybrid AI Application
class HybridAI:
    def __init__(self, llm_client, myweave_api_key: str):
        self.llm = llm_client
        self.myweave = MyWeaveMCPClient(myweave_api_key)
    
    def respond(self, user_message: str, user_context: Dict) -> Dict[str, Any]:
        # AI attempts response
        ai_response = self.llm.generate(user_message)
        confidence = self.assess_confidence(ai_response)
        
        # Low confidence or complex query → human expert
        if confidence < 0.7:
            expert = self.myweave.start_chat(
                f"{user_message}\n\nAI attempted: {ai_response}",
                coach_id="expert_123",
                user_id=user_context.get('user_id')
            )
            
            return {
                "response": expert['response'],
                "source": "human_expert",
                "thread_id": expert['thread_id'],
                "confidence": 0.95
            }
        
        return {
            "response": ai_response,
            "source": "ai",
            "confidence": confidence
        }
    
    def assess_confidence(self, response: str) -> float:
        # Your confidence assessment logic
        return 0.8

# Usage
client = MyWeaveMCPClient(api_key="your_key_here")  # direct client, if you need raw API access
app = HybridAI(my_llm_client, "your_myweave_key")

result = app.respond(
    "I'm facing a career transition decision",
    {"user_id": "user_123"}
)

print(f"Source: {result['source']}")
print(f"Response: {result['response']}")

React Application Example

Chat Component with Expert Integration

import React, { useMemo, useState } from 'react';
import { MyWeaveMCPClient } from './myweave-client';

export function HybridChatComponent() {
  const [messages, setMessages] = useState<any[]>([]);
  const [input, setInput] = useState('');
  // NOTE: instantiating the client in the browser exposes your API key.
  // In production, proxy these calls through a backend (see the Express
  // example below). useMemo avoids re-creating the client on every render.
  const client = useMemo(
    () => new MyWeaveMCPClient(process.env.REACT_APP_MYWEAVE_API_KEY!),
    []
  );
  
  const sendMessage = async () => {
    const userMsg = { role: 'user', content: input };
    setMessages(prev => [...prev, userMsg]);
    setInput('');
    
    try {
      // AI attempts to respond (yourLLM and assessConfidence are
      // placeholders - wire in your own LLM client and scoring logic)
      const aiResponse = await yourLLM.generate(input);
      const confidence = assessConfidence(aiResponse);
      
      if (confidence < 0.7) {
        // Escalate to human expert
        const expertChat = await client.startChat(
          input,
          'coach_123',
          'user_456'
        );
        
        setMessages(prev => [...prev, {
          role: 'assistant',
          content: expertChat.response,
          source: 'human_expert',
          threadId: expertChat.threadId
        }]);
      } else {
        // Use AI response
        setMessages(prev => [...prev, {
          role: 'assistant',
          content: aiResponse,
          source: 'ai'
        }]);
      }
    } catch (error) {
      console.error('Error:', error);
    }
  };
  
  return (
    <div className="chat-container">
      <div className="messages">
        {messages.map((msg, idx) => (
          <div key={idx} className={`message ${msg.role}`}>
            <div className="content">{msg.content}</div>
            {msg.source && (
              <div className="source-badge">
                {msg.source === 'human_expert' ? '🧑‍🎓 Expert' : '🤖 AI'}
              </div>
            )}
          </div>
        ))}
      </div>
      
      <div className="input-area">
        <input
          value={input}
          onChange={(e) => setInput(e.target.value)}
          onKeyDown={(e) => e.key === 'Enter' && sendMessage()}
          placeholder="Ask anything..."
        />
        <button onClick={sendMessage}>Send</button>
      </div>
    </div>
  );
}

Node.js Backend Example

Express Server with myweave Integration

import express from 'express';
import { MyWeaveMCPClient } from './myweave-client';

const app = express();
app.use(express.json());

const myweave = new MyWeaveMCPClient(process.env.MYWEAVE_API_KEY!);

// Endpoint: Start consultation
app.post('/api/consult', async (req, res) => {
  const { message, userId, expertType } = req.body;
  
  try {
    // Determine appropriate expert
    const coachId = getCoachIdForExpertType(expertType);
    
    // Start chat via myweave
    const consultation = await myweave.startChat(message, coachId, userId);
    
    res.json({
      success: true,
      threadId: consultation.threadId,
      response: consultation.response
    });
  } catch (error: any) {
    res.status(500).json({ error: error.message });
  }
});

// Endpoint: Get user's consultations
app.get('/api/users/:userId/consultations', async (req, res) => {
  try {
    const threads = await myweave.getUserThreads(req.params.userId);
    res.json({ consultations: threads });
  } catch (error: any) {
    res.status(500).json({ error: error.message });
  }
});

// Endpoint: Get conversation history
app.get('/api/threads/:threadId/messages', async (req, res) => {
  try {
    const messages = await myweave.getThreadMessages(req.params.threadId);
    res.json({ messages });
  } catch (error: any) {
    res.status(500).json({ error: error.message });
  }
});

function getCoachIdForExpertType(expertType: string): string {
  const mapping: Record<string, string> = {
    'career': 'career_coach_123',
    'leadership': 'leadership_coach_456',
    'strategy': 'strategy_consultant_789'
  };
  return mapping[expertType] || 'general_coach_001';
}

app.listen(3000, () => {
  console.log('Hybrid AI app running on port 3000');
});
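
A quick smoke test against the running server (assumes it is listening on localhost:3000 and that the coach IDs in the mapping above exist):

// Smoke test: exercise the /api/consult endpoint end to end.
const res = await fetch('http://localhost:3000/api/consult', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    message: 'How do I handle a difficult team conversation?',
    userId: 'user_456',
    expertType: 'leadership'
  })
});

const body = await res.json();
console.log(body.threadId, body.response);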

Testing Your Integration

Unit Tests

import { describe, it, expect } from 'vitest';
import { MyWeaveMCPClient } from './myweave-client';

describe('MyWeave MCP Integration', () => {
  const client = new MyWeaveMCPClient(process.env.MYWEAVE_API_KEY!);
  
  it('should query documentation via MCP', async () => {
    const response = await client.queryDocs('How do I authenticate?');
    expect(response).toContain('API Key');
    expect(response).toContain('JWT');
  });
  
  it('should start a chat and get thread ID', async () => {
    const chat = await client.startChat(
      'Test message',
      'coach_123'
    );
    
    expect(chat.threadId).toBeTruthy();
    expect(chat.response).toBeTruthy();
  });
  
  it('should get user threads', async () => {
    const threads = await client.getUserThreads('user_test_123');
    expect(Array.isArray(threads)).toBe(true);
  });
});
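
The tests above hit the live API, which makes them slow and flaky in CI. For fast, deterministic unit tests you can stub fetch instead; a sketch using vitest's vi.stubGlobal (the fake SSE payload and thread ID are illustrative):

import { vi, it, expect } from 'vitest';
import { MyWeaveMCPClient } from './myweave-client';

it('parses the SSE stream without hitting the network', async () => {
  // Fake a streaming /chat response with a thread ID header
  vi.stubGlobal('fetch', vi.fn(async () =>
    new Response('data: Hello from the expert\n\n', {
      headers: { 'X-Thread-Id': 'thread_test_1' }
    })
  ));
  
  const client = new MyWeaveMCPClient('test_key');
  const chat = await client.startChat('Test message', 'coach_123');
  
  expect(chat.threadId).toBe('thread_test_1');
  expect(chat.response).toBe('Hello from the expert');
  
  vi.unstubAllGlobals();
});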

Best Practices

1. Graceful Degradation

async function respond(message: string) {
  try {
    // Try AI first
    const aiResponse = await llm.generate(message);
    
    if (needsHuman(message)) {
      try {
        // Try human expert (consult() here stands in for startChat() on the
        // client above; needsHuman() is your own routing check)
        return await myweave.consult(message);
      } catch (expertError) {
        // Fallback to AI if expert unavailable
        console.warn('Expert unavailable, using AI:', expertError);
        return aiResponse;
      }
    }
    
    return aiResponse;
  } catch (error) {
    return "I'm having trouble right now. Please try again.";
  }
}
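
Expert consultations can also take far longer than an LLM call, so it is worth bounding them before falling back. A generic timeout wrapper, sketched below (the 30-second budget is an assumption, not a documented limit):

// Race a promise against a timeout; rejects if the budget is exceeded.
async function withTimeout<T>(promise: Promise<T>, ms: number): Promise<T> {
  let timer: ReturnType<typeof setTimeout>;
  const timeout = new Promise<never>((_, reject) => {
    timer = setTimeout(() => reject(new Error(`Timed out after ${ms}ms`)), ms);
  });
  try {
    return await Promise.race([promise, timeout]);
  } finally {
    clearTimeout(timer!);
  }
}

// Usage: fall back to the AI answer if the expert takes too long
// return await withTimeout(myweave.consult(message), 30_000);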

2. Cost Optimization

// Cache common questions with AI
const aiCache = new Map<string, string>();

async function respond(message: string) {
  // Check cache first
  if (aiCache.has(message)) {
    return { response: aiCache.get(message), cost: 0 };
  }
  
  // Use AI for simple queries
  if (isSimpleQuery(message)) {
    const response = await llm.generate(message);
    aiCache.set(message, response);
    return { response, cost: 0.001 };
  }
  
  // Use human expert for complex/high-value queries
  const expert = await myweave.consult(message);
  return { response: expert, cost: 2.50 };
}
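
The Map cache above grows without bound and only matches exact strings. A light improvement, sketched here, normalizes keys and evicts the oldest entry past a cap (the cap of 500 is arbitrary):

const cache = new Map<string, string>();
const MAX_CACHE_ENTRIES = 500; // arbitrary cap

function cacheKey(message: string): string {
  // Normalize so trivially different phrasings share an entry
  return message.trim().toLowerCase().replace(/\s+/g, ' ');
}

function cacheSet(message: string, response: string) {
  if (cache.size >= MAX_CACHE_ENTRIES) {
    // Map iterates in insertion order, so this evicts the oldest entry
    cache.delete(cache.keys().next().value!);
  }
  cache.set(cacheKey(message), response);
}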

3. User Preference

async function respond(message: string, userPreferences: any) {
  // Let users choose AI vs. human
  if (userPreferences.preferHumanExpert) {
    return await myweave.consult(message);
  }
  
  // Or hybrid by default with option to override
  const ai = await llm.generate(message);
  
  return {
    aiResponse: ai,
    actions: [
      {
        label: "Get Expert Opinion",
        action: () => myweave.consult(message)
      }
    ]
  };
}

Resources