Third-Party Integrations
The Earna AI Console provides seamless integration with various external services including AI models, real-time features, and backend services.
Overview
Supported Integration Categories
- AI Models: GPT-4o, Claude 3 Opus, Gemini Pro, Mistral, xAI Grok
- Real-time Features: HeyGen Avatars, GPT-4o Realtime Voice, WebRTC
- Backend Services: Supabase (Auth, Database, Storage, Realtime)
- Deployment: Vercel Edge Functions, Cloudflare Workers
- Monitoring: Sentry, LogRocket, Datadog
- Analytics: PostHog, Mixpanel, Google Analytics
- Communication: SendGrid, Resend, Twilio
AI Model Integrations
GPT-4o (Primary Model)
Integrate OpenAI’s GPT-4o model for advanced conversational AI capabilities.
Configuration
// config/ai/openai.ts
// Central OpenAI configuration: credentials plus per-model generation defaults.
export const openaiConfig = {
  // Non-null assertion: the app treats a missing OPENAI_API_KEY as a boot-time error.
  apiKey: process.env.OPENAI_API_KEY!,
  organization: process.env.OPENAI_ORG_ID, // optional org scoping — may be undefined
  defaultModel: 'gpt-4o',
  models: {
    // Text-chat generation defaults.
    'gpt-4o': {
      maxTokens: 4096,
      temperature: 0.7,
      topP: 1,
      frequencyPenalty: 0,
      presencePenalty: 0
    },
    // Realtime voice session defaults (16-bit PCM at 24 kHz).
    'gpt-4o-realtime': {
      voice: 'alloy',
      audioFormat: 'pcm16',
      sampleRate: 24000
    }
  }
};
Implementation
// services/ai/openai.service.ts
import { openai } from '@ai-sdk/openai';
import { streamText, generateText } from 'ai';
export class OpenAIService {
  /**
   * Streams a GPT-4o chat completion and returns it as a data-stream HTTP response.
   * Missing settings fall back to the defaults shown inline.
   * NOTE(review): `this.logUsage` is not defined anywhere in this snippet —
   * presumably implemented elsewhere on the class; confirm before copy-pasting.
   */
  async streamChat(messages: Message[], settings?: ModelSettings) {
    const result = await streamText({
      model: openai('gpt-4o'),
      messages,
      temperature: settings?.temperature ?? 0.7,
      maxTokens: settings?.maxTokens ?? 4096,
      topP: settings?.topP ?? 1,
      onFinish: async ({ text, usage }) => {
        await this.logUsage(usage);
      }
    });
    return result.toDataStreamResponse();
  }

  /** One-shot (non-streaming) completion for a plain text prompt. */
  async generateCompletion(prompt: string) {
    const { text } = await generateText({
      model: openai('gpt-4o'),
      prompt,
      temperature: 0.7
    });
    return text;
  }
}
Claude 3 Opus Integration
Integrate Anthropic’s Claude 3 Opus for advanced reasoning and analysis.
Setup
pnpm add @ai-sdk/anthropic
Configuration
// config/ai/anthropic.ts
// Anthropic configuration.
// NOTE(review): unlike openaiConfig, apiKey carries no non-null assertion, so its
// type is `string | undefined` — confirm this asymmetry is intentional.
export const anthropicConfig = {
  apiKey: process.env.ANTHROPIC_API_KEY,
  // Pinned model snapshot; note the `models` map below keys on short aliases instead.
  defaultModel: 'claude-3-opus-20240229',
  models: {
    'claude-3-opus': {
      maxTokens: 4096,
      temperature: 0.7,
      topP: 1
    },
    'claude-3-sonnet': {
      maxTokens: 4096,
      temperature: 0.7
    }
  }
};
Service Implementation
// services/ai/anthropic.service.ts
import { anthropic } from '@ai-sdk/anthropic';
import { streamText } from 'ai';
export class AnthropicService {
  /**
   * Streams a Claude 3 Opus chat completion as a data-stream HTTP response.
   * Optional settings override the default temperature / token budget.
   */
  async streamChat(messages: Message[], settings?: ModelSettings) {
    const temperature = settings?.temperature ?? 0.7;
    const maxTokens = settings?.maxTokens ?? 4096;

    const stream = await streamText({
      model: anthropic('claude-3-opus-20240229'),
      system: 'You are a helpful AI assistant powered by Claude 3 Opus.',
      messages,
      temperature,
      maxTokens
    });

    return stream.toDataStreamResponse();
  }
}
Real-time Features Integration
HeyGen Interactive Avatars
Integrate HeyGen’s interactive avatar API for real-time avatar interactions.
Installation
pnpm add @heygen/streaming-avatar
Configuration
// config/integrations/heygen.ts
// HeyGen streaming-avatar configuration.
export const heygenConfig = {
  apiKey: process.env.HEYGEN_API_KEY!, // required — asserted non-null
  // Avatar/voice pair used when the caller does not pick one explicitly.
  defaultAvatar: {
    avatarId: 'josh_lite3_20230714',
    voiceId: '077ab11b14f04ce0b49b5f6e5cc20979',
    quality: 'high',
    knowledgeBase: '6750918b2dd08e3d264d5275'
  },
  streaming: {
    maxDuration: 3600000, // session cap in ms (1 hour)
    videoCodec: 'H264',
    audioCodec: 'OPUS'
  }
};
Avatar Service
// services/avatar.service.ts
import { StreamingAvatarApi, AvatarQuality } from '@heygen/streaming-avatar';
export class AvatarService {
  // Both fields are unset until initializeAvatar() succeeds, hence optional.
  // (The original declared `avatar` non-optional, which fails under
  // strictPropertyInitialization and allowed use-before-assignment.)
  private avatar?: StreamingAvatarApi;
  private sessionToken?: string;

  /**
   * Creates the HeyGen client and opens a streaming session with the configured
   * default avatar and voice. Returns the raw session object.
   */
  async initializeAvatar() {
    this.avatar = new StreamingAvatarApi({
      apiKey: heygenConfig.apiKey,
      serverUrl: process.env.HEYGEN_SERVER_URL
    });
    const session = await this.avatar.createStarterSession({
      quality: AvatarQuality.High,
      avatarName: heygenConfig.defaultAvatar.avatarId,
      voice: { voiceId: heygenConfig.defaultAvatar.voiceId }
    });
    this.sessionToken = session.session_token;
    return session;
  }

  /**
   * Makes the avatar speak `text`, lazily initializing the session on first use.
   * When an LLM response is supplied, 'repeat' echoes it verbatim; otherwise
   * 'talk' lets HeyGen generate the reply.
   */
  async speak(text: string, llmResponse?: string) {
    if (!this.sessionToken) {
      await this.initializeAvatar();
    }
    await this.avatar!.speak({
      text,
      taskType: llmResponse ? 'repeat' : 'talk',
      taskMode: 'async'
    });
  }

  /** Stops the current utterance. No-op when no session has been started. */
  async interrupt() {
    // Guard added: the original dereferenced `this.avatar` unconditionally and
    // threw if interrupt() was called before initializeAvatar().
    if (!this.avatar) return;
    await this.avatar.interrupt();
  }
}
GPT-4o Realtime Voice
Implement real-time voice interactions using GPT-4o’s realtime API.
// services/realtime-voice.service.ts
import { RealtimeClient } from '@openai/realtime-api-beta';
export class RealtimeVoiceService {
  // Assigned in initialize(). The original also declared an unused
  // `pc: RTCPeerConnection` field — removed; WebRTC transport is handled by
  // the separate WebRTCService.
  private client?: RealtimeClient;

  /**
   * Creates the realtime client, configures the session (alloy voice, PCM16
   * audio in/out, server-side voice-activity detection), and connects.
   */
  async initialize() {
    this.client = new RealtimeClient({
      apiKey: process.env.OPENAI_API_KEY,
      dangerouslyAllowBrowser: false // server-side only: never ship the key to the browser
    });
    await this.client.updateSession({
      instructions: 'You are a helpful AI assistant.',
      voice: 'alloy',
      input_audio_format: 'pcm16',
      output_audio_format: 'pcm16',
      turn_detection: {
        type: 'server_vad',
        threshold: 0.5,
        prefix_padding_ms: 300,
        silence_duration_ms: 500
      }
    });
    await this.client.connect();
  }

  /**
   * Forwards a chunk of caller audio to the realtime session.
   * @throws Error when called before initialize() (the original crashed with an
   *         opaque "cannot read properties of undefined" instead).
   */
  async streamAudio(audioData: ArrayBuffer) {
    if (!this.client) {
      throw new Error('RealtimeVoiceService.initialize() must be called before streamAudio()');
    }
    await this.client.appendInputAudio(audioData);
  }
}
Backend Service Integrations
Supabase Integration
Complete backend infrastructure including auth, database, storage, and realtime.
Configuration
// config/supabase.ts
import { createClient } from '@supabase/supabase-js';
// Environment-driven Supabase settings plus table/bucket name constants.
export const supabaseConfig = {
  url: process.env.NEXT_PUBLIC_SUPABASE_URL!,
  anonKey: process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY!,
  // Server-side credential: note it has no NEXT_PUBLIC_ prefix, so it is never
  // bundled for the client. Keep it out of browser code paths.
  serviceKey: process.env.SUPABASE_SERVICE_ROLE_KEY!,
  database: {
    schema: 'public',
    // Logical name -> physical table name indirection.
    tables: {
      users: 'users',
      conversations: 'conversations',
      messages: 'messages',
      files: 'files'
    }
  },
  storage: {
    buckets: {
      avatars: 'avatars',
      uploads: 'user-uploads',
      exports: 'exports'
    }
  }
};

// Browser-safe client: anon key only.
export const supabase = createClient(
  supabaseConfig.url,
  supabaseConfig.anonKey
);
Service Implementation
// services/supabase.service.ts
import { createServerClient } from '@supabase/ssr';
export class SupabaseService {
  // NOTE(review): the `SupabaseClient` type is not imported in this snippet, and
  // @supabase/ssr's createServerClient normally also takes a cookie-handling
  // options argument — verify both against the installed package version.
  private client: SupabaseClient;

  constructor() {
    // Uses the service-role key: this class is for trusted server-side code only.
    this.client = createServerClient(
      supabaseConfig.url,
      supabaseConfig.serviceKey
    );
  }

  /** Inserts a conversation row for `userId` and returns the created record. */
  async saveConversation(userId: string, messages: Message[]) {
    const { data, error } = await this.client
      .from('conversations')
      .insert({
        user_id: userId,
        messages,
        model: 'gpt-4o',
        created_at: new Date().toISOString()
      })
      .select()
      .single();
    if (error) throw error;
    return data;
  }

  /**
   * Uploads a file under a per-user, timestamp-prefixed path in the
   * 'user-uploads' bucket and returns the stored path.
   */
  async uploadFile(file: File, userId: string) {
    const fileName = `${userId}/${Date.now()}-${file.name}`;
    const { data, error } = await this.client.storage
      .from('user-uploads')
      .upload(fileName, file);
    if (error) throw error;
    return data.path;
  }

  /**
   * Subscribes to INSERTs on `messages` for one conversation via Supabase
   * realtime; returns the channel so callers can later unsubscribe.
   */
  async subscribeToMessages(conversationId: string, callback: (message: Message) => void) {
    return this.client
      .channel(`conversation:${conversationId}`)
      .on('postgres_changes', {
        event: 'INSERT',
        schema: 'public',
        table: 'messages',
        filter: `conversation_id=eq.${conversationId}`
      }, callback)
      .subscribe();
  }
}
Vercel AI SDK Integration
Unified interface for multiple AI providers.
// services/ai/vercel-ai.service.ts
import { createOpenAI } from '@ai-sdk/openai';
import { createAnthropic } from '@ai-sdk/anthropic';
import { createGoogleGenerativeAI } from '@ai-sdk/google';
import { streamText } from 'ai';
export class UnifiedAIService {
  // One provider instance per vendor, configured from env credentials at construction.
  private providers = {
    openai: createOpenAI({ apiKey: process.env.OPENAI_API_KEY }),
    anthropic: createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY }),
    google: createGoogleGenerativeAI({ apiKey: process.env.GOOGLE_API_KEY })
  };

  /**
   * Maps a public model alias to a concrete provider model.
   * Unknown aliases deliberately fall back to GPT-4o rather than throwing.
   */
  getModel(modelId: string) {
    switch (modelId) {
      case 'gpt-4o':
        return this.providers.openai('gpt-4o');
      case 'claude-3-opus':
        return this.providers.anthropic('claude-3-opus-20240229');
      case 'gemini-pro':
        return this.providers.google('gemini-1.5-pro');
      default:
        return this.providers.openai('gpt-4o');
    }
  }

  /**
   * Streams a completion from the selected model as a data-stream response.
   * NOTE(review): `this.trackUsage` is not defined anywhere in this snippet —
   * presumably implemented elsewhere on the class; confirm before copy-pasting.
   */
  async streamResponse(modelId: string, messages: Message[]) {
    const result = await streamText({
      model: this.getModel(modelId),
      messages,
      onFinish: async ({ text, usage }) => {
        await this.trackUsage(modelId, usage);
      }
    });
    return result.toDataStreamResponse();
  }
}
Deployment & Edge Functions
Vercel Edge Functions
Deploy AI endpoints on Vercel’s edge network for optimal performance.
// app/api/chat/route.ts
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';
// Run on the edge runtime with a 30-second streaming budget.
export const runtime = 'edge';
export const maxDuration = 30;

/**
 * Streaming chat endpoint. Accepts { messages, model? } and proxies to the
 * requested OpenAI model (defaulting to gpt-4o).
 * NOTE(review): `trackUsage` is neither defined nor imported in this snippet —
 * it must come from elsewhere in the real route file.
 */
export async function POST(req: Request) {
  const { messages, model = 'gpt-4o' } = await req.json();
  const result = await streamText({
    model: openai(model),
    messages,
    onFinish: async ({ text, usage }) => {
      // Track usage in Supabase
      await trackUsage(usage);
    }
  });
  return result.toDataStreamResponse();
}
Cloudflare Workers
Alternative edge deployment for global distribution.
// workers/chat.ts
import { Ai } from '@cloudflare/ai';
export default {
async fetch(request: Request, env: Env) {
const ai = new Ai(env.AI);
const { messages } = await request.json();
const response = await ai.run('@cf/meta/llama-3-8b-instruct', {
messages,
stream: true
});
return new Response(response, {
headers: { 'content-type': 'text/event-stream' }
});
}
};
Monitoring & Analytics
Sentry Error Tracking
Monitor errors and performance across the application.
// lib/sentry.ts
import * as Sentry from '@sentry/nextjs';
// Initialize Sentry once at module load.
Sentry.init({
  dsn: process.env.SENTRY_DSN,
  environment: process.env.NODE_ENV,
  // 1.0 samples every transaction — fine at low traffic; tune down for production volume.
  tracesSampleRate: 1.0,
  integrations: [
    Sentry.replayIntegration(),
    Sentry.feedbackIntegration({
      colorScheme: 'auto'
    })
  ]
});
/** Records one AI call as a Sentry breadcrumb (model plus token/duration stats). */
export function trackAIInteraction(model: string, usage: any) {
  const data = {
    model,
    tokens: usage.totalTokens,
    duration: usage.duration
  };
  Sentry.addBreadcrumb({
    category: 'ai',
    level: 'info',
    message: `AI interaction with ${model}`,
    data
  });
}
PostHog Product Analytics
Track user behavior and feature usage.
// lib/posthog.ts
import posthog from 'posthog-js';
// Initialize PostHog. Page views are captured manually (capture_pageview: false)
// while page-leave events remain automatic.
posthog.init(process.env.NEXT_PUBLIC_POSTHOG_KEY!, {
  api_host: process.env.NEXT_PUBLIC_POSTHOG_HOST,
  capture_pageview: false,
  capture_pageleave: true
});

// Track feature usage
posthog.capture('chat_started', {
  model: 'gpt-4o',
  hasAvatar: true,
  voiceEnabled: false
});

// Track avatar interactions
posthog.capture('avatar_interaction', {
  avatarId: 'josh_lite3',
  action: 'speak',
  duration: 30 // seconds
});
Communication Services
SendGrid Email
Transactional emails and notifications.
// services/sendgrid.service.ts
import sgMail from '@sendgrid/mail';
sgMail.setApiKey(process.env.SENDGRID_API_KEY!);

// Minimal HTML-escaper so user-controlled values cannot inject markup into the
// email body. (The original interpolated `name` into HTML unescaped.)
function escapeHtml(value: string): string {
  const map: Record<string, string> = {
    '&': '&amp;',
    '<': '&lt;',
    '>': '&gt;',
    '"': '&quot;',
    "'": '&#39;'
  };
  return value.replace(/[&<>"']/g, ch => map[ch] ?? ch);
}

/**
 * Sends the onboarding email to a new user.
 * @param to   recipient address
 * @param name display name (untrusted — escaped before interpolation)
 * @returns the SendGrid send() result
 */
export async function sendWelcomeEmail(to: string, name: string) {
  const msg = {
    to,
    from: 'noreply@earna.ai',
    subject: 'Welcome to Earna AI Console',
    html: `
<h1>Welcome ${escapeHtml(name)}!</h1>
<p>Your AI-powered console is ready.</p>
<p>Start chatting with GPT-4o or try our interactive avatars.</p>
`
  };
  return sgMail.send(msg);
}
Resend Email API
Alternative email service with React Email templates.
// services/resend.service.ts
import { Resend } from 'resend';
import { WelcomeEmail } from '@/emails/welcome';
const resend = new Resend(process.env.RESEND_API_KEY);

/**
 * Sends a transactional email rendered from a React Email template.
 * Throws the Resend error object on failure; returns the send metadata otherwise.
 */
export async function sendEmail(to: string, subject: string, react: React.ReactElement) {
  const result = await resend.emails.send({
    from: 'Earna AI <onboarding@earna.ai>',
    to,
    subject,
    react
  });
  if (result.error) throw result.error;
  return result.data;
}
Development Tools
Prettier Code Formatting
Automatic code formatting across the project.
// .prettierrc
{
"semi": true,
"trailingComma": "es5",
"singleQuote": true,
"printWidth": 100,
"tabWidth": 2,
"useTabs": false,
"bracketSpacing": true,
"arrowParens": "avoid",
"endOfLine": "lf"
}
ESLint Configuration
Code quality and consistency enforcement.
// .eslintrc.json
{
"extends": [
"next/core-web-vitals",
"plugin:@typescript-eslint/recommended"
],
"rules": {
"@typescript-eslint/no-unused-vars": "error",
"@typescript-eslint/no-explicit-any": "warn",
"react-hooks/rules-of-hooks": "error",
"react-hooks/exhaustive-deps": "warn"
}
}
WebRTC Integration
Real-time Communication
// services/webrtc.service.ts
export class WebRTCService {
  private pc: RTCPeerConnection;
  private localStream: MediaStream | null = null;
  private dataChannel: RTCDataChannel | null = null;

  /**
   * Builds the peer connection (STUN plus TURN), opens the 'chat' data channel,
   * captures microphone audio, and attaches the local tracks.
   */
  async initialize() {
    const iceServers: RTCIceServer[] = [
      { urls: 'stun:stun.l.google.com:19302' },
      {
        urls: process.env.TURN_SERVER_URL!,
        username: process.env.TURN_USERNAME,
        credential: process.env.TURN_CREDENTIAL
      }
    ];
    this.pc = new RTCPeerConnection({ iceServers });

    // Data channel used for text chat alongside the media tracks.
    this.dataChannel = this.pc.createDataChannel('chat');

    // Voice-only capture with browser-side audio cleanup enabled.
    const constraints: MediaStreamConstraints = {
      audio: {
        echoCancellation: true,
        noiseSuppression: true,
        autoGainControl: true
      },
      video: false
    };
    const stream = await navigator.mediaDevices.getUserMedia(constraints);
    this.localStream = stream;
    stream.getTracks().forEach(track => this.pc.addTrack(track, stream));
  }

  /** Creates and installs the local SDP offer, returning it for signaling. */
  async createOffer() {
    const offer = await this.pc.createOffer();
    await this.pc.setLocalDescription(offer);
    return offer;
  }

  /** Installs the remote peer's SDP answer. */
  async handleAnswer(answer: RTCSessionDescriptionInit) {
    await this.pc.setRemoteDescription(answer);
  }
}
Security Best Practices
API Key Management
// lib/security/api-keys.ts
import crypto from 'crypto';
export class APIKeyManager {
  // `as const` keeps the literal type so node's typings select the GCM cipher
  // overloads (getAuthTag/setAuthTag); the original widened this to `string`.
  private algorithm = 'aes-256-gcm' as const;
  // 32-byte key supplied as 64 hex characters via ENCRYPTION_KEY.
  private key = Buffer.from(process.env.ENCRYPTION_KEY!, 'hex');

  /**
   * Encrypts `text` with AES-256-GCM and returns "ivHex:authTagHex:cipherHex".
   * A fresh random 16-byte IV is generated per call, so identical plaintexts
   * produce different payloads.
   */
  encrypt(text: string): string {
    const iv = crypto.randomBytes(16);
    const cipher = crypto.createCipheriv(this.algorithm, this.key, iv);
    let encrypted = cipher.update(text, 'utf8', 'hex');
    encrypted += cipher.final('hex');
    const authTag = cipher.getAuthTag();
    return iv.toString('hex') + ':' + authTag.toString('hex') + ':' + encrypted;
  }

  /**
   * Decrypts a payload produced by encrypt().
   * @throws Error on a malformed payload (added — the original indexed the
   *         split() result blindly and failed with an opaque TypeError) or on
   *         failed GCM authentication.
   */
  decrypt(text: string): string {
    const parts = text.split(':');
    if (parts.length !== 3) {
      throw new Error('Invalid encrypted payload: expected iv:authTag:ciphertext');
    }
    // Safe: length checked above.
    const [ivHex, authTagHex, encrypted] = parts as [string, string, string];
    const iv = Buffer.from(ivHex, 'hex');
    const authTag = Buffer.from(authTagHex, 'hex');
    const decipher = crypto.createDecipheriv(this.algorithm, this.key, iv);
    decipher.setAuthTag(authTag);
    let decrypted = decipher.update(encrypted, 'hex', 'utf8');
    decrypted += decipher.final('utf8');
    return decrypted;
  }
}
Rate Limiting
// middleware/rate-limit.ts
import { Ratelimit } from '@upstash/ratelimit';
import { Redis } from '@upstash/redis';
// Sliding window: at most 10 requests per 10 seconds per client key.
const ratelimit = new Ratelimit({
  redis: Redis.fromEnv(),
  limiter: Ratelimit.slidingWindow(10, '10 s'),
  analytics: true
});

/**
 * Returns a 429 response when the caller is over quota, or null to let the
 * request proceed. Clients are keyed by x-forwarded-for ('anonymous' when absent).
 */
export async function rateLimitMiddleware(req: Request) {
  const clientKey = req.headers.get('x-forwarded-for') ?? 'anonymous';
  const result = await ratelimit.limit(clientKey);

  if (result.success) {
    return null;
  }

  const headers = {
    'X-RateLimit-Limit': result.limit.toString(),
    'X-RateLimit-Remaining': result.remaining.toString(),
    'X-RateLimit-Reset': new Date(result.reset).toISOString()
  };
  return new Response('Too Many Requests', { status: 429, headers });
}
Testing & Quality Assurance
Unit Testing
// __tests__/services/ai.test.ts
import { UnifiedAIService } from '@/services/ai/vercel-ai.service';
import { mockMessages } from '@/__mocks__/messages';
describe('AI Service Integration', () => {
  let service: UnifiedAIService;

  // Fresh instance per test so provider state cannot leak between cases.
  beforeEach(() => {
    service = new UnifiedAIService();
  });

  it('should stream GPT-4o responses', async () => {
    const stream = await service.streamResponse('gpt-4o', mockMessages);
    expect(stream).toBeDefined();
    // Streaming endpoint must answer with server-sent events.
    expect(stream.headers.get('content-type')).toBe('text/event-stream');
  });

  it('should handle model switching', async () => {
    // Every supported alias must resolve to a model instance.
    const models = ['gpt-4o', 'claude-3-opus', 'gemini-pro'];
    for (const model of models) {
      const result = service.getModel(model);
      expect(result).toBeDefined();
    }
  });
});
E2E Testing
// __tests__/e2e/chat-flow.test.ts
import { test, expect } from '@playwright/test';
// End-to-end happy path: pick a model, enable the avatar, send a message, and
// verify both the text reply and the avatar video render.
test('complete chat interaction with avatar', async ({ page }) => {
  await page.goto('/chat');
  // Select GPT-4o model
  await page.selectOption('[data-testid="model-selector"]', 'gpt-4o');
  // Enable avatar
  await page.click('[data-testid="avatar-toggle"]');
  // Send message
  await page.fill('[data-testid="chat-input"]', 'Hello, how are you?');
  await page.press('[data-testid="chat-input"]', 'Enter');
  // Wait for response
  await expect(page.locator('[data-testid="ai-message"]')).toBeVisible();
  await expect(page.locator('[data-testid="avatar-video"]')).toBeVisible();
});
Environment Variables
Required Variables
# AI Providers
OPENAI_API_KEY=sk-... # Required for GPT-4o
ANTHROPIC_API_KEY=sk-ant-... # Optional for Claude 3 Opus
GOOGLE_API_KEY=... # Optional for Gemini Pro
MISTRAL_API_KEY=... # Optional for Mistral
XAI_API_KEY=... # Optional for Grok
PERPLEXITY_API_KEY=... # Optional for Perplexity
# Supabase
NEXT_PUBLIC_SUPABASE_URL=https://xxx.supabase.co
NEXT_PUBLIC_SUPABASE_ANON_KEY=eyJ...
SUPABASE_SERVICE_ROLE_KEY=eyJ...
# HeyGen Avatars
HEYGEN_API_KEY=...
HEYGEN_SERVER_URL=wss://api.heygen.com/v1/streaming.avatar
# Deployment
VERCEL_TOKEN=...
VERCEL_PROJECT_ID=...
VERCEL_ORG_ID=...
# Monitoring
SENTRY_DSN=https://...
NEXT_PUBLIC_POSTHOG_KEY=phc_...
NEXT_PUBLIC_POSTHOG_HOST=https://app.posthog.com
# Communication
SENDGRID_API_KEY=SG...
RESEND_API_KEY=re_...
# Security
ENCRYPTION_KEY=... # 32-byte hex string
NEXTAUTH_SECRET=... # Random secret
NEXTAUTH_URL=http://localhost:3000
# WebRTC (Optional)
TURN_SERVER_URL=turn:turnserver.example.com:3478
TURN_USERNAME=username
TURN_CREDENTIAL=password
Quick Integration Examples
Add GPT-4o to Your App
// app/api/chat/route.ts
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';
/** Minimal streaming chat endpoint backed by GPT-4o. */
export async function POST(req: Request) {
  const body = await req.json();
  const stream = await streamText({
    model: openai('gpt-4o'),
    messages: body.messages
  });
  return stream.toDataStreamResponse();
}
Add HeyGen Avatar
// components/avatar.tsx
import { StreamingAvatar } from '@heygen/streaming-avatar';
export function InteractiveAvatar() {
const [avatar, setAvatar] = useState<StreamingAvatar>();
const initAvatar = async () => {
const newAvatar = new StreamingAvatar({
token: await getAccessToken()
});
await newAvatar.init();
setAvatar(newAvatar);
};
return (
<div>
<video ref={avatarRef} autoPlay playsInline />
<button onClick={() => avatar?.speak('Hello!')}>Speak</button>
</div>
);
}
Add Supabase Auth
// lib/supabase.ts
import { createClient } from '@supabase/supabase-js';
const supabase = createClient(
  process.env.NEXT_PUBLIC_SUPABASE_URL!,
  process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY!
);

// Sign up — supabase-js v2 returns { data, error }; the original destructured
// the v1 shape { user, error }, which is always undefined in v2.
const { data: signUpData, error: signUpError } = await supabase.auth.signUp({
  email: 'user@example.com',
  password: 'password'
});
// signUpData.user / signUpData.session hold the new account when signUpError is null.

// Sign in — same v2 shape: user and session are nested under `data`.
const { data: signInData, error: signInError } = await supabase.auth.signInWithPassword({
  email: 'user@example.com',
  password: 'password'
});
const { user, session } = signInData;
Support
For integration support:
- Documentation: https://docs.earna.ai
- GitHub: https://github.com/earna-ai/console
- Discord: https://discord.gg/earna-ai
- Email: support@earna.ai