feat: initial IQAI multi-model AI dashboard

- Express backend with Replicate API proxy (chat, models, account, search)
- React + Vite + Tailwind frontend with custom Midnight Violet color scheme
- @mention autocomplete to route messages to specific models
- Parallel multi-model queries with model selection in sidebar
- DuckDuckGo web search context injection
- Model manager UI (add/edit/remove Replicate models)
- Per-model system instructions per conversation
- Replicate account info display in sidebar
- Conversation history with local persistence (Zustand)
- Full Docker deployment (backend + nginx-served frontend)
- Montserrat + Poppins fonts

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Malin
2026-04-16 13:12:40 +02:00
commit 71965939a1
31 changed files with 2399 additions and 0 deletions

35
backend/routes/account.js Normal file
View File

@@ -0,0 +1,35 @@
import express from 'express';
const router = express.Router();
const REPLICATE_BASE = 'https://api.replicate.com/v1';
// GET /api/account — proxy the Replicate account profile, plus the hardware
// list as a best-effort extra (hardware failures are silently ignored).
router.get('/', async (req, res) => {
  const token = process.env.REPLICATE_API_TOKEN;
  if (!token) {
    return res.status(400).json({ error: 'REPLICATE_API_TOKEN not configured' });
  }
  try {
    const accountRes = await fetch(`${REPLICATE_BASE}/account`, {
      headers: { 'Authorization': `Bearer ${token}` }
    });
    if (!accountRes.ok) {
      // Surface Replicate's own status code and detail message when available.
      const body = await accountRes.json().catch(() => ({}));
      return res.status(accountRes.status).json({ error: body.detail || 'Failed to fetch account' });
    }
    const account = await accountRes.json();
    // Hardware/usage info is optional — best-effort only.
    let hardware = null;
    try {
      const hwRes = await fetch(`${REPLICATE_BASE}/hardware`, {
        headers: { 'Authorization': `Bearer ${token}` }
      });
      if (hwRes.ok) hardware = await hwRes.json();
    } catch (_) {}
    res.json({ account, hardware });
  } catch (err) {
    res.status(500).json({ error: err.message });
  }
});
export { router as accountRouter };

129
backend/routes/chat.js Normal file
View File

@@ -0,0 +1,129 @@
import express from 'express';
import { loadModels } from './models.js';
const router = express.Router();
const REPLICATE_BASE = 'https://api.replicate.com/v1';
// Assemble the input payload for a Replicate prediction.
// Layers, in order of increasing precedence: the model's default input,
// the optional system prompt (only when the model declares a parameter
// name for it), caller-supplied overrides, and finally the user prompt —
// pinned last so nothing can clobber it.
function buildInput(model, prompt, systemPrompt, extraParams) {
  const input = Object.assign({}, model.defaultInput);
  if (systemPrompt && model.systemPromptParam) {
    input[model.systemPromptParam] = systemPrompt;
  }
  if (extraParams) {
    Object.assign(input, extraParams);
  }
  input.prompt = prompt;
  return input;
}
// Start a prediction on the model's dedicated endpoint and wait for it.
// The `Prefer: wait` header asks Replicate to hold the connection open
// until the prediction settles (synchronous mode).
// Throws an Error carrying Replicate's `detail` message (or the HTTP status)
// on any non-2xx response.
async function runPrediction(model, input, token) {
  const endpoint = `${REPLICATE_BASE}/models/${model.owner}/${model.name}/predictions`;
  const res = await fetch(endpoint, {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${token}`,
      'Content-Type': 'application/json',
      'Prefer': 'wait'
    },
    body: JSON.stringify({ input })
  });
  if (res.ok) {
    return res.json();
  }
  const err = await res.json().catch(() => ({ detail: 'Unknown error' }));
  throw new Error(err.detail || `HTTP ${res.status}`);
}
// Normalize a Replicate prediction's `output` field into a display string.
// Streaming text models return an array of chunks (joined here); others
// return a plain string or a structured object (pretty-printed as JSON).
function extractOutput(prediction) {
  const { output } = prediction;
  // Nullish check, not truthiness: falsy-but-real outputs (0, false) must
  // not be silently dropped — the previous `!output` test swallowed them.
  if (output == null) return '';
  if (typeof output === 'string') return output;
  if (Array.isArray(output)) return output.join('');
  if (typeof output === 'object') return JSON.stringify(output, null, 2);
  return String(output);
}
// POST /api/chat — run a single model and return its normalized response.
// Body: { modelId, prompt, systemPrompt?, searchContext?, extraParams? }.
router.post('/', async (req, res) => {
  const token = process.env.REPLICATE_API_TOKEN;
  if (!token) {
    return res.status(400).json({ error: 'REPLICATE_API_TOKEN not configured' });
  }
  const { modelId, prompt, systemPrompt, searchContext, extraParams } = req.body;
  if (!modelId || !prompt) {
    return res.status(400).json({ error: 'modelId and prompt are required' });
  }
  try {
    const catalog = await loadModels();
    const model = catalog.find((m) => m.id === modelId);
    if (!model) {
      return res.status(404).json({ error: `Model not found: ${modelId}` });
    }
    // Prepend any web-search context so the model sees it before the question.
    const fullPrompt = searchContext ? `${searchContext}\n\n${prompt}` : prompt;
    const input = buildInput(model, fullPrompt, systemPrompt, extraParams);
    const prediction = await runPrediction(model, input, token);
    res.json({
      id: prediction.id,
      modelId: model.id,
      modelTag: model.tag,
      modelName: model.displayName,
      content: extractOutput(prediction),
      status: prediction.status,
      metrics: prediction.metrics,
      urls: prediction.urls
    });
  } catch (err) {
    res.status(500).json({ error: err.message });
  }
});
// POST /api/chat/multi — fan one prompt out to several models in parallel.
// Per-model failures are reported inline in the results array so a single
// bad model cannot sink the whole batch.
router.post('/multi', async (req, res) => {
  const token = process.env.REPLICATE_API_TOKEN;
  if (!token) {
    return res.status(400).json({ error: 'REPLICATE_API_TOKEN not configured' });
  }
  const { modelIds, prompt, systemPrompt, searchContext, extraParams } = req.body;
  if (!modelIds?.length || !prompt) {
    return res.status(400).json({ error: 'modelIds and prompt are required' });
  }
  try {
    const catalog = await loadModels();
    // The combined prompt is identical for every model — build it once.
    const fullPrompt = searchContext ? `${searchContext}\n\n${prompt}` : prompt;
    const results = await Promise.all(modelIds.map(async (modelId) => {
      const model = catalog.find((m) => m.id === modelId);
      if (!model) {
        return { modelId, error: 'Model not found' };
      }
      try {
        const input = buildInput(model, fullPrompt, systemPrompt, extraParams);
        const prediction = await runPrediction(model, input, token);
        return {
          id: prediction.id,
          modelId: model.id,
          modelTag: model.tag,
          modelName: model.displayName,
          content: extractOutput(prediction),
          status: prediction.status,
          metrics: prediction.metrics
        };
      } catch (err) {
        return { modelId, modelTag: model.tag, modelName: model.displayName, error: err.message };
      }
    }));
    res.json({ results });
  } catch (err) {
    res.status(500).json({ error: err.message });
  }
});
export { router as chatRouter };

84
backend/routes/models.js Normal file
View File

@@ -0,0 +1,84 @@
import express from 'express';
import { readFile, writeFile } from 'fs/promises';
import { fileURLToPath } from 'url';
import { dirname, join } from 'path';
const router = express.Router();
const __dirname = dirname(fileURLToPath(import.meta.url));
const MODELS_PATH = join(__dirname, '../data/models.json');
// Read and parse the model catalog (a JSON array) from disk.
async function loadModels() {
  return JSON.parse(await readFile(MODELS_PATH, 'utf-8'));
}
// Persist the model catalog to disk, pretty-printed for easy hand-editing.
async function saveModels(models) {
  const serialized = JSON.stringify(models, null, 2);
  await writeFile(MODELS_PATH, serialized);
}
// GET /api/models — return the full model catalog.
router.get('/', async (req, res) => {
  try {
    res.json(await loadModels());
  } catch (err) {
    res.status(500).json({ error: 'Failed to load models' });
  }
});
// POST /api/models — register a new model in the catalog.
// Body: { tag, displayName, owner, name, ...optional fields }; the first
// four are required. Responds 201 with the stored record, 400 on missing
// fields, 409 on a duplicate tag.
router.post('/', async (req, res) => {
  try {
    const { tag, displayName, owner, name, type, avatar, color, description, systemPromptParam, defaultInput } = req.body;
    if (!tag || !owner || !name || !displayName) {
      return res.status(400).json({ error: 'Missing required fields: tag, owner, name, displayName' });
    }
    // Tags are stored lowercased with symbols stripped, so normalize BEFORE
    // the duplicate check — comparing the raw input against stored tags
    // would let e.g. "GPT-4" slip past an existing "gpt4".
    const normalizedTag = tag.toLowerCase().replace(/[^a-z0-9]/g, '');
    const models = await loadModels();
    if (models.find(m => m.tag === normalizedTag)) {
      return res.status(409).json({ error: `Tag @${tag} already exists` });
    }
    const newModel = {
      // Derived id; two models whose owner/name differ only in characters
      // collapsed by this replacement could collide — acceptable for now.
      id: `${owner}-${name}`.replace(/[^a-z0-9-]/gi, '-'),
      tag: normalizedTag,
      displayName,
      owner,
      name,
      type: type || 'text',
      avatar: avatar || '🤖',
      color: color || '#6B7280',
      description: description || '',
      systemPromptParam: systemPromptParam || null,
      defaultInput: defaultInput || {}
    };
    models.push(newModel);
    await saveModels(models);
    res.status(201).json(newModel);
  } catch (err) {
    res.status(500).json({ error: 'Failed to add model' });
  }
});
// PUT /api/models/:id — shallow-merge request-body fields into an existing
// model. The id is immutable: it is re-pinned after the merge.
router.put('/:id', async (req, res) => {
  try {
    const models = await loadModels();
    const idx = models.findIndex((m) => m.id === req.params.id);
    if (idx === -1) {
      return res.status(404).json({ error: 'Model not found' });
    }
    const updated = { ...models[idx], ...req.body, id: models[idx].id };
    models[idx] = updated;
    await saveModels(models);
    res.json(updated);
  } catch (err) {
    res.status(500).json({ error: 'Failed to update model' });
  }
});
// DELETE /api/models/:id — remove a model from the catalog.
router.delete('/:id', async (req, res) => {
  try {
    const models = await loadModels();
    const remaining = models.filter((m) => m.id !== req.params.id);
    // Nothing filtered out means the id did not exist.
    if (remaining.length === models.length) {
      return res.status(404).json({ error: 'Model not found' });
    }
    await saveModels(remaining);
    res.json({ success: true });
  } catch (err) {
    res.status(500).json({ error: 'Failed to delete model' });
  }
});
export { router as modelsRouter, loadModels };

117
backend/routes/search.js Normal file
View File

@@ -0,0 +1,117 @@
import express from 'express';
const router = express.Router();
// DuckDuckGo Instant Answer API - no key required
// Query the DuckDuckGo Instant Answer API — a keyless public JSON endpoint.
// Throws on a non-2xx response (callers handle this via Promise.allSettled).
async function ddgInstantAnswer(query) {
  const params = `q=${encodeURIComponent(query)}&format=json&no_html=1&skip_disambig=1`;
  const response = await fetch(`https://api.duckduckgo.com/?${params}`, {
    headers: { 'User-Agent': 'IQAI-Dashboard/1.0' }
  });
  if (!response.ok) {
    throw new Error('DDG search failed');
  }
  return response.json();
}
// DuckDuckGo HTML search scraper for more results
// Scrape the DuckDuckGo HTML (non-JS) endpoint for organic web results.
// Returns up to 5 { title, snippet, url } objects; on any HTTP failure
// returns an empty array rather than throwing — search is best-effort.
async function ddgWebSearch(query) {
const url = `https://html.duckduckgo.com/html/?q=${encodeURIComponent(query)}`;
// NOTE(review): the browser-like User-Agent is presumably needed to avoid
// DDG serving a block/captcha page — confirm against the live endpoint.
const res = await fetch(url, {
headers: {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
'Accept': 'text/html'
}
});
if (!res.ok) return [];
const html = await res.text();
// Parse result links and snippets from DDG HTML
// NOTE(review): titles (result__a) and snippets (result__snippet) are
// paired purely by array index — this assumes every result anchor has a
// matching snippet anchor in the same document order; a missing snippet
// would shift the pairing for all later results. Verify against live
// DDG markup if snippets look mismatched.
const results = [];
const resultRegex = /<a[^>]+class="result__a"[^>]*href="([^"]*)"[^>]*>([\s\S]*?)<\/a>/gi;
const snippetRegex = /<a[^>]+class="result__snippet"[^>]*>([\s\S]*?)<\/a>/gi;
const links = [...html.matchAll(resultRegex)].slice(0, 8);
const snippets = [...html.matchAll(snippetRegex)].slice(0, 8);
for (let i = 0; i < Math.min(links.length, 5); i++) {
// Strip any inline markup (e.g. <b> highlight tags) from the text.
const title = links[i][2].replace(/<[^>]+>/g, '').trim();
const snippet = snippets[i] ? snippets[i][1].replace(/<[^>]+>/g, '').trim() : '';
const href = links[i][1];
// DDG redirects through their own URLs, extract the real URL
// (the target is URL-encoded in the `uddg` query parameter).
const realUrl = href.startsWith('//duckduckgo.com/l/?uddg=')
? decodeURIComponent(href.split('uddg=')[1]?.split('&')[0] || href)
: href;
// Drop empty titles and DDG's own navigation anchors.
if (title && !title.includes('DuckDuckGo')) {
results.push({ title, snippet, url: realUrl });
}
}
return results;
}
// GET /api/search?q=… — run the instant-answer lookup and the HTML scrape
// in parallel, tolerate failure of either, and return both the raw data and
// a prompt-ready formatted string.
router.get('/', async (req, res) => {
  const { q } = req.query;
  if (!q) {
    return res.status(400).json({ error: 'Query parameter q is required' });
  }
  try {
    const [instantOutcome, webOutcome] = await Promise.allSettled([
      ddgInstantAnswer(q),
      ddgWebSearch(q)
    ]);
    // Either source may fail independently; fall back to null / empty list.
    const answer = instantOutcome.status === 'fulfilled' ? instantOutcome.value : null;
    const results = webOutcome.status === 'fulfilled' ? webOutcome.value : [];
    res.json({
      query: q,
      answer,
      results,
      // Pre-formatted block for injection into an AI prompt.
      formatted: formatSearchResults(q, answer, results)
    });
  } catch (err) {
    res.status(500).json({ error: err.message });
  }
});
// Render instant-answer data plus scraped web results into a plain-text
// block (bracket-delimited) suitable for prepending to an AI prompt.
// `instant` may be null; `webResults` is always an array.
function formatSearchResults(query, instant, webResults) {
  const out = [`[WEB SEARCH RESULTS FOR: "${query}"]`, ''];
  if (instant?.AbstractText) {
    out.push(`Summary: ${instant.AbstractText}`);
    if (instant.AbstractSource) out.push(`Source: ${instant.AbstractSource}`);
    out.push('');
  }
  if (instant?.Answer) {
    out.push(`Direct Answer: ${instant.Answer}`, '');
  }
  if (webResults.length > 0) {
    out.push('Web Results:');
    let rank = 0;
    for (const entry of webResults) {
      rank += 1;
      out.push(`${rank}. ${entry.title}`);
      if (entry.snippet) out.push(` ${entry.snippet}`);
      out.push(` URL: ${entry.url}`);
    }
    out.push('');
  }
  if (instant?.RelatedTopics?.length > 0) {
    // Keep at most 3 topics that actually carry text.
    const topicLines = instant.RelatedTopics
      .filter((t) => t.Text)
      .slice(0, 3)
      .map((t) => `- ${t.Text}`);
    if (topicLines.length > 0) {
      out.push('Related Topics:', ...topicLines, '');
    }
  }
  out.push('[END SEARCH RESULTS]');
  return out.join('\n');
}
export { router as searchRouter };