import OpenAI from "openai";
import {
FixtureInstance,
GeneratedScene,
CueSequence,
LightingDesignRequest,
} from "../types/lighting";
import { RAGService } from "./rag-service-simple";
export class AILightingService {
private openai: OpenAI;
private ragService: RAGService;
constructor(ragService: RAGService) {
this.openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY,
});
this.ragService = ragService;
}
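  /**
   * Generates a scene by combining RAG recommendations with a GPT-4
   * completion, then validating the returned channel values against the
   * actual fixture inventory.
   *
   * @example
   * // Minimal sketch; field names are inferred from how this method reads
   * // the request. The authoritative LightingDesignRequest type lives in
   * // ../types/lighting.
   * const scene = await service.generateScene({
   *   sceneDescription: "Warm sunset wash over the downstage apron",
   *   designPreferences: { mood: "romantic" },
   *   availableFixtures: fixtures,
   *   sceneType: "full",
   * });
   */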
async generateScene(request: LightingDesignRequest): Promise<GeneratedScene> {
// Get AI recommendations from RAG
const recommendations =
await this.ragService.generateLightingRecommendations(
request.sceneDescription,
request.designPreferences?.mood || "neutral",
request.availableFixtures.map((f) => f.type || "OTHER"),
);
// Generate fixture values using AI
const fixturePrompt = this.buildFixturePrompt(request, recommendations);
const response = await this.openai.chat.completions.create({
model: "gpt-4",
messages: [{ role: "user", content: fixturePrompt }],
temperature: 0.3,
});
const content = response.choices[0].message.content || "{}";
    let aiResponse: any = {};
    let parsedOk = true;
    try {
      aiResponse = JSON.parse(content);
    } catch (_error) {
      // If strict parsing fails, try to extract the first JSON object from the response
      const jsonMatch = content.match(/\{[\s\S]*\}/);
      if (jsonMatch) {
        try {
          aiResponse = JSON.parse(jsonMatch[0]);
        } catch (_e) {
          parsedOk = false; // still unparseable; keep the empty fallback
        }
      } else {
        parsedOk = false;
      }
    }
    // Collect debug info; it is embedded in the reasoning text when the model omits its own reasoning
const debugInfo = {
promptLength: fixturePrompt.length,
responseLength: content.length,
      parsedResponse: parsedOk,
hasFixtureValues: !!(
aiResponse.fixtureValues && Array.isArray(aiResponse.fixtureValues)
),
fixtureValuesCount: aiResponse.fixtureValues?.length || 0,
availableFixturesCount: request.availableFixtures.length,
firstFixtureChannelCount: request.availableFixtures[0]?.channelCount || 0,
};
// Validate and clean fixture values to ensure channel IDs exist
const validatedFixtureValues = this.validateFixtureValues(
aiResponse.fixtureValues || [],
request.availableFixtures,
);
return {
name: aiResponse.name || `Scene for ${request.sceneDescription}`,
description: aiResponse.description || request.sceneDescription,
fixtureValues: validatedFixtureValues,
      reasoning:
        aiResponse.reasoning ||
        `${recommendations.reasoning ?? ""}\n\nDEBUG: ${JSON.stringify(debugInfo)}`,
};
}
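  /**
   * Orders previously generated scenes into a theatrical cue sequence.
   * Scenes are referenced by their index in the `scenes` array, matching the
   * "sceneId" convention spelled out in the prompt below.
   *
   * @example
   * // Sketch, assuming `scenes` came from generateScene:
   * const sequence = await service.generateCueSequence(
   *   "Act 1: storm builds, then sudden blackout",
   *   scenes,
   *   { defaultFadeIn: 2, defaultFadeOut: 4, followCues: true },
   * );
   */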
async generateCueSequence(
scriptContext: string,
scenes: GeneratedScene[],
transitionPreferences?: {
defaultFadeIn: number;
defaultFadeOut: number;
followCues: boolean;
},
): Promise<CueSequence> {
const prompt = `
Create a theatrical cue sequence based on this script context and generated scenes.
Script Context: ${scriptContext}
Generated Scenes:
${scenes.map((scene, i) => `[${i}] ${scene.name}: ${scene.description}`).join("\n")}
Transition Preferences:
- Default Fade In: ${transitionPreferences?.defaultFadeIn ?? 3}s
- Default Fade Out: ${transitionPreferences?.defaultFadeOut ?? 3}s
- Follow Cues: ${transitionPreferences?.followCues ?? false}
Create a cue sequence in this JSON format:
{
"name": "Cue sequence name",
"description": "Sequence description",
"cues": [
{
"name": "Cue name",
"cueNumber": 1.0,
"sceneId": "0", // Use the scene index number from above (0, 1, 2, etc.)
"fadeInTime": 3.0,
"fadeOutTime": 3.0,
"followTime": null or number,
"notes": "Director notes or cue description"
}
],
"reasoning": "Explanation of cue timing and sequencing decisions"
}
Consider:
- Dramatic pacing and story beats
- Smooth transitions between moods
- Technical practicality of fade times
- Standard theatrical cueing practices
- Moments that need manual vs automatic advancement
`;
const response = await this.openai.chat.completions.create({
model: "gpt-4",
messages: [{ role: "user", content: prompt }],
temperature: 0.4,
});
const content = response.choices[0].message.content || "{}";
    try {
      return JSON.parse(content);
    } catch (_error) {
      // Try to extract the first JSON object before giving up
      const jsonMatch = content.match(/\{[\s\S]*\}/);
      if (jsonMatch) {
        try {
          return JSON.parse(jsonMatch[0]);
        } catch (_e) {
          // Fall through to the structured fallback below
        }
      }
      return {
        name: "Generated Cue Sequence",
        description: "Fallback cue sequence due to parsing error",
        cues: [],
        reasoning: "Unable to parse AI response, using fallback structure",
      };
    }
}
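  /**
   * Assembles the scene-generation prompt. Additive scenes list the fixtures
   * to modify plus a sample of untouched fixtures for context; full scenes
   * instruct the model to set every fixture. Either way the model is asked
   * to reply with sparse {offset, value} channel data:
   *
   * @example
   * // Reply shape requested by the prompt itself:
   * // {
   * //   "name": "Scene name",
   * //   "fixtureValues": [
   * //     { "fixtureId": "fixture_id", "channels": [{ "offset": 0, "value": 255 }] }
   * //   ],
   * //   "reasoning": "explanation"
   * // }
   */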
private buildFixturePrompt(
request: LightingDesignRequest,
recommendations: any,
): string {
// Create condensed fixture summaries to reduce token usage
const fixtureDetails = request.availableFixtures
.filter((fixture) => fixture.channels && fixture.channels.length > 0)
.map((fixture) => {
        // List the channel types in order; the model addresses them by positional offset
        const channels = fixture.channels.map((ch) => String(ch.type));
return {
id: fixture.id,
name: fixture.name,
type: fixture.type,
mode: fixture.modeName,
channelCount: fixture.channelCount,
channels: channels.join(","),
};
});
// Limit context length by truncating if too many fixtures
    const maxFixtures = 15; // Kept low because each fixture entry includes its full channel list
const limitedFixtures = fixtureDetails.slice(0, maxFixtures);
const fixtureWarning =
fixtureDetails.length > maxFixtures
? `\n(Showing first ${maxFixtures} of ${fixtureDetails.length} fixtures)`
: "";
const sceneType = request.sceneType || "full";
const isAdditive = sceneType === "additive";
let prompt = `Scene: ${request.sceneDescription}
Design guidance: ${recommendations.reasoning || "Standard"}
Colors: ${recommendations.colorSuggestions?.join(",") || "Default"}
`;
if (isAdditive) {
// For additive scenes, provide context about other fixtures but only modify specific ones
const limitedIds = new Set(limitedFixtures.map((f) => f.id));
      const allFixtureDetails =
        request.allFixtures
          ?.filter((fixture) => fixture.channels && fixture.channels.length > 0)
          .map((fixture) => ({
            id: fixture.id,
            name: fixture.name,
            type: fixture.type,
            included: limitedIds.has(fixture.id),
          })) || [];
prompt += `ADDITIVE SCENE: Only modify the specified fixtures below. Other fixtures will remain unchanged.
Fixtures to modify (${limitedFixtures.length} of ${allFixtureDetails.length} total)${fixtureWarning}:
${limitedFixtures.map((f) => `${f.id}: ${f.name} (${f.type}, ${f.mode}) - Channels: ${f.channels}`).join("\n")}
Other fixtures in project (will remain unchanged):
${allFixtureDetails
.filter((f) => !f.included)
.slice(0, 5)
.map((f) => `${f.id}: ${f.name} (${f.type}) - NOT MODIFIED`)
.join(
"\n",
)}${allFixtureDetails.filter((f) => !f.included).length > 5 ? "\n... and more" : ""}
IMPORTANT: Only include fixtureValues for the ${limitedFixtures.length} fixtures listed above to modify.
`;
} else {
prompt += `FULL SCENE: Use ALL fixtures to create a complete lighting state.
Fixtures (use ALL ${limitedFixtures.length} fixtures)${fixtureWarning}:
${limitedFixtures.map((f) => `${f.id}: ${f.name} (${f.type}, ${f.mode}) - Channels: ${f.channels}`).join("\n")}
IMPORTANT: Include values for ALL ${limitedFixtures.length} fixtures above.
`;
}
prompt += `
Return JSON:
{
"name": "Scene name",
"fixtureValues": [
{"fixtureId": "fixture_id", "channels": [{"offset": 0, "value": 255}, {"offset": 1, "value": 128}]}
],
"reasoning": "explanation"
}
For each fixture, provide channels as an array of {offset, value} objects:
- offset: channel position (0, 1, 2, ...) starting from 0
- value: DMX value (0-255) for that channel
- Channels not included will retain their current values (omit channels you don't want to change)
- You can include explicit zero values if needed (e.g., to turn off a specific channel)
`;
return prompt;
}
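  /**
   * Clamps every channel value in a generated scene to the owning channel's
   * declared min/max range (or 0-255 when the channel is unknown), drops
   * offsets outside the fixture's channel count, and deduplicates repeated
   * offsets (last value wins).
   *
   * @example
   * // Sketch: for a 3-channel fixture whose channel 0 allows 0-200,
   * // [{ offset: 0, value: 255 }, { offset: 7, value: 10 }]
   * // optimizes to [{ offset: 0, value: 200 }]; offset 7 is out of range
   * // and discarded.
   */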
async optimizeSceneForFixtures(
scene: GeneratedScene,
availableFixtures: FixtureInstance[],
): Promise<GeneratedScene> {
// Validate and optimize the generated scene
const optimizedFixtureValues = scene.fixtureValues.map((fv) => {
const fixture = availableFixtures.find((f) => f.id === fv.fixtureId);
if (!fixture || !fixture.channels) return fv;
// Ensure all channel values are within valid ranges and valid offsets
const channelMap = new Map<number, number>();
// Filter and validate channels, deduplicating by offset (last value wins)
(Array.isArray(fv.channels) ? fv.channels : [])
.filter(
(ch) =>
ch &&
typeof ch === "object" &&
typeof ch.offset === "number" &&
typeof ch.value === "number" &&
ch.offset >= 0 &&
ch.offset < fixture.channelCount,
)
.forEach((ch) => {
const channel = fixture.channels.find((c) => c.offset === ch.offset);
// Clamp to channel's min/max range, or standard DMX range (0-255) if channel not found
const clampedValue = channel
? Math.max(channel.minValue, Math.min(channel.maxValue, ch.value))
: Math.max(0, Math.min(255, ch.value));
// Sparse format: preserve all provided values including explicit zeros
// Channels not in the map will retain current values (backend behavior)
// Last occurrence wins for duplicate offsets
channelMap.set(ch.offset, clampedValue);
});
// Convert map back to array in sparse format
const optimizedChannels = Array.from(channelMap.entries())
.map(([offset, value]) => ({ offset, value }))
.sort((a, b) => a.offset - b.offset); // Sort by offset for consistency
return {
...fv,
channels: optimizedChannels,
};
});
return {
...scene,
fixtureValues: optimizedFixtureValues,
};
}
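  /**
   * Asks the model to triage the rig into primary, supporting, and unused
   * fixtures for a scene, returning fixture IDs for each role.
   *
   * @example
   * // Sketch:
   * const usage = await service.suggestFixtureUsage(
   *   "Intimate two-person scene at the kitchen table",
   *   fixtures,
   * );
   * console.log(usage.primaryFixtures, usage.reasoning);
   */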
async suggestFixtureUsage(
sceneContext: string,
availableFixtures: FixtureInstance[],
): Promise<{
primaryFixtures: string[];
supportingFixtures: string[];
unusedFixtures: string[];
reasoning: string;
}> {
const fixtureInfo = availableFixtures.map((f) => ({
id: f.id,
name: f.name,
type: f.type,
tags: f.tags,
position: `Universe ${f.universe}, Channel ${f.startChannel}`,
}));
const prompt = `
Analyze these available fixtures and suggest which ones to use for this scene.
Scene Context: ${sceneContext}
Available Fixtures:
${JSON.stringify(fixtureInfo, null, 2)}
Recommend fixture usage in this JSON format:
{
"primaryFixtures": ["fixture_ids for main lighting"],
"supportingFixtures": ["fixture_ids for accent/fill lighting"],
"unusedFixtures": ["fixture_ids not needed for this scene"],
"reasoning": "Explanation of fixture selection strategy"
}
Consider:
- Fixture types and capabilities
- Positioning and coverage
- Scene requirements and mood
- Efficient use of available equipment
- Standard lighting practices (key, fill, back light)
`;
const response = await this.openai.chat.completions.create({
model: "gpt-4",
messages: [{ role: "user", content: prompt }],
temperature: 0.3,
});
const content = response.choices[0].message.content || "{}";
    try {
      return JSON.parse(content);
    } catch (_error) {
      // Try to extract the first JSON object before giving up
      const jsonMatch = content.match(/\{[\s\S]*\}/);
      if (jsonMatch) {
        try {
          return JSON.parse(jsonMatch[0]);
        } catch (_e) {
          // Fall through to the structured fallback below
        }
      }
      return {
        primaryFixtures: [],
        supportingFixtures: [],
        unusedFixtures: [],
        reasoning: "Unable to parse AI response, using fallback structure",
      };
    }
}
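  /**
   * Normalizes whatever channel encoding the model returned into the sparse
   * {offset, value} format. Three shapes are accepted:
   *
   * 1. channels: [{ offset, value }]          - current sparse format
   * 2. channelValues: [n0, n1, ...]           - legacy dense number array
   * 3. channelValues: [{ channelId, value }]  - oldest format, resolved to
   *    offsets via the fixture's channel list
   *
   * Unknown fixture IDs and out-of-range offsets are dropped; values are
   * clamped to 0-255.
   */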
private validateFixtureValues(
fixtureValues: any[],
availableFixtures: FixtureInstance[],
): Array<{
fixtureId: string;
channels: { offset: number; value: number; }[];
}> {
if (!Array.isArray(fixtureValues)) {
return [];
}
const validatedValues: Array<{
fixtureId: string;
channels: { offset: number; value: number; }[];
}> = [];
for (const fv of fixtureValues) {
if (!fv || typeof fv !== "object" || !fv.fixtureId) {
continue;
}
// Find the fixture to validate against
const fixture = availableFixtures.find((f) => f.id === fv.fixtureId);
if (!fixture || !fixture.channels) {
continue; // Skip invalid fixture IDs or missing channels
}
      // Normalize the sparse format plus two legacy formats from the AI.
      // A Map deduplicates by offset (last value wins) and preserves provided values.
      const channelMap = new Map<number, number>();
      if (
        Array.isArray(fv.channelValues) &&
        fv.channelValues.every((v: unknown) => typeof v !== "object")
      ) {
        // Legacy dense format: plain array of numbers where the index is the offset
        fv.channelValues.forEach((value: any, offset: number) => {
          const numValue = Math.max(0, Math.min(255, Number(value) || 0));
          if (offset >= 0 && offset < fixture.channelCount) {
            // Preserve all provided values (including explicit zeros)
            channelMap.set(offset, numValue);
          }
        });
      } else if (Array.isArray(fv.channels)) {
        // Current sparse format: array of {offset, value} objects
        fv.channels
          .filter(
            (ch: { offset: number; value: number }) =>
              ch &&
              typeof ch === "object" &&
              typeof ch.offset === "number" &&
              typeof ch.value === "number",
          )
          .forEach((ch: { offset: number; value: number }) => {
            const numValue = Math.max(0, Math.min(255, Number(ch.value) || 0));
            if (ch.offset >= 0 && ch.offset < fixture.channelCount) {
              // Last occurrence wins for duplicate offsets
              channelMap.set(ch.offset, numValue);
            }
          });
      } else if (Array.isArray(fv.channelValues)) {
        // Oldest format: array of {channelId, value} objects; the element-type
        // check on the first branch routes object arrays here
        for (const cv of fv.channelValues) {
          if (cv && typeof cv === "object" && cv.channelId) {
            // Resolve the channel ID to its offset within the fixture
            const channel = fixture.channels.find(
              (ch) => ch.id === cv.channelId,
            );
            if (channel) {
              const value = Math.max(0, Math.min(255, Number(cv.value) || 0));
              // Last occurrence wins for duplicate offsets
              channelMap.set(channel.offset, value);
            }
          }
        }
      }
// Convert map to array in sparse format, preserving all provided values
const channels = Array.from(channelMap.entries())
.map(([offset, value]) => ({ offset, value }))
.sort((a, b) => a.offset - b.offset); // Sort by offset for consistency
// Only add if we have at least one channel value
if (channels.length > 0) {
validatedValues.push({
fixtureId: fv.fixtureId,
channels,
});
}
}
return validatedValues;
}
}
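
// --- Usage sketch ------------------------------------------------------------
// A minimal end-to-end example of the service. It assumes a constructed
// RAGService and a request shaped like the fields generateScene reads; the
// authoritative types live in ../types/lighting. Illustrative, not wired into
// any caller.
export async function exampleLightingWorkflow(
  ragService: RAGService,
  request: LightingDesignRequest,
): Promise<CueSequence> {
  const service = new AILightingService(ragService);
  // Generate a scene, then clamp its channel values against the actual rig
  const scene = await service.generateScene(request);
  const optimized = await service.optimizeSceneForFixtures(
    scene,
    request.availableFixtures,
  );
  // Sequence the single scene with default 3s fades
  return service.generateCueSequence(request.sceneDescription, [optimized], {
    defaultFadeIn: 3,
    defaultFadeOut: 3,
    followCues: false,
  });
}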