Reubencf committed on
Commit
a33dded
·
verified ·
1 Parent(s): 77fa073

Upload 53 files

Browse files
.dockerignore ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Dependencies
2
+ node_modules
3
+ npm-debug.log*
4
+ yarn-debug.log*
5
+ yarn-error.log*
6
+
7
+ # Next.js
8
+ .next/
9
+ out/
10
+
11
+ # Production
12
+ build
13
+ dist
14
+
15
+ # Environment variables
16
+ .env*
17
+
18
+ # Debug
19
+ npm-debug.log*
20
+ yarn-debug.log*
21
+ yarn-error.log*
22
+
23
+ # Vercel
24
+ .vercel
25
+
26
+ # TypeScript
27
+ *.tsbuildinfo
28
+ next-env.d.ts
29
+
30
+ # OS generated files
31
+ .DS_Store
32
+ .DS_Store?
33
+ ._*
34
+ .Spotlight-V100
35
+ .Trashes
36
+ ehthumbs.db
37
+ Thumbs.db
38
+
39
+ # IDE
40
+ .vscode
41
+ .idea
42
+ *.swp
43
+ *.swo
44
+
45
+ # Logs
46
+ logs
47
+ *.log
48
+
49
+ # Git
50
+ .git
51
+ .gitignore
52
+ README.md
53
+
54
+ # Docker
55
+ Dockerfile
56
+ .dockerignore
README.md CHANGED
@@ -5,14 +5,7 @@ colorFrom: yellow
5
  colorTo: yellow
6
  sdk: docker
7
  pinned: false
8
- app_port: 7860
9
- hf_oauth: true
10
- # optional, default duration is 8 hours/480 minutes. Max duration is 30 days/43200 minutes.
11
- hf_oauth_expiration_minutes: 480
12
- # optional, see "Scopes" below. "openid profile" is always included.
13
- hf_oauth_scopes:
14
- - manage-repos
15
- - inference-api
16
  ---
17
 
18
  # Nano Banana Node Editor 🍌
 
5
  colorTo: yellow
6
  sdk: docker
7
  pinned: false
8
+ app_port: 3000
 
 
 
 
 
 
 
9
  ---
10
 
11
  # Nano Banana Node Editor 🍌
app/api/hf-process/route.ts ADDED
@@ -0,0 +1,334 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /**
2
+ * API ROUTE: /api/hf-process
3
+ *
4
+ * HuggingFace model processing endpoint for the Nano Banana Editor.
5
+ * Handles image editing and generation using HuggingFace models.
6
+ *
7
+ * Supported Models:
8
+ * - black-forest-labs/FLUX.1-Kontext-dev: Image editing with context understanding
9
+ * - Qwen/Qwen-Image-Edit: Powerful image editing model
10
+ * - black-forest-labs/FLUX.1-dev: Text-to-image generation
11
+ *
12
+ * IMPORTANT LIMITATIONS:
13
+ * - These models only accept SINGLE images for editing
14
+ * - MERGE operations require Nano Banana Pro (Gemini API) which accepts multiple images
15
+ * - Text-to-image (FLUX.1-dev) doesn't require input images
16
+ */
17
+
18
+ import { NextRequest, NextResponse } from "next/server";
19
+ import { cookies } from "next/headers";
20
+ import { HfInference } from "@huggingface/inference";
21
+
22
+ // Configure Next.js runtime
23
+ export const runtime = "nodejs";
24
+
25
+ // Set maximum execution time for AI operations
26
+ export const maxDuration = 60;
27
+
28
+ /**
29
+ * Available HuggingFace models with their capabilities
30
+ */
31
+ export const HF_MODELS = {
32
+ "FLUX.1-Kontext-dev": {
33
+ id: "black-forest-labs/FLUX.1-Kontext-dev",
34
+ name: "FLUX.1 Kontext",
35
+ type: "image-to-image",
36
+ description: "Advanced image editing with context understanding",
37
+ supportsNodes: ["BACKGROUND", "CLOTHES", "STYLE", "EDIT", "CAMERA", "AGE", "FACE", "LIGHTNING", "POSES"],
38
+ },
39
+ "Qwen-Image-Edit": {
40
+ id: "Qwen/Qwen-Image-Edit",
41
+ name: "Qwen Image Edit",
42
+ type: "image-to-image",
43
+ description: "Powerful image editing and manipulation",
44
+ supportsNodes: ["BACKGROUND", "CLOTHES", "STYLE", "EDIT", "CAMERA", "AGE", "FACE", "LIGHTNING", "POSES"],
45
+ },
46
+ "FLUX.1-dev": {
47
+ id: "black-forest-labs/FLUX.1-dev",
48
+ name: "FLUX.1 Dev",
49
+ type: "text-to-image",
50
+ description: "High-quality text-to-image generation",
51
+ supportsNodes: ["CHARACTER"], // Only for generating new images
52
+ },
53
+ };
54
+
55
+ /**
56
+ * Parse base64 data URL into components
57
+ */
58
+ function parseDataUrl(dataUrl: string): { mimeType: string; data: string } | null {
59
+ const match = dataUrl.match(/^data:(.*?);base64,(.*)$/);
60
+ if (!match) return null;
61
+ return {
62
+ mimeType: match[1] || "image/png",
63
+ data: match[2]
64
+ };
65
+ }
66
+
67
+ /**
68
+ * Convert base64 to Blob for HuggingFace API
69
+ */
70
+ function base64ToBlob(base64: string, mimeType: string): Blob {
71
+ const byteCharacters = atob(base64);
72
+ const byteNumbers = new Array(byteCharacters.length);
73
+ for (let i = 0; i < byteCharacters.length; i++) {
74
+ byteNumbers[i] = byteCharacters.charCodeAt(i);
75
+ }
76
+ const byteArray = new Uint8Array(byteNumbers);
77
+ return new Blob([byteArray], { type: mimeType });
78
+ }
79
+
80
+ /**
81
+ * Main POST handler for HuggingFace model processing
82
+ */
83
+ export async function POST(req: NextRequest) {
84
+ try {
85
+ // Parse request body
86
+ let body: {
87
+ type: string;
88
+ model: string;
89
+ image?: string;
90
+ prompt?: string;
91
+ params?: any;
92
+ };
93
+
94
+ try {
95
+ body = await req.json();
96
+ } catch (jsonError) {
97
+ console.error('[HF-API] Failed to parse JSON:', jsonError);
98
+ return NextResponse.json(
99
+ { error: "Invalid JSON in request body" },
100
+ { status: 400 }
101
+ );
102
+ }
103
+
104
+ // Get HF token from cookies
105
+ let hfToken: string | null = null;
106
+ try {
107
+ const cookieStore = await cookies();
108
+ const tokenCookie = cookieStore.get('hf_token');
109
+ hfToken = tokenCookie?.value || null;
110
+ } catch (error) {
111
+ console.error('Error reading HF token:', error);
112
+ }
113
+
114
+ if (!hfToken) {
115
+ return NextResponse.json(
116
+ { error: "Please login with HuggingFace to use HF models. Click 'Login with HuggingFace' in the header." },
117
+ { status: 401 }
118
+ );
119
+ }
120
+
121
+ // Validate model selection
122
+ const modelKey = body.model as keyof typeof HF_MODELS;
123
+ const modelConfig = HF_MODELS[modelKey];
124
+
125
+ if (!modelConfig) {
126
+ return NextResponse.json(
127
+ { error: `Invalid model: ${body.model}. Available models: ${Object.keys(HF_MODELS).join(", ")}` },
128
+ { status: 400 }
129
+ );
130
+ }
131
+
132
+ // Check for MERGE - not supported with HF models
133
+ if (body.type === "MERGE") {
134
+ return NextResponse.json(
135
+ {
136
+ error: "MERGE operations require Nano Banana Pro (Gemini API). HuggingFace models only accept single images. Please switch to 'Nano Banana Pro' mode and enter your Google Gemini API key to use MERGE functionality.",
137
+ requiresNanoBananaPro: true
138
+ },
139
+ { status: 400 }
140
+ );
141
+ }
142
+
143
+ // Initialize HuggingFace client
144
+ const hf = new HfInference(hfToken);
145
+
146
+ // Handle text-to-image generation (FLUX.1-dev)
147
+ if (modelConfig.type === "text-to-image") {
148
+ const prompt = body.prompt || body.params?.characterDescription || "A professional portrait photo";
149
+
150
+ try {
151
+ const result = await hf.textToImage({
152
+ model: modelConfig.id,
153
+ inputs: prompt,
154
+ parameters: {
155
+ num_inference_steps: 28,
156
+ guidance_scale: 3.5,
157
+ },
158
+ });
159
+
160
+ // Result is a Blob, convert to base64
161
+ const resultBlob = result as unknown as Blob;
162
+ const arrayBuffer = await resultBlob.arrayBuffer();
163
+ const base64 = Buffer.from(arrayBuffer).toString('base64');
164
+ const dataUrl = `data:image/png;base64,${base64}`;
165
+
166
+ return NextResponse.json({ image: dataUrl });
167
+ } catch (hfError: any) {
168
+ console.error('[HF-API] Text-to-image error:', hfError);
169
+ return NextResponse.json(
170
+ { error: `HuggingFace API error: ${hfError.message || 'Unknown error'}` },
171
+ { status: 500 }
172
+ );
173
+ }
174
+ }
175
+
176
+ // Handle image-to-image editing
177
+ if (modelConfig.type === "image-to-image") {
178
+ // Validate input image
179
+ if (!body.image) {
180
+ return NextResponse.json(
181
+ { error: "No input image provided. Please connect an image source to this node." },
182
+ { status: 400 }
183
+ );
184
+ }
185
+
186
+ const parsed = parseDataUrl(body.image);
187
+ if (!parsed) {
188
+ return NextResponse.json(
189
+ { error: "Invalid image format. Please use a valid image." },
190
+ { status: 400 }
191
+ );
192
+ }
193
+
194
+ // Build the editing prompt from parameters
195
+ const prompts: string[] = [];
196
+ const params = body.params || {};
197
+
198
+ // Background modifications
199
+ if (params.backgroundType) {
200
+ if (params.backgroundType === "color") {
201
+ prompts.push(`Change the background to a solid ${params.backgroundColor || "white"} color.`);
202
+ } else if (params.backgroundType === "custom" && params.customPrompt) {
203
+ prompts.push(params.customPrompt);
204
+ } else if (params.backgroundType === "city") {
205
+ prompts.push(`Place the person in a ${params.citySceneType || "busy city street"} during ${params.cityTimeOfDay || "daytime"}.`);
206
+ }
207
+ }
208
+
209
+ // Style application
210
+ if (params.stylePreset) {
211
+ const styleMap: { [key: string]: string } = {
212
+ "90s-anime": "Transform into 90s anime art style",
213
+ "mha": "Convert into My Hero Academia anime style",
214
+ "dbz": "Convert into Dragon Ball Z anime style",
215
+ "ukiyo-e": "Convert into Japanese Ukiyo-e woodblock print style",
216
+ "cubism": "Convert into Cubist art style",
217
+ "van-gogh": "Convert into Van Gogh post-impressionist style",
218
+ "simpsons": "Convert into The Simpsons cartoon style",
219
+ "family-guy": "Convert into Family Guy animation style",
220
+ "pixar": "Convert into Pixar animation style",
221
+ "manga": "Convert into Manga style",
222
+ };
223
+ const styleDescription = styleMap[params.stylePreset] || `Apply ${params.stylePreset} style`;
224
+ prompts.push(`${styleDescription} at ${params.styleStrength || 50}% intensity.`);
225
+ }
226
+
227
+ // Edit prompt
228
+ if (params.editPrompt) {
229
+ prompts.push(params.editPrompt);
230
+ }
231
+
232
+ // Age transformation
233
+ if (params.targetAge) {
234
+ prompts.push(`Transform the person to look ${params.targetAge} years old.`);
235
+ }
236
+
237
+ // Face modifications
238
+ if (params.faceOptions) {
239
+ const face = params.faceOptions;
240
+ const modifications: string[] = [];
241
+ if (face.removePimples) modifications.push("remove pimples");
242
+ if (face.addSunglasses) modifications.push("add sunglasses");
243
+ if (face.addHat) modifications.push("add a hat");
244
+ if (face.changeHairstyle) modifications.push(`change hairstyle to ${face.changeHairstyle}`);
245
+ if (face.facialExpression) modifications.push(`change expression to ${face.facialExpression}`);
246
+ if (modifications.length > 0) {
247
+ prompts.push(`Face modifications: ${modifications.join(", ")}`);
248
+ }
249
+ }
250
+
251
+ // Lighting effects
252
+ if (params.lightingPrompt) {
253
+ prompts.push(`Apply lighting: ${params.lightingPrompt}`);
254
+ }
255
+
256
+ // Pose modifications
257
+ if (params.posePrompt) {
258
+ prompts.push(`Change pose to: ${params.posePrompt}`);
259
+ }
260
+
261
+ const finalPrompt = prompts.length > 0
262
+ ? prompts.join(" ")
263
+ : body.prompt || "Enhance this image with high quality output.";
264
+
265
+ try {
266
+ // Convert base64 to blob for HF API
267
+ const imageBlob = base64ToBlob(parsed.data, parsed.mimeType);
268
+
269
+ // Use image-to-image endpoint
270
+ const result = await hf.imageToImage({
271
+ model: modelConfig.id,
272
+ inputs: imageBlob,
273
+ parameters: {
274
+ prompt: finalPrompt,
275
+ num_inference_steps: 28,
276
+ guidance_scale: 7.5,
277
+ strength: 0.75,
278
+ },
279
+ });
280
+
281
+ // Convert result blob to base64
282
+ const arrayBuffer = await result.arrayBuffer();
283
+ const base64 = Buffer.from(arrayBuffer).toString('base64');
284
+ const dataUrl = `data:image/png;base64,${base64}`;
285
+
286
+ return NextResponse.json({ image: dataUrl });
287
+ } catch (hfError: any) {
288
+ console.error('[HF-API] Image-to-image error:', hfError);
289
+
290
+ // Provide helpful error messages
291
+ if (hfError.message?.includes('401') || hfError.message?.includes('unauthorized')) {
292
+ return NextResponse.json(
293
+ { error: "HuggingFace authentication failed. Please logout and login again." },
294
+ { status: 401 }
295
+ );
296
+ }
297
+
298
+ if (hfError.message?.includes('Model') && hfError.message?.includes('not')) {
299
+ return NextResponse.json(
300
+ { error: `Model ${modelConfig.id} is not available or requires a Pro subscription.` },
301
+ { status: 503 }
302
+ );
303
+ }
304
+
305
+ return NextResponse.json(
306
+ { error: `HuggingFace API error: ${hfError.message || 'Unknown error'}` },
307
+ { status: 500 }
308
+ );
309
+ }
310
+ }
311
+
312
+ return NextResponse.json(
313
+ { error: "Unsupported operation type" },
314
+ { status: 400 }
315
+ );
316
+
317
+ } catch (err: any) {
318
+ console.error("/api/hf-process error:", err);
319
+ return NextResponse.json(
320
+ { error: `Failed to process: ${err?.message || 'Unknown error'}` },
321
+ { status: 500 }
322
+ );
323
+ }
324
+ }
325
+
326
+ /**
327
+ * GET handler to return available models and their capabilities
328
+ */
329
+ export async function GET() {
330
+ return NextResponse.json({
331
+ models: HF_MODELS,
332
+ note: "MERGE operations require Nano Banana Pro (Gemini API) as it needs multi-image input which HuggingFace models don't support."
333
+ });
334
+ }
app/api/improve-prompt/route.ts CHANGED
@@ -18,7 +18,6 @@ export async function POST(req: NextRequest) {
18
  const body = await req.json() as {
19
  prompt: string;
20
  type?: string; // 'background', 'edit', etc.
21
- apiToken?: string; // User's Google AI API token
22
  };
23
 
24
  if (!body.prompt?.trim()) {
@@ -38,11 +37,11 @@ export async function POST(req: NextRequest) {
38
  console.error('Error reading HF token from cookies:', error);
39
  }
40
 
41
- // Validate and retrieve Google API key from user input or environment
42
- const apiKey = body.apiToken || process.env.GOOGLE_API_KEY;
43
  if (!apiKey || apiKey === 'your_actual_api_key_here') {
44
  return NextResponse.json(
45
- { error: `API key not provided. Please ${isHfProUser ? 'enter your Google Gemini API token in the top right' : 'login with HF Pro or enter your Google Gemini API token'}.` },
46
  { status: 500 }
47
  );
48
  }
 
18
  const body = await req.json() as {
19
  prompt: string;
20
  type?: string; // 'background', 'edit', etc.
 
21
  };
22
 
23
  if (!body.prompt?.trim()) {
 
37
  console.error('Error reading HF token from cookies:', error);
38
  }
39
 
40
+ // Get API key
41
+ const apiKey = process.env.GOOGLE_API_KEY;
42
  if (!apiKey || apiKey === 'your_actual_api_key_here') {
43
  return NextResponse.json(
44
+ { error: `API key not configured. Please ${isHfProUser ? 'contact support' : 'login with HF Pro'}.` },
45
  { status: 500 }
46
  );
47
  }
app/api/process/route.ts CHANGED
@@ -487,7 +487,6 @@ The result should look like all subjects were photographed together in the same
487
  // Build cinematic camera prompt for professional, movie-like results
488
  let cameraPrompt = "CINEMATIC CAMERA TRANSFORMATION: Transform this image into a professional, cinematic photograph with movie-quality production values";
489
 
490
- // 1. Focal Length (Lens Choice) - First priority as it defines perspective
491
  if (params.focalLength) {
492
  if (params.focalLength === "8mm") {
493
  cameraPrompt += " shot with an ultra-wide 8mm fisheye lens creating dramatic barrel distortion, immersive perspective, and cinematic edge curvature typical of action sequences";
@@ -510,155 +509,77 @@ The result should look like all subjects were photographed together in the same
510
  }
511
  }
512
 
513
- // 2. Aperture (Depth of Field Control) - Core exposure triangle component
514
  if (params.aperture) {
515
- if (params.aperture === "f/0.95") {
516
- cameraPrompt += `, shot wide open at f/0.95 for extreme shallow depth of field, ethereal bokeh, and cinematic subject isolation with dreamy background blur`;
517
- } else if (params.aperture === "f/1.2") {
518
- cameraPrompt += `, captured at f/1.2 for beautiful shallow depth of field, creating that signature cinematic look with smooth background separation and creamy bokeh`;
519
  } else if (params.aperture === "f/1.4") {
520
- cameraPrompt += `, shot at f/1.4 for controlled shallow depth of field, maintaining subject sharpness while creating pleasing background blur and professional bokeh quality`;
521
- } else if (params.aperture === "f/1.8") {
522
- cameraPrompt += `, captured at f/1.8 for balanced depth of field, keeping key subjects sharp while maintaining smooth background separation and natural bokeh`;
523
- } else if (params.aperture === "f/2") {
524
- cameraPrompt += `, shot at f/2 for moderate depth of field with excellent subject isolation, maintaining cinematic quality and professional sharpness`;
525
  } else if (params.aperture === "f/2.8") {
526
- cameraPrompt += `, photographed at f/2.8 for optimal lens sharpness with controlled depth of field, ideal for professional portrait and documentary work`;
527
  } else if (params.aperture === "f/4") {
528
- cameraPrompt += `, captured at f/4 for extended depth of field with excellent overall sharpness, perfect for group portraits and environmental photography`;
529
  } else if (params.aperture === "f/5.6") {
530
- cameraPrompt += `, shot at f/5.6 for deep focus with maximum lens sharpness, ideal for landscape and architectural photography where detail is paramount`;
531
- } else if (params.aperture === "f/8") {
532
- cameraPrompt += `, photographed at f/8 for optimal depth of field and corner-to-corner sharpness, the sweet spot for landscape and travel photography`;
533
- } else if (params.aperture === "f/11") {
534
- cameraPrompt += `, captured at f/11 for extensive depth of field with front-to-back sharpness, perfect for sweeping landscapes and architectural details`;
535
- } else if (params.aperture === "f/16") {
536
- cameraPrompt += `, shot at f/16 for maximum depth of field with hyperfocal distance focusing, ensuring sharp detail from foreground to infinity`;
537
- } else if (params.aperture === "f/22") {
538
- cameraPrompt += `, photographed at f/22 for extreme depth of field with starburst effects on light sources, creating dramatic landscape and architectural imagery`;
539
  } else {
540
- cameraPrompt += `, professionally exposed at ${params.aperture} with carefully controlled depth of field for optimal image quality`;
541
  }
542
  }
543
 
544
- // 3. Shutter Speed (Motion Control) - Core exposure triangle component
545
- if (params.shutterSpeed) {
546
- if (params.shutterSpeed === "1/1000s") {
547
- cameraPrompt += ", captured at 1/1000s shutter speed to freeze fast action and eliminate motion blur with tack-sharp precision";
548
- } else if (params.shutterSpeed === "1/250s") {
549
- cameraPrompt += ", shot at 1/250s shutter speed for optimal handheld photography with sharp subjects and minimal camera shake";
550
- } else if (params.shutterSpeed === "1/30s") {
551
- cameraPrompt += ", photographed at 1/30s shutter speed creating subtle motion blur while maintaining subject recognition";
552
- } else if (params.shutterSpeed === "1/15s") {
553
- cameraPrompt += ", captured at 1/15s shutter speed for intentional motion blur effects and dynamic movement trails";
554
- } else if (params.shutterSpeed === "5s") {
555
- cameraPrompt += ", shot with 5-second long exposure creating smooth motion blur, light trails, and ethereal atmospheric effects";
556
- } else {
557
- cameraPrompt += `, captured with ${params.shutterSpeed} shutter speed for controlled motion and exposure effects`;
558
- }
559
- }
560
-
561
- // 4. ISO (Sensor Sensitivity) - Final exposure triangle component
562
  if (params.iso) {
563
  if (params.iso === "ISO 100") {
564
  cameraPrompt += ", shot at ISO 100 for pristine image quality, zero noise, and maximum dynamic range typical of high-end cinema cameras";
 
 
565
  } else if (params.iso === "ISO 400") {
566
  cameraPrompt += ", filmed at ISO 400 for balanced exposure with minimal noise, the sweet spot for most cinematic scenarios";
567
- } else if (params.iso === "ISO 1600") {
 
 
568
  cameraPrompt += ", captured at ISO 1600 with controlled grain for dramatic low-light cinematography and moody atmosphere";
569
- } else if (params.iso === "ISO 6400") {
570
- cameraPrompt += ", filmed at ISO 6400 with artistic grain structure for gritty, realistic cinema aesthetics";
571
  } else {
572
  cameraPrompt += `, shot at ${params.iso} with appropriate noise characteristics`;
573
  }
574
  }
575
 
576
- // 5. White Balance (Color Temperature) - Color accuracy foundation
577
- if(params.whiteBalance) {
578
- cameraPrompt += `, shot with ${params.whiteBalance} white balance`;
579
- }
580
-
581
- // 6. Lighting Setup (Illumination Style) - Mood and atmosphere
582
  if (params.lighting) {
583
- if (params.lighting === "Natural Light") {
584
- cameraPrompt += ", naturally lit with soft, diffused daylight providing even illumination and organic shadow patterns";
585
- } else if (params.lighting === "Golden Hour") {
586
  cameraPrompt += ", cinematically lit during golden hour with warm, directional sunlight creating magical rim lighting, long shadows, and that coveted cinematic glow";
587
  } else if (params.lighting === "Blue Hour") {
588
  cameraPrompt += ", captured during blue hour with soft, even twilight illumination and cool color temperature for moody cinematic atmosphere";
589
- } else if (params.lighting === "Studio Lighting") {
590
  cameraPrompt += ", professionally lit with multi-point studio lighting setup featuring key light, fill light, and rim light for commercial cinema quality";
591
- } else if (params.lighting === "Rembrandt") {
592
- cameraPrompt += ", lit with Rembrandt lighting creating a distinctive triangle of light on the cheek with dramatic chiaroscuro contrast between light and shadow";
593
- } else if (params.lighting === "Split Lighting") {
594
- cameraPrompt += ", illuminated with split lighting dividing the face into equal halves of light and shadow for dramatic and mysterious effect";
595
- } else if (params.lighting === "Butterfly Lighting") {
596
- cameraPrompt += ", lit with butterfly lighting from above creating a butterfly-shaped shadow under the nose for flattering beauty and glamour portraits";
597
- } else if (params.lighting === "Loop Lighting") {
598
- cameraPrompt += ", illuminated with loop lighting creating a small loop-shaped shadow from the nose for natural and versatile portrait lighting";
599
- } else if (params.lighting === "Rim Lighting") {
600
- cameraPrompt += ", backlit with rim lighting creating a glowing outline around the subject for dramatic separation and three-dimensional depth";
601
- } else if (params.lighting === "Silhouette") {
602
- cameraPrompt += ", backlit for silhouette effect with the subject outlined against bright background while facial details remain in shadow";
603
- } else if (params.lighting === "High Key") {
604
- cameraPrompt += ", lit with high-key lighting using bright, even illumination with minimal shadows for optimistic and clean aesthetic";
605
- } else if (params.lighting === "Low Key") {
606
- cameraPrompt += ", dramatically lit with low-key lighting emphasizing deep shadows and high contrast for moody and mysterious atmosphere";
607
  } else {
608
  cameraPrompt += `, professionally lit with ${params.lighting} lighting setup`;
609
  }
610
  }
611
 
612
- // 7. Camera Angle (Perspective and Composition) - Visual storytelling
613
- if (params.angle) {
614
- if (params.angle === "low angle") {
615
- cameraPrompt += ", shot from a low-angle perspective looking upward to convey power, dominance, and heroic stature";
616
- } else if (params.angle === "bird's eye") {
617
- cameraPrompt += ", captured from a bird's eye view directly overhead to show scale, context, and spatial relationships";
618
- } else if (params.angle === "high angle") {
619
- cameraPrompt += ", shot from a high-angle perspective looking downward to create vulnerability and diminish subject importance";
620
- } else if (params.angle === "eye level") {
621
- cameraPrompt += ", shot from eye level for a neutral, natural perspective that connects with the viewer";
622
- } else if (params.angle === "over the shoulder") {
623
- cameraPrompt += ", captured from an over-the-shoulder angle to establish character relationships and dialogue dynamics";
624
- } else if (params.angle === "POV") {
625
- cameraPrompt += ", shot from a first-person POV perspective to immerse the viewer in the subject's experience";
626
- } else if (params.angle === "Dutch tilt") {
627
- cameraPrompt += ", shot with a Dutch tilt angle creating diagonal lines to convey unease, tension, and disorientation";
628
- } else if (params.angle === "worm's eye") {
629
- cameraPrompt += ", captured from a worm's eye view at ground level looking up for extreme dramatic impact and scale";
630
- } else {
631
- cameraPrompt += `, shot from ${params.angle} camera angle`;
632
- }
633
- }
634
-
635
- // 8. Bokeh Quality (Out-of-Focus Rendering) - Aesthetic enhancement
636
  if (params.bokeh) {
637
  if (params.bokeh === "Smooth Bokeh") {
638
  cameraPrompt += ", featuring silky smooth bokeh with perfectly circular out-of-focus highlights and creamy background transitions";
639
  } else if (params.bokeh === "Swirly Bokeh") {
640
- cameraPrompt += ", featuring artistic swirly bokeh with spiral-like background blur patterns and rotational distortion effects typical of vintage Petzval-style lenses";
641
  } else if (params.bokeh === "Hexagonal Bokeh") {
642
- cameraPrompt += ", featuring hexagonal bokeh with geometric six-sided highlight shapes formed by straight aperture blades typical of cinema lenses";
643
- } else if (params.bokeh === "Cat Eye Bokeh") {
644
- cameraPrompt += ", featuring cat's eye bokeh with elliptical highlight distortion toward frame edges caused by optical vignetting and field curvature";
645
- } else if (params.bokeh === "Bubble Bokeh") {
646
- cameraPrompt += ", featuring soap bubble bokeh with bright-edged circular highlights and hollow centers characteristic of Meyer Optik Trioplan lenses";
647
- } else if (params.bokeh === "Creamy Bokeh") {
648
- cameraPrompt += ", featuring creamy bokeh with smooth gradient transitions and soft edge rendering for professional portrait aesthetics";
649
  } else {
650
  cameraPrompt += `, featuring ${params.bokeh} quality bokeh rendering in out-of-focus areas`;
651
  }
652
  }
653
 
654
- // 9. Motion Blur Effects (Dynamic Movement) - Creative motion control
655
  if (params.motionBlur) {
656
  if (params.motionBlur === "Light Motion Blur") {
657
  cameraPrompt += ", with subtle motion blur suggesting gentle movement and adding cinematic flow to the image";
658
  } else if (params.motionBlur === "Medium Motion Blur") {
659
  cameraPrompt += ", with moderate motion blur creating dynamic energy and sense of movement typical of action cinematography";
660
  } else if (params.motionBlur === "Heavy Motion Blur") {
661
- cameraPrompt += ", with pronounced motion blur creating dramatic movement streaks and high-energy cinematic action, giving the moving background subjects a sense of motion blur movement";
662
  } else if (params.motionBlur === "Radial Blur") {
663
  cameraPrompt += ", with radial motion blur emanating from the center, creating explosive zoom-like movement and dramatic focus pull";
664
  } else if (params.motionBlur === "Zoom Blur") {
@@ -668,7 +589,16 @@ The result should look like all subjects were photographed together in the same
668
  }
669
  }
670
 
671
- // 10. Film Style Processing (Post-Production Look) - Final aesthetic treatment
 
 
 
 
 
 
 
 
 
672
  if (params.filmStyle && params.filmStyle !== "RAW") {
673
  cameraPrompt += `, processed with ${params.filmStyle} film aesthetic and color grading`;
674
  } else if (params.filmStyle === "RAW") {
 
487
  // Build cinematic camera prompt for professional, movie-like results
488
  let cameraPrompt = "CINEMATIC CAMERA TRANSFORMATION: Transform this image into a professional, cinematic photograph with movie-quality production values";
489
 
 
490
  if (params.focalLength) {
491
  if (params.focalLength === "8mm") {
492
  cameraPrompt += " shot with an ultra-wide 8mm fisheye lens creating dramatic barrel distortion, immersive perspective, and cinematic edge curvature typical of action sequences";
 
509
  }
510
  }
511
 
 
512
  if (params.aperture) {
513
+ if (params.aperture === "f/1.2") {
514
+ cameraPrompt += `, shot wide open at f/1.2 for extreme shallow depth of field, ethereal bokeh, and cinematic subject isolation with dreamy background blur`;
 
 
515
  } else if (params.aperture === "f/1.4") {
516
+ cameraPrompt += `, captured at f/1.4 for beautiful shallow depth of field, creating that signature cinematic look with smooth background separation`;
 
 
 
 
517
  } else if (params.aperture === "f/2.8") {
518
+ cameraPrompt += `, shot at f/2.8 for controlled depth of field, maintaining subject sharpness while creating pleasing background blur`;
519
  } else if (params.aperture === "f/4") {
520
+ cameraPrompt += `, filmed at f/4 for balanced depth of field, keeping key subjects sharp while maintaining some background separation`;
521
  } else if (params.aperture === "f/5.6") {
522
+ cameraPrompt += `, captured at f/5.6 for extended depth of field while maintaining cinematic quality and professional sharpness`;
523
+ } else if (params.aperture === "f/8" || params.aperture === "f/11") {
524
+ cameraPrompt += `, shot at ${params.aperture} for deep focus cinematography with tack-sharp details throughout the entire frame`;
 
 
 
 
 
 
525
  } else {
526
+ cameraPrompt += `, professionally exposed at ${params.aperture}`;
527
  }
528
  }
529
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
530
  if (params.iso) {
531
  if (params.iso === "ISO 100") {
532
  cameraPrompt += ", shot at ISO 100 for pristine image quality, zero noise, and maximum dynamic range typical of high-end cinema cameras";
533
+ } else if (params.iso === "ISO 200") {
534
+ cameraPrompt += ", captured at ISO 200 for clean shadows and optimal color reproduction with professional cinema camera characteristics";
535
  } else if (params.iso === "ISO 400") {
536
  cameraPrompt += ", filmed at ISO 400 for balanced exposure with minimal noise, the sweet spot for most cinematic scenarios";
537
+ } else if (params.iso === "ISO 800") {
538
+ cameraPrompt += ", shot at ISO 800 creating subtle film grain texture that adds cinematic character and organic feel";
539
+ } else if (params.iso === "ISO 1600") {
540
  cameraPrompt += ", captured at ISO 1600 with controlled grain for dramatic low-light cinematography and moody atmosphere";
541
+ } else if (params.iso === "ISO 3200") {
542
+ cameraPrompt += ", filmed at ISO 3200 with artistic grain structure for gritty, realistic cinema aesthetics";
543
  } else {
544
  cameraPrompt += `, shot at ${params.iso} with appropriate noise characteristics`;
545
  }
546
  }
547
 
 
 
 
 
 
 
548
  if (params.lighting) {
549
+ if (params.lighting === "Golden Hour") {
 
 
550
  cameraPrompt += ", cinematically lit during golden hour with warm, directional sunlight creating magical rim lighting, long shadows, and that coveted cinematic glow";
551
  } else if (params.lighting === "Blue Hour") {
552
  cameraPrompt += ", captured during blue hour with soft, even twilight illumination and cool color temperature for moody cinematic atmosphere";
553
+ } else if (params.lighting === "Studio") {
554
  cameraPrompt += ", professionally lit with multi-point studio lighting setup featuring key light, fill light, and rim light for commercial cinema quality";
555
+ } else if (params.lighting === "Natural") {
556
+ cameraPrompt += ", naturally lit with soft, diffused daylight providing even illumination and organic shadow patterns";
557
+ } else if (params.lighting === "Dramatic") {
558
+ cameraPrompt += ", dramatically lit with high-contrast lighting creating strong shadows and highlights for cinematic tension";
 
 
 
 
 
 
 
 
 
 
 
 
559
  } else {
560
  cameraPrompt += `, professionally lit with ${params.lighting} lighting setup`;
561
  }
562
  }
563
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
564
  if (params.bokeh) {
565
  if (params.bokeh === "Smooth Bokeh") {
566
  cameraPrompt += ", featuring silky smooth bokeh with perfectly circular out-of-focus highlights and creamy background transitions";
567
  } else if (params.bokeh === "Swirly Bokeh") {
568
+ cameraPrompt += ", featuring artistic swirly bokeh with spiral-like background blur patterns for unique visual character";
569
  } else if (params.bokeh === "Hexagonal Bokeh") {
570
+ cameraPrompt += ", featuring hexagonal bokeh with geometric six-sided highlight shapes typical of cinema lenses";
 
 
 
 
 
 
571
  } else {
572
  cameraPrompt += `, featuring ${params.bokeh} quality bokeh rendering in out-of-focus areas`;
573
  }
574
  }
575
 
 
576
  if (params.motionBlur) {
577
  if (params.motionBlur === "Light Motion Blur") {
578
  cameraPrompt += ", with subtle motion blur suggesting gentle movement and adding cinematic flow to the image";
579
  } else if (params.motionBlur === "Medium Motion Blur") {
580
  cameraPrompt += ", with moderate motion blur creating dynamic energy and sense of movement typical of action cinematography";
581
  } else if (params.motionBlur === "Heavy Motion Blur") {
582
+ cameraPrompt += ", with pronounced motion blur creating dramatic movement streaks and high-energy cinematic action";
583
  } else if (params.motionBlur === "Radial Blur") {
584
  cameraPrompt += ", with radial motion blur emanating from the center, creating explosive zoom-like movement and dramatic focus pull";
585
  } else if (params.motionBlur === "Zoom Blur") {
 
589
  }
590
  }
591
 
592
+ if (params.angle) {
593
+ if (params.angle === "Low Angle") {
594
+ cameraPrompt += ", shot from a low-angle perspective looking upward for dramatic impact";
595
+ } else if (params.angle === "Bird's Eye") {
596
+ cameraPrompt += ", captured from a bird's eye view directly overhead";
597
+ } else {
598
+ cameraPrompt += `, ${params.angle} camera angle`;
599
+ }
600
+ }
601
+
602
  if (params.filmStyle && params.filmStyle !== "RAW") {
603
  cameraPrompt += `, processed with ${params.filmStyle} film aesthetic and color grading`;
604
  } else if (params.filmStyle === "RAW") {
app/globals.css CHANGED
@@ -162,11 +162,13 @@ body {
162
 
163
  /* Nano Banana Editor - node visuals */
164
  .nb-node {
165
- background: hsl(var(--card) / 0.9);
166
- border: 1px solid hsl(var(--border) / 0.6);
167
- box-shadow: 0 10px 30px rgba(0,0,0,0.35);
 
168
  border-radius: var(--radius);
169
- backdrop-filter: blur(6px);
 
170
  /* Prevent blurring on zoom */
171
  image-rendering: -webkit-optimize-contrast;
172
  image-rendering: crisp-edges;
@@ -176,6 +178,12 @@ body {
176
  -moz-osx-font-smoothing: grayscale;
177
  backface-visibility: hidden;
178
  perspective: 1000px;
 
 
 
 
 
 
179
  }
180
 
181
  /* Prevent text selection on node elements except inputs */
@@ -189,54 +197,84 @@ body {
189
  .nb-node select {
190
  user-select: text;
191
  -webkit-user-select: text;
 
 
 
 
 
 
 
 
 
 
 
 
192
  }
 
193
  .nb-node .nb-header {
194
- background: linear-gradient(to bottom, hsl(var(--muted) / 0.35), hsl(var(--muted) / 0.08));
 
 
195
  }
 
196
  .nb-port {
197
- width: 20px;
198
- height: 20px;
199
  border-radius: 9999px;
200
- border: 3px solid rgba(255,255,255,0.6);
201
- background: hsl(var(--popover));
202
  cursor: crosshair;
203
  transition: transform 0.15s ease, background 0.15s ease, box-shadow 0.15s ease;
204
  position: relative;
205
  user-select: none;
206
  -webkit-user-select: none;
 
207
  }
 
208
  .nb-port:hover {
209
- transform: scale(1.25);
210
- background: hsl(var(--accent));
211
- box-shadow: 0 0 12px hsl(var(--ring) / 0.4);
212
  }
213
- .nb-port.out {
214
- border-color: hsl(var(--primary));
 
 
215
  }
216
- .nb-port.out:hover {
 
217
  background: hsl(var(--primary));
218
- box-shadow: 0 0 16px hsl(var(--primary) / 0.6);
219
  }
220
- .nb-port.in {
221
- border-color: #34d399;
 
 
222
  }
223
- .nb-port.in:hover {
224
- background: #34d399;
225
- box-shadow: 0 0 16px rgba(52,211,153,0.6);
 
226
  }
227
 
228
  .nb-line {
229
- stroke: #7c7c7c;
230
  stroke-width: 2.5;
 
 
 
 
 
 
 
231
  }
232
- .nb-line.active { stroke: #8b5cf6; }
233
 
234
  /* Canvas grid */
235
  .nb-canvas {
236
  background-color: hsl(var(--background));
237
  background-image:
238
- radial-gradient(circle at 1px 1px, hsl(var(--muted-foreground) / 0.18) 1px, transparent 0),
239
- radial-gradient(circle at 1px 1px, hsl(var(--muted-foreground) / 0.09) 1px, transparent 0);
240
  background-size: 20px 20px, 100px 100px;
241
  }
242
 
@@ -247,7 +285,8 @@ body {
247
  }
248
 
249
  .nb-node img {
250
- image-rendering: auto; /* Keep images smooth */
 
251
  image-rendering: -webkit-optimize-contrast;
252
  }
253
 
@@ -261,7 +300,7 @@ body {
261
  }
262
 
263
  /* Force GPU acceleration for transforms */
264
- .nb-canvas > div {
265
  transform-style: preserve-3d;
266
  -webkit-transform-style: preserve-3d;
267
  }
@@ -272,4 +311,4 @@ body {
272
  -webkit-backface-visibility: hidden;
273
  -webkit-transform: translateZ(0) scale(1.0, 1.0);
274
  }
275
- }
 
162
 
163
  /* Nano Banana Editor - node visuals */
164
  .nb-node {
165
+ background: hsl(var(--card) / 0.85);
166
+ color: hsl(var(--card-foreground));
167
+ border: 1px solid hsl(var(--border));
168
+ box-shadow: 0 10px 30px -10px hsl(var(--shadow) / 0.2), 0 4px 12px -4px hsl(var(--shadow) / 0.1);
169
  border-radius: var(--radius);
170
+ backdrop-filter: blur(12px);
171
+ -webkit-backdrop-filter: blur(12px);
172
  /* Prevent blurring on zoom */
173
  image-rendering: -webkit-optimize-contrast;
174
  image-rendering: crisp-edges;
 
178
  -moz-osx-font-smoothing: grayscale;
179
  backface-visibility: hidden;
180
  perspective: 1000px;
181
+ transition: box-shadow 0.2s ease, border-color 0.2s ease;
182
+ }
183
+
184
+ .nb-node:hover {
185
+ border-color: hsl(var(--ring) / 0.5);
186
+ box-shadow: 0 14px 40px -10px hsl(var(--shadow) / 0.3), 0 4px 12px -4px hsl(var(--shadow) / 0.2);
187
  }
188
 
189
  /* Prevent text selection on node elements except inputs */
 
197
  .nb-node select {
198
  user-select: text;
199
  -webkit-user-select: text;
200
+ background: hsl(var(--background) / 0.5);
201
+ color: hsl(var(--foreground));
202
+ border: 1px solid hsl(var(--border));
203
+ border-radius: calc(var(--radius) - 4px);
204
+ }
205
+
206
+ .nb-node input:focus,
207
+ .nb-node textarea:focus,
208
+ .nb-node select:focus {
209
+ outline: none;
210
+ border-color: hsl(var(--ring));
211
+ box-shadow: 0 0 0 2px hsl(var(--ring) / 0.2);
212
  }
213
+
214
  .nb-node .nb-header {
215
+ background: hsl(var(--muted) / 0.5);
216
+ border-bottom: 1px solid hsl(var(--border) / 0.5);
217
+ color: hsl(var(--foreground));
218
  }
219
+
220
  .nb-port {
221
+ width: 16px;
222
+ height: 16px;
223
  border-radius: 9999px;
224
+ border: 2px solid hsl(var(--background));
225
+ background: hsl(var(--muted-foreground));
226
  cursor: crosshair;
227
  transition: transform 0.15s ease, background 0.15s ease, box-shadow 0.15s ease;
228
  position: relative;
229
  user-select: none;
230
  -webkit-user-select: none;
231
+ box-shadow: 0 0 0 1px hsl(var(--border));
232
  }
233
+
234
  .nb-port:hover {
235
+ transform: scale(1.4);
236
+ background: hsl(var(--primary));
237
+ box-shadow: 0 0 0 4px hsl(var(--primary) / 0.2);
238
  }
239
+
240
+ .nb-port.out {
241
+ background: hsl(var(--primary));
242
+ border-color: hsl(var(--background));
243
  }
244
+
245
+ .nb-port.out:hover {
246
  background: hsl(var(--primary));
247
+ box-shadow: 0 0 0 4px hsl(var(--primary) / 0.3);
248
  }
249
+
250
+ .nb-port.in {
251
+ background: hsl(var(--secondary-foreground));
252
+ border-color: hsl(var(--background));
253
  }
254
+
255
+ .nb-port.in:hover {
256
+ background: hsl(var(--secondary-foreground));
257
+ box-shadow: 0 0 0 4px hsl(var(--secondary-foreground) / 0.3);
258
  }
259
 
260
  .nb-line {
261
+ stroke: hsl(var(--muted-foreground));
262
  stroke-width: 2.5;
263
+ transition: stroke 0.2s ease, stroke-width 0.2s ease;
264
+ }
265
+
266
+ .nb-line.active {
267
+ stroke: hsl(var(--primary));
268
+ stroke-width: 3.5;
269
+ filter: drop-shadow(0 0 4px hsl(var(--primary) / 0.5));
270
  }
 
271
 
272
  /* Canvas grid */
273
  .nb-canvas {
274
  background-color: hsl(var(--background));
275
  background-image:
276
+ radial-gradient(circle at 1px 1px, hsl(var(--muted-foreground) / 0.2) 1px, transparent 0),
277
+ radial-gradient(circle at 1px 1px, hsl(var(--muted-foreground) / 0.1) 1px, transparent 0);
278
  background-size: 20px 20px, 100px 100px;
279
  }
280
 
 
285
  }
286
 
287
  .nb-node img {
288
+ image-rendering: auto;
289
+ /* Keep images smooth */
290
  image-rendering: -webkit-optimize-contrast;
291
  }
292
 
 
300
  }
301
 
302
  /* Force GPU acceleration for transforms */
303
+ .nb-canvas>div {
304
  transform-style: preserve-3d;
305
  -webkit-transform-style: preserve-3d;
306
  }
 
311
  -webkit-backface-visibility: hidden;
312
  -webkit-transform: translateZ(0) scale(1.0, 1.0);
313
  }
314
+ }
app/layout.tsx CHANGED
@@ -13,8 +13,10 @@
13
  */
14
 
15
  import type { Metadata } from "next";
16
- import { Geist, Geist_Mono } from "next/font/google"; // Modern Google Fonts
17
- import "./globals.css"; // Tailwind CSS and global styles
 
 
18
 
19
  /**
20
  * Configure Geist Sans font
@@ -60,11 +62,19 @@ export default function RootLayout({
60
  children: React.ReactNode; // Type-safe children prop
61
  }>) {
62
  return (
63
- <html lang="en">
64
  <body
65
  className={`${geistSans.variable} ${geistMono.variable} antialiased bg-background text-foreground font-sans`}
66
  >
67
- {children}
 
 
 
 
 
 
 
 
68
  </body>
69
  </html>
70
  );
 
13
  */
14
 
15
  import type { Metadata } from "next";
16
+ import { Geist, Geist_Mono } from "next/font/google";
17
+ import "./globals.css";
18
+ import { ThemeProvider } from "@/components/theme-provider";
19
+ import { ModeToggle } from "@/components/mode-toggle";
20
 
21
  /**
22
  * Configure Geist Sans font
 
62
  children: React.ReactNode; // Type-safe children prop
63
  }>) {
64
  return (
65
+ <html lang="en" suppressHydrationWarning>
66
  <body
67
  className={`${geistSans.variable} ${geistMono.variable} antialiased bg-background text-foreground font-sans`}
68
  >
69
+ <ThemeProvider
70
+ attribute="class"
71
+ defaultTheme="system"
72
+ enableSystem
73
+ disableTransitionOnChange
74
+ >
75
+ {children}
76
+ <ModeToggle />
77
+ </ThemeProvider>
78
  </body>
79
  </html>
80
  );
app/nodes.tsx CHANGED
@@ -89,7 +89,7 @@ async function copyImageToClipboard(dataUrl: string) {
89
  // Fetch the data URL and convert it to a Blob object
90
  const response = await fetch(dataUrl); // Fetch the base64 data URL
91
  const blob = await response.blob(); // Convert response to Blob format
92
-
93
  // The browser clipboard API only supports PNG format for images
94
  // If the image is not PNG, we need to convert it first
95
  if (blob.type !== 'image/png') {
@@ -97,7 +97,7 @@ async function copyImageToClipboard(dataUrl: string) {
97
  const canvas = document.createElement('canvas'); // Create invisible canvas
98
  const ctx = canvas.getContext('2d'); // Get 2D drawing context
99
  const img = new Image(); // Create image element
100
-
101
  // Wait for the image to load before processing
102
  await new Promise((resolve) => {
103
  img.onload = () => { // When image loads
@@ -108,12 +108,12 @@ async function copyImageToClipboard(dataUrl: string) {
108
  };
109
  img.src = dataUrl; // Start loading the image
110
  });
111
-
112
  // Convert the canvas content to PNG blob
113
  const pngBlob = await new Promise<Blob>((resolve) => {
114
  canvas.toBlob((blob) => resolve(blob!), 'image/png'); // Convert canvas to PNG blob
115
  });
116
-
117
  // Write the converted PNG blob to clipboard
118
  await navigator.clipboard.write([
119
  new ClipboardItem({ 'image/png': pngBlob }) // Create clipboard item with PNG data
@@ -169,7 +169,7 @@ function NodeOutputSection({
169
  }) {
170
  // If no image is available, don't render anything
171
  if (!output) return null;
172
-
173
  return (
174
  // Main container for output section with vertical spacing
175
  <div className="space-y-2">
@@ -181,7 +181,7 @@ function NodeOutputSection({
181
  <div className="text-xs text-white/70">Output</div>
182
  </div>
183
  {/* Output image with click-to-copy functionality */}
184
- <img
185
  src={output} // Display the output image
186
  className="w-full rounded cursor-pointer hover:opacity-80 transition-all duration-200 hover:ring-2 hover:ring-white/30" // Styling with hover effects
187
  alt="Output" // Accessibility description
@@ -189,14 +189,14 @@ function NodeOutputSection({
189
  onContextMenu={(e) => { // Right-click context menu handler
190
  e.preventDefault(); // Prevent browser context menu from appearing
191
  copyImageToClipboard(output); // Copy image to clipboard
192
-
193
  // Show brief visual feedback when image is copied
194
  const img = e.currentTarget; // Get the image element
195
  const originalTitle = img.title; // Store original tooltip text
196
  img.title = "Copied to clipboard!"; // Update tooltip to show success
197
  img.style.filter = "brightness(1.2)"; // Brighten the image briefly
198
  img.style.transform = "scale(0.98)"; // Slightly scale down the image
199
-
200
  // Reset visual feedback after 300ms
201
  setTimeout(() => {
202
  img.title = originalTitle; // Restore original tooltip
@@ -270,12 +270,12 @@ function useNodeDrag(node: any, onUpdatePosition?: (id: string, x: number, y: nu
270
  const [localPos, setLocalPos] = useState({ x: node.x, y: node.y }); // Local position for smooth dragging
271
  const dragging = useRef(false); // Track drag state
272
  const start = useRef<{ sx: number; sy: number; ox: number; oy: number } | null>(null); // Drag start coordinates
273
-
274
  // Sync local position when parent position changes
275
  useEffect(() => {
276
  setLocalPos({ x: node.x, y: node.y });
277
  }, [node.x, node.y]);
278
-
279
  /**
280
  * Handle pointer down - start dragging
281
  * Captures the pointer and records starting positions
@@ -286,7 +286,7 @@ function useNodeDrag(node: any, onUpdatePosition?: (id: string, x: number, y: nu
286
  start.current = { sx: e.clientX, sy: e.clientY, ox: localPos.x, oy: localPos.y }; // Record start positions
287
  (e.currentTarget as HTMLElement).setPointerCapture(e.pointerId); // Capture pointer for reliable tracking
288
  };
289
-
290
  /**
291
  * Handle pointer move - update position during drag
292
  * Calculates new position based on mouse movement delta
@@ -300,7 +300,7 @@ function useNodeDrag(node: any, onUpdatePosition?: (id: string, x: number, y: nu
300
  setLocalPos({ x: newX, y: newY }); // Update local position for immediate visual feedback
301
  if (onUpdatePosition) onUpdatePosition(node.id, newX, newY); // Update parent state
302
  };
303
-
304
  /**
305
  * Handle pointer up - end dragging
306
  * Releases pointer capture and resets drag state
@@ -310,7 +310,7 @@ function useNodeDrag(node: any, onUpdatePosition?: (id: string, x: number, y: nu
310
  start.current = null; // Clear start position
311
  (e.currentTarget as HTMLElement).releasePointerCapture(e.pointerId); // Release pointer
312
  };
313
-
314
  return { localPos, onPointerDown, onPointerMove, onPointerUp };
315
  }
316
 
@@ -331,14 +331,14 @@ function useNodeDrag(node: any, onUpdatePosition?: (id: string, x: number, y: nu
331
  * @param onStartConnection Callback when starting a connection from this port
332
  * @param onEndConnection Callback when ending a connection at this port
333
  */
334
- function Port({
335
- className,
336
  nodeId,
337
  isOutput,
338
  onStartConnection,
339
  onEndConnection,
340
  onDisconnect
341
- }: {
342
  className?: string;
343
  nodeId?: string;
344
  isOutput?: boolean;
@@ -355,7 +355,7 @@ function Port({
355
  onStartConnection(nodeId); // Start connection from this output port
356
  }
357
  };
358
-
359
  /**
360
  * Handle ending a connection (pointer up on input port)
361
  */
@@ -378,15 +378,15 @@ function Port({
378
  };
379
 
380
  return (
381
- <div
382
  className={cx("nb-port", className)} // Combine base port classes with custom ones
383
  onPointerDown={handlePointerDown} // Start connection drag from output ports
384
  onPointerUp={handlePointerUp} // End connection drag at input ports
385
  onPointerEnter={handlePointerUp} // Also accept connections on hover (better UX)
386
  onClick={handleClick} // Allow clicking input ports to disconnect
387
  title={
388
- isOutput
389
- ? "Drag from here to connect to another node's input"
390
  : "Drop connections here or click to disconnect"
391
  }
392
  />
@@ -429,11 +429,10 @@ export function BackgroundNodeView({
429
  onEndConnection,
430
  onProcess,
431
  onUpdatePosition,
432
- apiToken
433
  }: any) {
434
  // Use custom drag hook to handle node positioning in the editor
435
  const { localPos, onPointerDown, onPointerMove, onPointerUp } = useNodeDrag(node, onUpdatePosition);
436
-
437
  /**
438
  * Handle image file upload from file input
439
  * Converts uploaded file to base64 data URL for storage and preview
@@ -447,14 +446,14 @@ export function BackgroundNodeView({
447
  reader.readAsDataURL(e.target.files[0]); // Convert file to base64
448
  }
449
  };
450
-
451
  /**
452
  * Handle image paste from clipboard
453
  * Supports both image files and image URLs pasted from clipboard
454
  */
455
  const handleImagePaste = (e: React.ClipboardEvent) => {
456
  const items = e.clipboardData.items; // Get clipboard items
457
-
458
  // First, try to find image files in clipboard
459
  for (let i = 0; i < items.length; i++) {
460
  if (items[i].type.startsWith("image/")) { // Check if item is an image
@@ -469,14 +468,14 @@ export function BackgroundNodeView({
469
  }
470
  }
471
  }
472
-
473
  // If no image files, check for text that might be image URLs
474
  const text = e.clipboardData.getData("text"); // Get text from clipboard
475
  if (text && (text.startsWith("http") || text.startsWith("data:image"))) {
476
  onUpdate(node.id, { customBackgroundImage: text }); // Use URL directly
477
  }
478
  };
479
-
480
  const handleDrop = (e: React.DragEvent) => {
481
  e.preventDefault();
482
  const files = e.dataTransfer.files;
@@ -488,16 +487,16 @@ export function BackgroundNodeView({
488
  reader.readAsDataURL(files[0]);
489
  }
490
  };
491
-
492
  return (
493
- <div
494
- className="nb-node absolute text-white w-[320px]"
495
  style={{ left: localPos.x, top: localPos.y }}
496
  onDrop={handleDrop}
497
  onDragOver={(e) => e.preventDefault()}
498
  onPaste={handleImagePaste}
499
  >
500
- <div
501
  className="nb-header px-3 py-2 flex items-center justify-between rounded-t-[14px] cursor-grab active:cursor-grabbing"
502
  onPointerDown={onPointerDown}
503
  onPointerMove={onPointerMove}
@@ -540,7 +539,7 @@ export function BackgroundNodeView({
540
  </Button>
541
  </div>
542
  )}
543
- <Select
544
  className="w-full"
545
  value={node.backgroundType || "color"}
546
  onChange={(e) => onUpdate(node.id, { backgroundType: (e.target as HTMLSelectElement).value })}
@@ -553,7 +552,7 @@ export function BackgroundNodeView({
553
  <option value="upload">Upload Image</option>
554
  <option value="custom">Custom Prompt</option>
555
  </Select>
556
-
557
  {node.backgroundType === "color" && (
558
  <ColorPicker
559
  className="w-full"
@@ -561,11 +560,11 @@ export function BackgroundNodeView({
561
  onChange={(e) => onUpdate(node.id, { backgroundColor: (e.target as HTMLInputElement).value })}
562
  />
563
  )}
564
-
565
  {node.backgroundType === "gradient" && (
566
  <div className="space-y-3">
567
  <label className="text-xs text-white/70">Gradient Direction</label>
568
- <Select
569
  className="w-full"
570
  value={node.gradientDirection || "to right"}
571
  onChange={(e) => onUpdate(node.id, { gradientDirection: (e.target as HTMLSelectElement).value })}
@@ -592,10 +591,10 @@ export function BackgroundNodeView({
592
  value={node.gradientEndColor || "#4ecdc4"}
593
  onChange={(e) => onUpdate(node.id, { gradientEndColor: (e.target as HTMLInputElement).value })}
594
  />
595
- <div
596
  className="w-full h-8 rounded-md border border-white/20"
597
  style={{
598
- background: node.gradientDirection === "radial"
599
  ? `radial-gradient(circle, ${node.gradientStartColor || "#ff6b6b"} 0%, ${node.gradientEndColor || "#4ecdc4"} 100%)`
600
  : `linear-gradient(${node.gradientDirection || "to right"}, ${node.gradientStartColor || "#ff6b6b"} 0%, ${node.gradientEndColor || "#4ecdc4"} 100%)`
601
  }}
@@ -603,9 +602,9 @@ export function BackgroundNodeView({
603
  />
604
  </div>
605
  )}
606
-
607
  {node.backgroundType === "image" && (
608
- <Select
609
  className="w-full"
610
  value={node.backgroundImage || ""}
611
  onChange={(e) => onUpdate(node.id, { backgroundImage: (e.target as HTMLSelectElement).value })}
@@ -618,11 +617,11 @@ export function BackgroundNodeView({
618
  <option value="city">City Skyline</option>
619
  </Select>
620
  )}
621
-
622
  {node.backgroundType === "city" && (
623
  <div className="space-y-3">
624
  <label className="text-xs text-white/70">City Scene Type</label>
625
- <Select
626
  className="w-full"
627
  value={node.citySceneType || "busy_street"}
628
  onChange={(e) => onUpdate(node.id, { citySceneType: (e.target as HTMLSelectElement).value })}
@@ -640,7 +639,7 @@ export function BackgroundNodeView({
640
  <option value="matrix_alley">Matrix Style Urban Alley</option>
641
  </Select>
642
  <label className="text-xs text-white/70">Time of Day</label>
643
- <Select
644
  className="w-full"
645
  value={node.cityTimeOfDay || "daytime"}
646
  onChange={(e) => onUpdate(node.id, { cityTimeOfDay: (e.target as HTMLSelectElement).value })}
@@ -654,11 +653,11 @@ export function BackgroundNodeView({
654
  </Select>
655
  </div>
656
  )}
657
-
658
  {node.backgroundType === "photostudio" && (
659
  <div className="space-y-3">
660
  <label className="text-xs text-white/70">Studio Setup</label>
661
- <Select
662
  className="w-full"
663
  value={node.studioSetup || "white_seamless"}
664
  onChange={(e) => onUpdate(node.id, { studioSetup: (e.target as HTMLSelectElement).value })}
@@ -681,7 +680,7 @@ export function BackgroundNodeView({
681
  </>
682
  )}
683
  <label className="text-xs text-white/70">Lighting Setup</label>
684
- <Select
685
  className="w-full"
686
  value={node.studioLighting || "key_fill"}
687
  onChange={(e) => onUpdate(node.id, { studioLighting: (e.target as HTMLSelectElement).value })}
@@ -704,13 +703,13 @@ export function BackgroundNodeView({
704
  </div>
705
  </div>
706
  )}
707
-
708
  {node.backgroundType === "upload" && (
709
  <div className="space-y-2">
710
  {node.customBackgroundImage ? (
711
  <div className="relative">
712
  <img src={node.customBackgroundImage} className="w-full rounded" alt="Custom Background" />
713
- <Button
714
  variant="destructive"
715
  size="sm"
716
  className="absolute top-2 right-2"
@@ -735,7 +734,7 @@ export function BackgroundNodeView({
735
  )}
736
  </div>
737
  )}
738
-
739
  {node.backgroundType === "custom" && (
740
  <div className="space-y-2">
741
  <Textarea
@@ -754,18 +753,17 @@ export function BackgroundNodeView({
754
  alert('Please enter a background description first');
755
  return;
756
  }
757
-
758
  try {
759
  const response = await fetch('/api/improve-prompt', {
760
  method: 'POST',
761
  headers: { 'Content-Type': 'application/json' },
762
  body: JSON.stringify({
763
  prompt: node.customPrompt,
764
- type: 'background',
765
- apiToken: apiToken || undefined
766
  })
767
  });
768
-
769
  if (response.ok) {
770
  const { improvedPrompt } = await response.json();
771
  onUpdate(node.id, { customPrompt: improvedPrompt });
@@ -783,8 +781,8 @@ export function BackgroundNodeView({
783
  </Button>
784
  </div>
785
  )}
786
-
787
- <Button
788
  className="w-full"
789
  onClick={() => onProcess(node.id)}
790
  disabled={node.isRunning}
@@ -792,7 +790,7 @@ export function BackgroundNodeView({
792
  >
793
  {node.isRunning ? "Processing..." : "Apply Background"}
794
  </Button>
795
-
796
  <NodeOutputSection
797
  nodeId={node.id}
798
  output={node.output}
@@ -836,7 +834,7 @@ export function BackgroundNodeView({
836
  export function ClothesNodeView({ node, onDelete, onUpdate, onStartConnection, onEndConnection, onProcess, onUpdatePosition, getNodeHistoryInfo, navigateNodeHistory, getCurrentNodeImage }: any) {
837
  // Handle node dragging functionality
838
  const { localPos, onPointerDown, onPointerMove, onPointerUp } = useNodeDrag(node, onUpdatePosition);
839
-
840
  /**
841
  * Preset clothing options available for quick selection
842
  * Each preset includes a display name and path to the reference image
@@ -880,16 +878,16 @@ export function ClothesNodeView({ node, onDelete, onUpdate, onStartConnection, o
880
  const selectPreset = (presetPath: string, presetName: string) => {
881
  onUpdate(node.id, { clothesImage: presetPath, selectedPreset: presetName });
882
  };
883
-
884
  return (
885
- <div
886
- className="nb-node absolute text-white w-[320px]"
887
  style={{ left: localPos.x, top: localPos.y }}
888
  onDrop={onDrop}
889
  onDragOver={(e) => e.preventDefault()}
890
  onPaste={onPaste}
891
  >
892
- <div
893
  className="nb-header px-3 py-2 flex items-center justify-between rounded-t-[14px] cursor-grab active:cursor-grabbing"
894
  onPointerDown={onPointerDown}
895
  onPointerMove={onPointerMove}
@@ -932,18 +930,17 @@ export function ClothesNodeView({ node, onDelete, onUpdate, onStartConnection, o
932
  </Button>
933
  </div>
934
  )}
935
- <div className="text-xs text-white/70">Clothes Reference</div>
936
-
937
  {/* Preset clothes options */}
938
  <div className="flex gap-2">
939
  {presetClothes.map((preset) => (
940
  <button
941
  key={preset.name}
942
- className={`flex-1 p-2 rounded border ${
943
- node.selectedPreset === preset.name
944
- ? "border-indigo-400 bg-indigo-500/20"
945
- : "border-white/20 hover:border-white/40"
946
- }`}
947
  onClick={() => selectPreset(preset.path, preset.name)}
948
  >
949
  <img src={preset.path} alt={preset.name} className="w-full h-28 object-contain rounded mb-1" />
@@ -951,14 +948,14 @@ export function ClothesNodeView({ node, onDelete, onUpdate, onStartConnection, o
951
  </button>
952
  ))}
953
  </div>
954
-
955
- <div className="text-xs text-white/50 text-center">— or —</div>
956
-
957
  {/* Custom image upload */}
958
  {node.clothesImage && !node.selectedPreset ? (
959
  <div className="relative">
960
  <img src={node.clothesImage} className="w-full rounded" alt="Clothes" />
961
- <Button
962
  variant="destructive"
963
  size="sm"
964
  className="absolute top-2 right-2"
@@ -981,15 +978,15 @@ export function ClothesNodeView({ node, onDelete, onUpdate, onStartConnection, o
981
  }
982
  }}
983
  />
984
- <div className="border-2 border-dashed border-white/20 rounded-lg p-6 text-center cursor-pointer hover:border-white/40 transition-colors">
985
- <div className="text-white/40 text-lg mb-2">📁</div>
986
- <p className="text-sm text-white/70 font-medium">Drop, upload, or paste clothes image</p>
987
- <p className="text-xs text-white/50 mt-1">JPG, PNG, WebP supported</p>
988
  </div>
989
  </label>
990
  ) : null}
991
-
992
- <Button
993
  className="w-full"
994
  onClick={() => onProcess(node.id)}
995
  disabled={node.isRunning || !node.clothesImage}
@@ -1043,10 +1040,10 @@ export function ClothesNodeView({ node, onDelete, onUpdate, onStartConnection, o
1043
  export function AgeNodeView({ node, onDelete, onUpdate, onStartConnection, onEndConnection, onProcess, onUpdatePosition, getNodeHistoryInfo, navigateNodeHistory, getCurrentNodeImage }: any) {
1044
  // Handle node dragging functionality
1045
  const { localPos, onPointerDown, onPointerMove, onPointerUp } = useNodeDrag(node, onUpdatePosition);
1046
-
1047
  return (
1048
- <div className="nb-node absolute text-white w-[280px]" style={{ left: localPos.x, top: localPos.y }}>
1049
- <div
1050
  className="nb-header px-3 py-2 flex items-center justify-between rounded-t-[14px] cursor-grab active:cursor-grabbing"
1051
  onPointerDown={onPointerDown}
1052
  onPointerMove={onPointerMove}
@@ -1099,7 +1096,7 @@ export function AgeNodeView({ node, onDelete, onUpdate, onStartConnection, onEnd
1099
  onChange={(e) => onUpdate(node.id, { targetAge: parseInt((e.target as HTMLInputElement).value) })}
1100
  />
1101
  </div>
1102
- <Button
1103
  className="w-full"
1104
  onClick={() => onProcess(node.id)}
1105
  disabled={node.isRunning}
@@ -1158,31 +1155,31 @@ export function AgeNodeView({ node, onDelete, onUpdate, onStartConnection, onEnd
1158
  export function CameraNodeView({ node, onDelete, onUpdate, onStartConnection, onEndConnection, onProcess, onUpdatePosition, getNodeHistoryInfo, navigateNodeHistory, getCurrentNodeImage }: any) {
1159
  // Handle node dragging functionality
1160
  const { localPos, onPointerDown, onPointerMove, onPointerUp } = useNodeDrag(node, onUpdatePosition);
1161
-
1162
  // Camera lens focal length options (affects field of view and perspective)
1163
- const focalLengths = ["None", "8mm", "12mm", "24mm", "35mm", "50mm", "85mm", "100mm", "135mm"];
1164
-
1165
  // Aperture settings (affects depth of field and exposure)
1166
- const apertures = ["None", "f/0.95", "f/1.2", "f/1.4", "f/1.8", "f/2", "f/2.8", "f/4", "f/5.6","f/11"];
1167
-
1168
  // Shutter speed options (affects motion blur and exposure)
1169
- const shutterSpeeds = ["None", "1/1000s", "1/250s","1/30s","1/15", "5s", ];
1170
-
1171
  // White balance presets for different lighting conditions
1172
  const whiteBalances = ["None", "2800K candlelight", "3200K tungsten", "4000K fluorescent", "5600K daylight", "6500K cloudy", "7000K shade", "8000K blue sky"];
1173
-
1174
  // Camera angle and perspective options
1175
  const angles = ["None", "eye level", "low angle", "high angle", "Dutch tilt", "bird's eye", "worm's eye", "over the shoulder", "POV"];
1176
-
1177
  // ISO sensitivity values (affects image noise and exposure)
1178
- const isoValues = ["None", "ISO 100", "ISO 400", "ISO 1600", "ISO 6400"];
1179
-
1180
  // Film stock emulation for different photographic styles
1181
- const filmStyles = ["None","RAW","Kodak Portra", "Fuji Velvia", "Kodak Gold 200","Black & White", "Sepia", "Vintage", "Film Noir"];
1182
-
1183
  // Professional lighting setups and natural lighting conditions
1184
  const lightingTypes = ["None", "Natural Light", "Golden Hour", "Blue Hour", "Studio Lighting", "Rembrandt", "Split Lighting", "Butterfly Lighting", "Loop Lighting", "Rim Lighting", "Silhouette", "High Key", "Low Key"];
1185
-
1186
  // Bokeh (background blur) styles for different lens characteristics
1187
  const bokehStyles = ["None", "Smooth Bokeh", "Swirly Bokeh", "Hexagonal Bokeh", "Cat Eye Bokeh", "Bubble Bokeh"];
1188
 
@@ -1190,8 +1187,8 @@ export function CameraNodeView({ node, onDelete, onUpdate, onStartConnection, on
1190
  const motionBlurOptions = ["None", "Light Motion Blur", "Medium Motion Blur", "Heavy Motion Blur", "Radial Blur", "Zoom Blur"];
1191
 
1192
  return (
1193
- <div className="nb-node absolute text-white w-[360px]" style={{ left: localPos.x, top: localPos.y }}>
1194
- <div
1195
  className="nb-header px-3 py-2 flex items-center justify-between rounded-t-[14px] cursor-grab active:cursor-grabbing"
1196
  onPointerDown={onPointerDown}
1197
  onPointerMove={onPointerMove}
@@ -1234,12 +1231,12 @@ export function CameraNodeView({ node, onDelete, onUpdate, onStartConnection, on
1234
  </div>
1235
  )}
1236
  {/* Basic Camera Settings Section */}
1237
- <div className="text-xs text-white/50 font-semibold mb-1">Basic Settings</div>
1238
  <div className="grid grid-cols-2 gap-2"> {/* 2-column grid for compact layout */}
1239
  {/* Motion Blur Control - adds movement effects */}
1240
  <div>
1241
- <label className="text-xs text-white/70">Motion Blur</label>
1242
- <Select
1243
  className="w-full"
1244
  value={node.motionBlur || "None"} // Default to "None" if not set
1245
  onChange={(e) => onUpdate(node.id, { motionBlur: (e.target as HTMLSelectElement).value })}
@@ -1250,8 +1247,8 @@ export function CameraNodeView({ node, onDelete, onUpdate, onStartConnection, on
1250
  </div>
1251
  {/* Focal Length Control - affects field of view and perspective */}
1252
  <div>
1253
- <label className="text-xs text-white/70">Focal Length</label>
1254
- <Select
1255
  className="w-full"
1256
  value={node.focalLength || "None"} // Default to "None" if not set
1257
  onChange={(e) => onUpdate(node.id, { focalLength: (e.target as HTMLSelectElement).value })}
@@ -1260,11 +1257,11 @@ export function CameraNodeView({ node, onDelete, onUpdate, onStartConnection, on
1260
  {focalLengths.map(f => <option key={f} value={f}>{f}</option>)}
1261
  </Select>
1262
  </div>
1263
-
1264
  {/* Aperture Control - affects depth of field and exposure */}
1265
  <div>
1266
- <label className="text-xs text-white/70">Aperture</label>
1267
- <Select
1268
  className="w-full"
1269
  value={node.aperture || "None"} // Default to "None" if not set
1270
  onChange={(e) => onUpdate(node.id, { aperture: (e.target as HTMLSelectElement).value })}
@@ -1273,11 +1270,11 @@ export function CameraNodeView({ node, onDelete, onUpdate, onStartConnection, on
1273
  {apertures.map(a => <option key={a} value={a}>{a}</option>)}
1274
  </Select>
1275
  </div>
1276
-
1277
  {/* Shutter Speed Control - affects motion blur and exposure */}
1278
  <div>
1279
- <label className="text-xs text-white/70">Shutter Speed</label>
1280
- <Select
1281
  className="w-full"
1282
  value={node.shutterSpeed || "None"} // Default to "None" if not set
1283
  onChange={(e) => onUpdate(node.id, { shutterSpeed: (e.target as HTMLSelectElement).value })}
@@ -1286,11 +1283,11 @@ export function CameraNodeView({ node, onDelete, onUpdate, onStartConnection, on
1286
  {shutterSpeeds.map(s => <option key={s} value={s}>{s}</option>)}
1287
  </Select>
1288
  </div>
1289
-
1290
  {/* ISO Control - affects sensor sensitivity and image noise */}
1291
  <div>
1292
- <label className="text-xs text-white/70">ISO</label>
1293
- <Select
1294
  className="w-full"
1295
  value={node.iso || "None"} // Default to "None" if not set
1296
  onChange={(e) => onUpdate(node.id, { iso: (e.target as HTMLSelectElement).value })}
@@ -1300,13 +1297,13 @@ export function CameraNodeView({ node, onDelete, onUpdate, onStartConnection, on
1300
  </Select>
1301
  </div>
1302
  </div>
1303
-
1304
  {/* Creative Settings */}
1305
  <div className="text-xs text-white/50 font-semibold mb-1 mt-3">Creative Settings</div>
1306
  <div className="grid grid-cols-2 gap-2">
1307
  <div>
1308
- <label className="text-xs text-white/70">White Balance</label>
1309
- <Select
1310
  className="w-full"
1311
  value={node.whiteBalance || "None"}
1312
  onChange={(e) => onUpdate(node.id, { whiteBalance: (e.target as HTMLSelectElement).value })}
@@ -1315,8 +1312,8 @@ export function CameraNodeView({ node, onDelete, onUpdate, onStartConnection, on
1315
  </Select>
1316
  </div>
1317
  <div>
1318
- <label className="text-xs text-white/70">Film Style</label>
1319
- <Select
1320
  className="w-full"
1321
  value={node.filmStyle || "None"}
1322
  onChange={(e) => onUpdate(node.id, { filmStyle: (e.target as HTMLSelectElement).value })}
@@ -1325,8 +1322,8 @@ export function CameraNodeView({ node, onDelete, onUpdate, onStartConnection, on
1325
  </Select>
1326
  </div>
1327
  <div>
1328
- <label className="text-xs text-white/70">Lighting</label>
1329
- <Select
1330
  className="w-full"
1331
  value={node.lighting || "None"}
1332
  onChange={(e) => onUpdate(node.id, { lighting: (e.target as HTMLSelectElement).value })}
@@ -1335,8 +1332,8 @@ export function CameraNodeView({ node, onDelete, onUpdate, onStartConnection, on
1335
  </Select>
1336
  </div>
1337
  <div>
1338
- <label className="text-xs text-white/70">Bokeh Style</label>
1339
- <Select
1340
  className="w-full"
1341
  value={node.bokeh || "None"}
1342
  onChange={(e) => onUpdate(node.id, { bokeh: (e.target as HTMLSelectElement).value })}
@@ -1345,13 +1342,13 @@ export function CameraNodeView({ node, onDelete, onUpdate, onStartConnection, on
1345
  </Select>
1346
  </div>
1347
  </div>
1348
-
1349
  {/* Composition Settings */}
1350
- <div className="text-xs text-white/50 font-semibold mb-1 mt-3">Composition</div>
1351
  <div className="grid grid-cols-2 gap-2">
1352
  <div>
1353
- <label className="text-xs text-white/70">Camera Angle</label>
1354
- <Select
1355
  className="w-full"
1356
  value={node.angle || "None"}
1357
  onChange={(e) => onUpdate(node.id, { angle: (e.target as HTMLSelectElement).value })}
@@ -1360,7 +1357,7 @@ export function CameraNodeView({ node, onDelete, onUpdate, onStartConnection, on
1360
  </Select>
1361
  </div>
1362
  </div>
1363
- <Button
1364
  className="w-full"
1365
  onClick={() => onProcess(node.id)}
1366
  disabled={node.isRunning}
@@ -1419,19 +1416,19 @@ export function CameraNodeView({ node, onDelete, onUpdate, onStartConnection, on
1419
  export function FaceNodeView({ node, onDelete, onUpdate, onStartConnection, onEndConnection, onProcess, onUpdatePosition, getNodeHistoryInfo, navigateNodeHistory, getCurrentNodeImage }: any) {
1420
  // Handle node dragging functionality
1421
  const { localPos, onPointerDown, onPointerMove, onPointerUp } = useNodeDrag(node, onUpdatePosition);
1422
-
1423
  // Available hairstyle options for hair modification
1424
  const hairstyles = ["None", "short", "long", "curly", "straight", "bald", "mohawk", "ponytail"];
1425
-
1426
  // Facial expression options for emotion changes
1427
  const expressions = ["None", "happy", "serious", "smiling", "laughing", "sad", "surprised", "angry"];
1428
-
1429
  // Beard and facial hair styling options
1430
  const beardStyles = ["None", "stubble", "goatee", "full beard", "mustache", "clean shaven"];
1431
 
1432
  return (
1433
- <div className="nb-node absolute text-white w-[340px]" style={{ left: localPos.x, top: localPos.y }}>
1434
- <div
1435
  className="nb-header px-3 py-2 flex items-center justify-between rounded-t-[14px] cursor-grab active:cursor-grabbing"
1436
  onPointerDown={onPointerDown}
1437
  onPointerMove={onPointerMove}
@@ -1477,10 +1474,10 @@ export function FaceNodeView({ node, onDelete, onUpdate, onStartConnection, onEn
1477
  <div className="space-y-2">
1478
  {/* Pimple removal option for skin enhancement */}
1479
  <label className="flex items-center gap-2 text-xs cursor-pointer">
1480
- <Checkbox
1481
  checked={node.faceOptions?.removePimples || false} // Default to false if not set
1482
- onChange={(e) => onUpdate(node.id, {
1483
- faceOptions: {
1484
  ...node.faceOptions, // Preserve existing options
1485
  removePimples: (e.target as HTMLInputElement).checked // Update pimple removal setting
1486
  }
@@ -1488,13 +1485,13 @@ export function FaceNodeView({ node, onDelete, onUpdate, onStartConnection, onEn
1488
  />
1489
  Remove pimples {/* Clean up skin imperfections */}
1490
  </label>
1491
-
1492
  {/* Sunglasses addition option */}
1493
  <label className="flex items-center gap-2 text-xs cursor-pointer">
1494
- <Checkbox
1495
  checked={node.faceOptions?.addSunglasses || false} // Default to false if not set
1496
- onChange={(e) => onUpdate(node.id, {
1497
- faceOptions: {
1498
  ...node.faceOptions, // Preserve existing options
1499
  addSunglasses: (e.target as HTMLInputElement).checked // Update sunglasses setting
1500
  }
@@ -1502,13 +1499,13 @@ export function FaceNodeView({ node, onDelete, onUpdate, onStartConnection, onEn
1502
  />
1503
  Add sunglasses {/* Add stylish sunglasses accessory */}
1504
  </label>
1505
-
1506
  {/* Hat addition option */}
1507
  <label className="flex items-center gap-2 text-xs cursor-pointer">
1508
- <Checkbox
1509
  checked={node.faceOptions?.addHat || false} // Default to false if not set
1510
- onChange={(e) => onUpdate(node.id, {
1511
- faceOptions: {
1512
  ...node.faceOptions, // Preserve existing options
1513
  addHat: (e.target as HTMLInputElement).checked // Update hat setting
1514
  }
@@ -1517,39 +1514,39 @@ export function FaceNodeView({ node, onDelete, onUpdate, onStartConnection, onEn
1517
  Add hat {/* Add hat accessory */}
1518
  </label>
1519
  </div>
1520
-
1521
  <div>
1522
  <label className="text-xs text-white/70">Hairstyle</label>
1523
- <Select
1524
  className="w-full"
1525
  value={node.faceOptions?.changeHairstyle || "None"}
1526
- onChange={(e) => onUpdate(node.id, {
1527
  faceOptions: { ...node.faceOptions, changeHairstyle: (e.target as HTMLSelectElement).value }
1528
  })}
1529
  >
1530
  {hairstyles.map(h => <option key={h} value={h}>{h}</option>)}
1531
  </Select>
1532
  </div>
1533
-
1534
  <div>
1535
  <label className="text-xs text-white/70">Expression</label>
1536
- <Select
1537
  className="w-full"
1538
  value={node.faceOptions?.facialExpression || "None"}
1539
- onChange={(e) => onUpdate(node.id, {
1540
  faceOptions: { ...node.faceOptions, facialExpression: (e.target as HTMLSelectElement).value }
1541
  })}
1542
  >
1543
  {expressions.map(e => <option key={e} value={e}>{e}</option>)}
1544
  </Select>
1545
  </div>
1546
-
1547
  <div>
1548
  <label className="text-xs text-white/70">Beard</label>
1549
- <Select
1550
  className="w-full"
1551
  value={node.faceOptions?.beardStyle || "None"}
1552
- onChange={(e) => onUpdate(node.id, {
1553
  faceOptions: { ...node.faceOptions, beardStyle: (e.target as HTMLSelectElement).value }
1554
  })}
1555
  >
@@ -1559,18 +1556,17 @@ export function FaceNodeView({ node, onDelete, onUpdate, onStartConnection, onEn
1559
 
1560
  {/* Makeup Selection Section - allows users to choose makeup application */}
1561
  <div>
1562
- <label className="text-xs text-white/70">Makeup</label>
1563
  <div className="grid grid-cols-2 gap-2 mt-2"> {/* 2-column grid for makeup options */}
1564
-
1565
  {/* No Makeup Option - removes or prevents makeup application */}
1566
  <button
1567
- className={`p-1 rounded border transition-colors ${
1568
- !node.faceOptions?.selectedMakeup || node.faceOptions?.selectedMakeup === "None"
1569
- ? "border-indigo-400 bg-indigo-500/20" // Highlighted when selected
1570
- : "border-white/20 hover:border-white/40" // Default and hover states
1571
- }`}
1572
- onClick={() => onUpdate(node.id, {
1573
- faceOptions: {
1574
  ...node.faceOptions, // Preserve other face options
1575
  selectedMakeup: "None", // Set makeup to none
1576
  makeupImage: null // Clear makeup image reference
@@ -1579,21 +1575,20 @@ export function FaceNodeView({ node, onDelete, onUpdate, onStartConnection, onEn
1579
  title="No makeup application - natural look"
1580
  >
1581
  {/* Visual placeholder for no makeup option */}
1582
- <div className="w-full h-24 flex items-center justify-center text-xs text-white/60 border border-dashed border-white/20 rounded mb-1">
1583
  No Makeup {/* Text indicator for no makeup */}
1584
  </div>
1585
  <div className="text-xs">None</div> {/* Option label */}
1586
  </button>
1587
-
1588
  {/* Makeup Application Option - applies preset makeup style */}
1589
  <button
1590
- className={`p-1 rounded border transition-colors ${
1591
- node.faceOptions?.selectedMakeup === "Makeup"
1592
- ? "border-indigo-400 bg-indigo-500/20" // Highlighted when selected
1593
- : "border-white/20 hover:border-white/40" // Default and hover states
1594
- }`}
1595
- onClick={() => onUpdate(node.id, {
1596
- faceOptions: {
1597
  ...node.faceOptions, // Preserve other face options
1598
  selectedMakeup: "Makeup", // Set makeup type
1599
  makeupImage: "/makeup/makeup1.png" // Reference image for makeup style
@@ -1602,9 +1597,9 @@ export function FaceNodeView({ node, onDelete, onUpdate, onStartConnection, onEn
1602
  title="Apply makeup style - enhances facial features"
1603
  >
1604
  {/* Makeup preview image */}
1605
- <img
1606
- src="/makeup/makeup1.png"
1607
- alt="Makeup Style Preview"
1608
  className="w-full h-24 object-contain rounded mb-1"
1609
  title="Preview of makeup style that will be applied"
1610
  />
@@ -1612,8 +1607,8 @@ export function FaceNodeView({ node, onDelete, onUpdate, onStartConnection, onEn
1612
  </button>
1613
  </div>
1614
  </div>
1615
-
1616
- <Button
1617
  className="w-full"
1618
  onClick={() => onProcess(node.id)}
1619
  disabled={node.isRunning}
@@ -1675,7 +1670,7 @@ export function FaceNodeView({ node, onDelete, onUpdate, onStartConnection, onEn
1675
  export function StyleNodeView({ node, onDelete, onUpdate, onStartConnection, onEndConnection, onProcess, onUpdatePosition, getNodeHistoryInfo, navigateNodeHistory, getCurrentNodeImage }: any) {
1676
  // Handle node dragging functionality
1677
  const { localPos, onPointerDown, onPointerMove, onPointerUp } = useNodeDrag(node, onUpdatePosition);
1678
-
1679
  /**
1680
  * Available artistic style options with descriptive labels
1681
  * Each style represents a different artistic movement or pop culture aesthetic
@@ -1693,13 +1688,13 @@ export function StyleNodeView({ node, onDelete, onUpdate, onStartConnection, onE
1693
  { value: "pixar", label: "Pixar Style" },
1694
  { value: "manga", label: "Manga Style" },
1695
  ];
1696
-
1697
  return (
1698
- <div
1699
- className="nb-node absolute text-white w-[320px]"
1700
  style={{ left: localPos.x, top: localPos.y }}
1701
  >
1702
- <div
1703
  className="nb-header px-3 py-2 flex items-center justify-between rounded-t-[14px] cursor-grab active:cursor-grabbing"
1704
  onPointerDown={onPointerDown}
1705
  onPointerMove={onPointerMove}
@@ -1742,16 +1737,16 @@ export function StyleNodeView({ node, onDelete, onUpdate, onStartConnection, onE
1742
  </Button>
1743
  </div>
1744
  )}
1745
- <div className="text-xs text-white/70">Art Style</div>
1746
- <div className="text-xs text-white/50 mb-2">Select an artistic style to apply to your image</div>
1747
  <Select
1748
- className="w-full bg-black border-white/20 text-white focus:border-white/40 [&>option]:bg-black [&>option]:text-white"
1749
  value={node.stylePreset || ""}
1750
  onChange={(e) => onUpdate(node.id, { stylePreset: (e.target as HTMLSelectElement).value })}
1751
  >
1752
- <option value="" className="bg-black">Select a style...</option>
1753
  {styleOptions.map(opt => (
1754
- <option key={opt.value} value={opt.value} className="bg-black">
1755
  {opt.label}
1756
  </option>
1757
  ))}
@@ -1764,21 +1759,21 @@ export function StyleNodeView({ node, onDelete, onUpdate, onStartConnection, onE
1764
  min={0} // Minimum strength (subtle effect)
1765
  max={100} // Maximum strength (full style transfer)
1766
  value={node.styleStrength || 50} // Current value (default 50%)
1767
- onChange={(e) => onUpdate(node.id, {
1768
  styleStrength: parseInt((e.target as HTMLInputElement).value) // Update strength value
1769
  })}
1770
  title="Adjust how strongly the artistic style is applied - lower values are more subtle"
1771
  />
1772
  </div>
1773
  {/* Style Processing Button - triggers the style transfer operation */}
1774
- <Button
1775
  className="w-full"
1776
  onClick={() => onProcess(node.id)} // Start style transfer processing
1777
  disabled={node.isRunning || !node.stylePreset} // Disable if processing or no style selected
1778
  title={
1779
  !node.input ? "Connect an input first" : // No input connection
1780
- !node.stylePreset ? "Select a style first" : // No style selected
1781
- "Apply the selected artistic style to your input image" // Ready to process
1782
  }
1783
  >
1784
  {/* Dynamic button text based on processing state */}
@@ -1835,24 +1830,24 @@ export function StyleNodeView({ node, onDelete, onUpdate, onStartConnection, onE
1835
  export function LightningNodeView({ node, onDelete, onUpdate, onStartConnection, onEndConnection, onProcess, onUpdatePosition, getNodeHistoryInfo, navigateNodeHistory, getCurrentNodeImage }: any) {
1836
  // Handle node dragging functionality
1837
  const { localPos, onPointerDown, onPointerMove, onPointerUp } = useNodeDrag(node, onUpdatePosition);
1838
-
1839
  /**
1840
  * Available lighting preset options with text descriptions
1841
  * Each preset uses detailed lighting prompts instead of reference images
1842
  */
1843
  const presetLightings = [
1844
- {
1845
- name: "Moody Cinematic",
1846
  path: "/lighting/light1.png",
1847
  prompt: "Moody cinematic portrait lighting with a sharp vertical beam of warm orange-red light cutting across the face and neck, contrasted with cool teal ambient fill on the surrounding areas. Strong chiaroscuro effect, deep shadows, high contrast between warm and cool tones, dramatic spotlight strip"
1848
  },
1849
- {
1850
- name: "Dual-Tone Neon",
1851
  path: "/lighting/light2.png",
1852
  prompt: "Cinematic portrait lighting with strong dual-tone rim lights: deep blue light illuminating the front-left side of the face, intense red light as a rim light from the back-right, dark black background, high contrast, minimal fill light, dramatic neon glow"
1853
  },
1854
- {
1855
- name: "Natural Shadow Play",
1856
  path: "/lighting/light3.png",
1857
  prompt: "DRAMATIC natural shadow play with hard directional sunlight filtering through foliage, creating bold contrasting patterns of light and shadow across the subject. Strong chiaroscuro effect with deep blacks and bright highlights, dappled leaf shadows dancing across face and body, creating an artistic interplay of illumination and darkness. Emphasize the sculptural quality of light carving through shadow, with sharp shadow edges and brilliant sun-kissed highlights for maximum visual impact"
1858
  },
@@ -1863,15 +1858,15 @@ export function LightningNodeView({ node, onDelete, onUpdate, onStartConnection,
1863
  * Updates with the text prompt instead of reference image
1864
  */
1865
  const selectLighting = (lightingPath: string, lightingName: string, lightingPrompt: string) => {
1866
- onUpdate(node.id, {
1867
  lightingPrompt: lightingPrompt, // Text prompt for lighting effect
1868
  selectedLighting: lightingName // Name of selected lighting preset
1869
  });
1870
  };
1871
-
1872
  return (
1873
  <div className="nb-node absolute text-white w-[320px]" style={{ left: localPos.x, top: localPos.y }}>
1874
- <div
1875
  className="nb-header px-3 py-2 flex items-center justify-between rounded-t-[14px] cursor-grab active:cursor-grabbing"
1876
  onPointerDown={onPointerDown}
1877
  onPointerMove={onPointerMove}
@@ -1914,22 +1909,21 @@ export function LightningNodeView({ node, onDelete, onUpdate, onStartConnection,
1914
  </Button>
1915
  </div>
1916
  )}
1917
- <div className="text-xs text-white/70">Lighting Presets</div>
1918
-
1919
  <div className="grid grid-cols-2 gap-2">
1920
  {presetLightings.map((preset) => (
1921
  <button
1922
  key={preset.name}
1923
- className={`p-2 rounded border ${
1924
- node.selectedLighting === preset.name
1925
- ? "border-indigo-400 bg-indigo-500/20"
1926
- : "border-white/20 hover:border-white/40"
1927
- }`}
1928
  onClick={() => selectLighting(preset.path, preset.name, preset.prompt)}
1929
  >
1930
- <img
1931
- src={preset.path}
1932
- alt={preset.name}
1933
  className="w-full h-24 object-contain rounded mb-1"
1934
  title="Click to select lighting"
1935
  />
@@ -1937,8 +1931,8 @@ export function LightningNodeView({ node, onDelete, onUpdate, onStartConnection,
1937
  </button>
1938
  ))}
1939
  </div>
1940
-
1941
- <Button
1942
  className="w-full"
1943
  onClick={() => onProcess(node.id)}
1944
  disabled={node.isRunning || !node.selectedLighting}
@@ -1946,7 +1940,7 @@ export function LightningNodeView({ node, onDelete, onUpdate, onStartConnection,
1946
  >
1947
  {node.isRunning ? "Processing..." : "Apply Lighting"}
1948
  </Button>
1949
-
1950
  <NodeOutputSection
1951
  nodeId={node.id}
1952
  output={node.output}
@@ -1999,29 +1993,29 @@ export function LightningNodeView({ node, onDelete, onUpdate, onStartConnection,
1999
  export function PosesNodeView({ node, onDelete, onUpdate, onStartConnection, onEndConnection, onProcess, onUpdatePosition, getNodeHistoryInfo, navigateNodeHistory, getCurrentNodeImage }: any) {
2000
  // Handle node dragging functionality
2001
  const { localPos, onPointerDown, onPointerMove, onPointerUp } = useNodeDrag(node, onUpdatePosition);
2002
-
2003
  /**
2004
  * Available pose preset options with text descriptions
2005
  * Each preset uses detailed pose prompts instead of reference images
2006
  */
2007
  const presetPoses = [
2008
- {
2009
- name: "Dynamic Standing",
2010
  path: "/poses/stand1.png",
2011
  prompt: "A dynamic standing pose with the figure's weight shifted to one side. The right arm extends forward in a pointing gesture while the left arm hangs naturally. The figure has a slight hip tilt and appears to be in mid-movement, creating an energetic, directional composition."
2012
  },
2013
- {
2014
- name: "Arms Crossed",
2015
  path: "/poses/stand2.png",
2016
  prompt: "A relaxed standing pose with arms crossed over the torso. The weight is distributed fairly evenly, with one leg slightly forward. The figure's posture suggests a casual, confident stance with the head tilted slightly downward in a contemplative manner."
2017
  },
2018
- {
2019
- name: "Seated Composed",
2020
  path: "/poses/sit1.png",
2021
  prompt: "A seated pose on what appears to be a stool or high chair. The figure sits with legs crossed at the knee, creating an asymmetrical but balanced composition. The hands rest on the lap, and the overall posture is upright and composed."
2022
  },
2023
- {
2024
- name: "Relaxed Lean",
2025
  path: "/poses/sit2.png",
2026
  prompt: "A more relaxed seated pose with the figure leaning to one side. One leg is bent and raised while the other extends downward. The figure appears to be resting or in casual repose, with arms supporting the body and creating a diagonal flow through the composition."
2027
  },
@@ -2032,15 +2026,15 @@ export function PosesNodeView({ node, onDelete, onUpdate, onStartConnection, onE
2032
  * Updates with the text prompt instead of reference image
2033
  */
2034
  const selectPose = (posePath: string, poseName: string, posePrompt: string) => {
2035
- onUpdate(node.id, {
2036
  posePrompt: posePrompt, // Text prompt for pose effect
2037
  selectedPose: poseName // Name of selected pose preset
2038
  });
2039
  };
2040
-
2041
  return (
2042
- <div className="nb-node absolute text-white w-[320px]" style={{ left: localPos.x, top: localPos.y }}>
2043
- <div
2044
  className="nb-header px-3 py-2 flex items-center justify-between rounded-t-[14px] cursor-grab active:cursor-grabbing"
2045
  onPointerDown={onPointerDown}
2046
  onPointerMove={onPointerMove}
@@ -2084,21 +2078,20 @@ export function PosesNodeView({ node, onDelete, onUpdate, onStartConnection, onE
2084
  </div>
2085
  )}
2086
  <div className="text-xs text-white/70">Pose References</div>
2087
-
2088
  <div className="grid grid-cols-2 gap-2">
2089
  {presetPoses.map((preset) => (
2090
  <button
2091
  key={preset.name}
2092
- className={`p-2 rounded border ${
2093
- node.selectedPose === preset.name
2094
- ? "border-indigo-400 bg-indigo-500/20"
2095
- : "border-white/20 hover:border-white/40"
2096
- }`}
2097
  onClick={() => selectPose(preset.path, preset.name, preset.prompt)}
2098
  >
2099
- <img
2100
- src={preset.path}
2101
- alt={preset.name}
2102
  className="w-full h-24 object-contain rounded mb-1"
2103
  title="Click to select pose"
2104
  />
@@ -2106,8 +2099,8 @@ export function PosesNodeView({ node, onDelete, onUpdate, onStartConnection, onE
2106
  </button>
2107
  ))}
2108
  </div>
2109
-
2110
- <Button
2111
  className="w-full"
2112
  onClick={() => onProcess(node.id)}
2113
  disabled={node.isRunning || !node.selectedPose}
@@ -2115,7 +2108,7 @@ export function PosesNodeView({ node, onDelete, onUpdate, onStartConnection, onE
2115
  >
2116
  {node.isRunning ? "Processing..." : "Apply Pose"}
2117
  </Button>
2118
-
2119
  <NodeOutputSection
2120
  nodeId={node.id}
2121
  output={node.output}
@@ -2164,12 +2157,11 @@ export function EditNodeView({
2164
  onUpdatePosition,
2165
  getNodeHistoryInfo,
2166
  navigateNodeHistory,
2167
- getCurrentNodeImage,
2168
- apiToken
2169
  }: any) {
2170
  // Use custom hook for drag functionality - handles position updates during dragging
2171
  const { localPos, onPointerDown, onPointerMove, onPointerUp } = useNodeDrag(node, onUpdatePosition);
2172
-
2173
  /**
2174
  * Handle prompt improvement using Gemini API
2175
  * Takes the user's basic edit description and enhances it for better AI processing
@@ -2180,7 +2172,7 @@ export function EditNodeView({
2180
  alert('Please enter an edit description first');
2181
  return;
2182
  }
2183
-
2184
  try {
2185
  // Call the API to improve the prompt
2186
  const response = await fetch('/api/improve-prompt', {
@@ -2188,11 +2180,10 @@ export function EditNodeView({
2188
  headers: { 'Content-Type': 'application/json' },
2189
  body: JSON.stringify({
2190
  prompt: node.editPrompt.trim(),
2191
- type: 'edit',
2192
- apiToken: apiToken || undefined
2193
  })
2194
  });
2195
-
2196
  if (response.ok) {
2197
  const { improvedPrompt } = await response.json();
2198
  onUpdate(node.id, { editPrompt: improvedPrompt });
@@ -2211,7 +2202,7 @@ export function EditNodeView({
2211
  const handleDeleteNode = (e: React.MouseEvent) => {
2212
  e.stopPropagation(); // Prevent triggering drag
2213
  e.preventDefault();
2214
-
2215
  if (confirm('Delete this node?')) {
2216
  onDelete(node.id);
2217
  }
@@ -2232,9 +2223,9 @@ export function EditNodeView({
2232
  };
2233
 
2234
  return (
2235
- <div className="nb-node absolute text-white w-[320px]" style={{ left: localPos.x, top: localPos.y }}>
2236
  {/* Node Header - Contains title, delete button, and connection ports */}
2237
- <div
2238
  className="nb-header px-3 py-2 flex items-center justify-between rounded-t-[14px] cursor-grab active:cursor-grabbing"
2239
  onPointerDown={onPointerDown} // Start dragging
2240
  onPointerMove={onPointerMove} // Handle drag movement
@@ -2242,10 +2233,10 @@ export function EditNodeView({
2242
  >
2243
  {/* Input port (left side) - where connections come in */}
2244
  <Port className="in" nodeId={node.id} isOutput={false} onEndConnection={onEndConnection} onDisconnect={(nodeId) => onUpdate(nodeId, { input: undefined })} />
2245
-
2246
  {/* Node title */}
2247
  <div className="font-semibold text-sm flex-1 text-center">EDIT</div>
2248
-
2249
  <div className="flex items-center gap-1">
2250
  {/* Delete button */}
2251
  <Button
@@ -2259,12 +2250,12 @@ export function EditNodeView({
2259
  >
2260
  ×
2261
  </Button>
2262
-
2263
  {/* Output port (right side) - where connections go out */}
2264
  <Port className="out" nodeId={node.id} isOutput={true} onStartConnection={onStartConnection} />
2265
  </div>
2266
  </div>
2267
-
2268
  {/* Node Content - Contains all the controls and outputs */}
2269
  {/* Node Content Area - Contains all controls, inputs, and outputs */}
2270
  <div className="p-3 space-y-3">
@@ -2282,10 +2273,10 @@ export function EditNodeView({
2282
  </Button>
2283
  </div>
2284
  )}
2285
-
2286
  {/* Edit prompt input and improvement section */}
2287
  <div className="space-y-2">
2288
- <div className="text-xs text-white/70 mb-1">Edit Instructions</div>
2289
  <Textarea
2290
  className="w-full"
2291
  placeholder="Describe what to edit (e.g., 'make it brighter', 'add more contrast', 'make it look vintage')"
@@ -2293,7 +2284,7 @@ export function EditNodeView({
2293
  onChange={handlePromptChange}
2294
  rows={3}
2295
  />
2296
-
2297
  {/* AI-powered prompt improvement button */}
2298
  <Button
2299
  variant="outline"
@@ -2306,28 +2297,28 @@ export function EditNodeView({
2306
  ✨ Improve with Gemini
2307
  </Button>
2308
  </div>
2309
-
2310
  {/* Process button - starts the editing operation */}
2311
- <Button
2312
  className="w-full"
2313
  onClick={() => onProcess(node.id)}
2314
  disabled={node.isRunning || !node.editPrompt?.trim()}
2315
  title={
2316
- !node.input ? "Connect an input first" :
2317
- !node.editPrompt?.trim() ? "Enter edit instructions first" :
2318
- "Apply the edit to the input image"
2319
  }
2320
  >
2321
  {node.isRunning ? "Processing..." : "Apply Edit"}
2322
  </Button>
2323
-
2324
  {/* Output section with history navigation and download */}
2325
  <NodeOutputSection
2326
  nodeId={node.id}
2327
  output={node.output}
2328
  downloadFileName={`edit-${Date.now()}.png`}
2329
  />
2330
-
2331
  {/* Error display */}
2332
  {node.error && (
2333
  <div className="text-xs text-red-400 mt-2 p-2 bg-red-900/20 rounded">
 
89
  // Fetch the data URL and convert it to a Blob object
90
  const response = await fetch(dataUrl); // Fetch the base64 data URL
91
  const blob = await response.blob(); // Convert response to Blob format
92
+
93
  // The browser clipboard API only supports PNG format for images
94
  // If the image is not PNG, we need to convert it first
95
  if (blob.type !== 'image/png') {
 
97
  const canvas = document.createElement('canvas'); // Create invisible canvas
98
  const ctx = canvas.getContext('2d'); // Get 2D drawing context
99
  const img = new Image(); // Create image element
100
+
101
  // Wait for the image to load before processing
102
  await new Promise((resolve) => {
103
  img.onload = () => { // When image loads
 
108
  };
109
  img.src = dataUrl; // Start loading the image
110
  });
111
+
112
  // Convert the canvas content to PNG blob
113
  const pngBlob = await new Promise<Blob>((resolve) => {
114
  canvas.toBlob((blob) => resolve(blob!), 'image/png'); // Convert canvas to PNG blob
115
  });
116
+
117
  // Write the converted PNG blob to clipboard
118
  await navigator.clipboard.write([
119
  new ClipboardItem({ 'image/png': pngBlob }) // Create clipboard item with PNG data
 
169
  }) {
170
  // If no image is available, don't render anything
171
  if (!output) return null;
172
+
173
  return (
174
  // Main container for output section with vertical spacing
175
  <div className="space-y-2">
 
181
  <div className="text-xs text-white/70">Output</div>
182
  </div>
183
  {/* Output image with click-to-copy functionality */}
184
+ <img
185
  src={output} // Display the output image
186
  className="w-full rounded cursor-pointer hover:opacity-80 transition-all duration-200 hover:ring-2 hover:ring-white/30" // Styling with hover effects
187
  alt="Output" // Accessibility description
 
189
  onContextMenu={(e) => { // Right-click context menu handler
190
  e.preventDefault(); // Prevent browser context menu from appearing
191
  copyImageToClipboard(output); // Copy image to clipboard
192
+
193
  // Show brief visual feedback when image is copied
194
  const img = e.currentTarget; // Get the image element
195
  const originalTitle = img.title; // Store original tooltip text
196
  img.title = "Copied to clipboard!"; // Update tooltip to show success
197
  img.style.filter = "brightness(1.2)"; // Brighten the image briefly
198
  img.style.transform = "scale(0.98)"; // Slightly scale down the image
199
+
200
  // Reset visual feedback after 300ms
201
  setTimeout(() => {
202
  img.title = originalTitle; // Restore original tooltip
 
270
  const [localPos, setLocalPos] = useState({ x: node.x, y: node.y }); // Local position for smooth dragging
271
  const dragging = useRef(false); // Track drag state
272
  const start = useRef<{ sx: number; sy: number; ox: number; oy: number } | null>(null); // Drag start coordinates
273
+
274
  // Sync local position when parent position changes
275
  useEffect(() => {
276
  setLocalPos({ x: node.x, y: node.y });
277
  }, [node.x, node.y]);
278
+
279
  /**
280
  * Handle pointer down - start dragging
281
  * Captures the pointer and records starting positions
 
286
  start.current = { sx: e.clientX, sy: e.clientY, ox: localPos.x, oy: localPos.y }; // Record start positions
287
  (e.currentTarget as HTMLElement).setPointerCapture(e.pointerId); // Capture pointer for reliable tracking
288
  };
289
+
290
  /**
291
  * Handle pointer move - update position during drag
292
  * Calculates new position based on mouse movement delta
 
300
  setLocalPos({ x: newX, y: newY }); // Update local position for immediate visual feedback
301
  if (onUpdatePosition) onUpdatePosition(node.id, newX, newY); // Update parent state
302
  };
303
+
304
  /**
305
  * Handle pointer up - end dragging
306
  * Releases pointer capture and resets drag state
 
310
  start.current = null; // Clear start position
311
  (e.currentTarget as HTMLElement).releasePointerCapture(e.pointerId); // Release pointer
312
  };
313
+
314
  return { localPos, onPointerDown, onPointerMove, onPointerUp };
315
  }
316
 
 
331
  * @param onStartConnection Callback when starting a connection from this port
332
  * @param onEndConnection Callback when ending a connection at this port
333
  */
334
+ function Port({
335
+ className,
336
  nodeId,
337
  isOutput,
338
  onStartConnection,
339
  onEndConnection,
340
  onDisconnect
341
+ }: {
342
  className?: string;
343
  nodeId?: string;
344
  isOutput?: boolean;
 
355
  onStartConnection(nodeId); // Start connection from this output port
356
  }
357
  };
358
+
359
  /**
360
  * Handle ending a connection (pointer up on input port)
361
  */
 
378
  };
379
 
380
  return (
381
+ <div
382
  className={cx("nb-port", className)} // Combine base port classes with custom ones
383
  onPointerDown={handlePointerDown} // Start connection drag from output ports
384
  onPointerUp={handlePointerUp} // End connection drag at input ports
385
  onPointerEnter={handlePointerUp} // Also accept connections on hover (better UX)
386
  onClick={handleClick} // Allow clicking input ports to disconnect
387
  title={
388
+ isOutput
389
+ ? "Drag from here to connect to another node's input"
390
  : "Drop connections here or click to disconnect"
391
  }
392
  />
 
429
  onEndConnection,
430
  onProcess,
431
  onUpdatePosition,
 
432
  }: any) {
433
  // Use custom drag hook to handle node positioning in the editor
434
  const { localPos, onPointerDown, onPointerMove, onPointerUp } = useNodeDrag(node, onUpdatePosition);
435
+
436
  /**
437
  * Handle image file upload from file input
438
  * Converts uploaded file to base64 data URL for storage and preview
 
446
  reader.readAsDataURL(e.target.files[0]); // Convert file to base64
447
  }
448
  };
449
+
450
  /**
451
  * Handle image paste from clipboard
452
  * Supports both image files and image URLs pasted from clipboard
453
  */
454
  const handleImagePaste = (e: React.ClipboardEvent) => {
455
  const items = e.clipboardData.items; // Get clipboard items
456
+
457
  // First, try to find image files in clipboard
458
  for (let i = 0; i < items.length; i++) {
459
  if (items[i].type.startsWith("image/")) { // Check if item is an image
 
468
  }
469
  }
470
  }
471
+
472
  // If no image files, check for text that might be image URLs
473
  const text = e.clipboardData.getData("text"); // Get text from clipboard
474
  if (text && (text.startsWith("http") || text.startsWith("data:image"))) {
475
  onUpdate(node.id, { customBackgroundImage: text }); // Use URL directly
476
  }
477
  };
478
+
479
  const handleDrop = (e: React.DragEvent) => {
480
  e.preventDefault();
481
  const files = e.dataTransfer.files;
 
487
  reader.readAsDataURL(files[0]);
488
  }
489
  };
490
+
491
  return (
492
+ <div
493
+ className="nb-node absolute text-white w-[320px]"
494
  style={{ left: localPos.x, top: localPos.y }}
495
  onDrop={handleDrop}
496
  onDragOver={(e) => e.preventDefault()}
497
  onPaste={handleImagePaste}
498
  >
499
+ <div
500
  className="nb-header px-3 py-2 flex items-center justify-between rounded-t-[14px] cursor-grab active:cursor-grabbing"
501
  onPointerDown={onPointerDown}
502
  onPointerMove={onPointerMove}
 
539
  </Button>
540
  </div>
541
  )}
542
+ <Select
543
  className="w-full"
544
  value={node.backgroundType || "color"}
545
  onChange={(e) => onUpdate(node.id, { backgroundType: (e.target as HTMLSelectElement).value })}
 
552
  <option value="upload">Upload Image</option>
553
  <option value="custom">Custom Prompt</option>
554
  </Select>
555
+
556
  {node.backgroundType === "color" && (
557
  <ColorPicker
558
  className="w-full"
 
560
  onChange={(e) => onUpdate(node.id, { backgroundColor: (e.target as HTMLInputElement).value })}
561
  />
562
  )}
563
+
564
  {node.backgroundType === "gradient" && (
565
  <div className="space-y-3">
566
  <label className="text-xs text-white/70">Gradient Direction</label>
567
+ <Select
568
  className="w-full"
569
  value={node.gradientDirection || "to right"}
570
  onChange={(e) => onUpdate(node.id, { gradientDirection: (e.target as HTMLSelectElement).value })}
 
591
  value={node.gradientEndColor || "#4ecdc4"}
592
  onChange={(e) => onUpdate(node.id, { gradientEndColor: (e.target as HTMLInputElement).value })}
593
  />
594
+ <div
595
  className="w-full h-8 rounded-md border border-white/20"
596
  style={{
597
+ background: node.gradientDirection === "radial"
598
  ? `radial-gradient(circle, ${node.gradientStartColor || "#ff6b6b"} 0%, ${node.gradientEndColor || "#4ecdc4"} 100%)`
599
  : `linear-gradient(${node.gradientDirection || "to right"}, ${node.gradientStartColor || "#ff6b6b"} 0%, ${node.gradientEndColor || "#4ecdc4"} 100%)`
600
  }}
 
602
  />
603
  </div>
604
  )}
605
+
606
  {node.backgroundType === "image" && (
607
+ <Select
608
  className="w-full"
609
  value={node.backgroundImage || ""}
610
  onChange={(e) => onUpdate(node.id, { backgroundImage: (e.target as HTMLSelectElement).value })}
 
617
  <option value="city">City Skyline</option>
618
  </Select>
619
  )}
620
+
621
  {node.backgroundType === "city" && (
622
  <div className="space-y-3">
623
  <label className="text-xs text-white/70">City Scene Type</label>
624
+ <Select
625
  className="w-full"
626
  value={node.citySceneType || "busy_street"}
627
  onChange={(e) => onUpdate(node.id, { citySceneType: (e.target as HTMLSelectElement).value })}
 
639
  <option value="matrix_alley">Matrix Style Urban Alley</option>
640
  </Select>
641
  <label className="text-xs text-white/70">Time of Day</label>
642
+ <Select
643
  className="w-full"
644
  value={node.cityTimeOfDay || "daytime"}
645
  onChange={(e) => onUpdate(node.id, { cityTimeOfDay: (e.target as HTMLSelectElement).value })}
 
653
  </Select>
654
  </div>
655
  )}
656
+
657
  {node.backgroundType === "photostudio" && (
658
  <div className="space-y-3">
659
  <label className="text-xs text-white/70">Studio Setup</label>
660
+ <Select
661
  className="w-full"
662
  value={node.studioSetup || "white_seamless"}
663
  onChange={(e) => onUpdate(node.id, { studioSetup: (e.target as HTMLSelectElement).value })}
 
680
  </>
681
  )}
682
  <label className="text-xs text-white/70">Lighting Setup</label>
683
+ <Select
684
  className="w-full"
685
  value={node.studioLighting || "key_fill"}
686
  onChange={(e) => onUpdate(node.id, { studioLighting: (e.target as HTMLSelectElement).value })}
 
703
  </div>
704
  </div>
705
  )}
706
+
707
  {node.backgroundType === "upload" && (
708
  <div className="space-y-2">
709
  {node.customBackgroundImage ? (
710
  <div className="relative">
711
  <img src={node.customBackgroundImage} className="w-full rounded" alt="Custom Background" />
712
+ <Button
713
  variant="destructive"
714
  size="sm"
715
  className="absolute top-2 right-2"
 
734
  )}
735
  </div>
736
  )}
737
+
738
  {node.backgroundType === "custom" && (
739
  <div className="space-y-2">
740
  <Textarea
 
753
  alert('Please enter a background description first');
754
  return;
755
  }
756
+
757
  try {
758
  const response = await fetch('/api/improve-prompt', {
759
  method: 'POST',
760
  headers: { 'Content-Type': 'application/json' },
761
  body: JSON.stringify({
762
  prompt: node.customPrompt,
763
+ type: 'background'
 
764
  })
765
  });
766
+
767
  if (response.ok) {
768
  const { improvedPrompt } = await response.json();
769
  onUpdate(node.id, { customPrompt: improvedPrompt });
 
781
  </Button>
782
  </div>
783
  )}
784
+
785
+ <Button
786
  className="w-full"
787
  onClick={() => onProcess(node.id)}
788
  disabled={node.isRunning}
 
790
  >
791
  {node.isRunning ? "Processing..." : "Apply Background"}
792
  </Button>
793
+
794
  <NodeOutputSection
795
  nodeId={node.id}
796
  output={node.output}
 
834
  export function ClothesNodeView({ node, onDelete, onUpdate, onStartConnection, onEndConnection, onProcess, onUpdatePosition, getNodeHistoryInfo, navigateNodeHistory, getCurrentNodeImage }: any) {
835
  // Handle node dragging functionality
836
  const { localPos, onPointerDown, onPointerMove, onPointerUp } = useNodeDrag(node, onUpdatePosition);
837
+
838
  /**
839
  * Preset clothing options available for quick selection
840
  * Each preset includes a display name and path to the reference image
 
878
  const selectPreset = (presetPath: string, presetName: string) => {
879
  onUpdate(node.id, { clothesImage: presetPath, selectedPreset: presetName });
880
  };
881
+
882
  return (
883
+ <div
884
+ className="nb-node absolute w-[320px]"
885
  style={{ left: localPos.x, top: localPos.y }}
886
  onDrop={onDrop}
887
  onDragOver={(e) => e.preventDefault()}
888
  onPaste={onPaste}
889
  >
890
+ <div
891
  className="nb-header px-3 py-2 flex items-center justify-between rounded-t-[14px] cursor-grab active:cursor-grabbing"
892
  onPointerDown={onPointerDown}
893
  onPointerMove={onPointerMove}
 
930
  </Button>
931
  </div>
932
  )}
933
+ <div className="text-xs text-muted-foreground">Clothes Reference</div>
934
+
935
  {/* Preset clothes options */}
936
  <div className="flex gap-2">
937
  {presetClothes.map((preset) => (
938
  <button
939
  key={preset.name}
940
+ className={`flex-1 p-2 rounded border ${node.selectedPreset === preset.name
941
+ ? "border-primary bg-primary/20"
942
+ : "border-border hover:border-primary/50"
943
+ }`}
 
944
  onClick={() => selectPreset(preset.path, preset.name)}
945
  >
946
  <img src={preset.path} alt={preset.name} className="w-full h-28 object-contain rounded mb-1" />
 
948
  </button>
949
  ))}
950
  </div>
951
+
952
+ <div className="text-xs text-muted-foreground/50 text-center">— or —</div>
953
+
954
  {/* Custom image upload */}
955
  {node.clothesImage && !node.selectedPreset ? (
956
  <div className="relative">
957
  <img src={node.clothesImage} className="w-full rounded" alt="Clothes" />
958
+ <Button
959
  variant="destructive"
960
  size="sm"
961
  className="absolute top-2 right-2"
 
978
  }
979
  }}
980
  />
981
+ <div className="border-2 border-dashed border-border rounded-lg p-6 text-center cursor-pointer hover:border-primary/50 transition-colors">
982
+ <div className="text-muted-foreground/40 text-lg mb-2">📁</div>
983
+ <p className="text-sm text-muted-foreground font-medium">Drop, upload, or paste clothes image</p>
984
+ <p className="text-xs text-muted-foreground/50 mt-1">JPG, PNG, WebP supported</p>
985
  </div>
986
  </label>
987
  ) : null}
988
+
989
+ <Button
990
  className="w-full"
991
  onClick={() => onProcess(node.id)}
992
  disabled={node.isRunning || !node.clothesImage}
 
1040
  export function AgeNodeView({ node, onDelete, onUpdate, onStartConnection, onEndConnection, onProcess, onUpdatePosition, getNodeHistoryInfo, navigateNodeHistory, getCurrentNodeImage }: any) {
1041
  // Handle node dragging functionality
1042
  const { localPos, onPointerDown, onPointerMove, onPointerUp } = useNodeDrag(node, onUpdatePosition);
1043
+
1044
  return (
1045
+ <div className="nb-node absolute w-[280px]" style={{ left: localPos.x, top: localPos.y }}>
1046
+ <div
1047
  className="nb-header px-3 py-2 flex items-center justify-between rounded-t-[14px] cursor-grab active:cursor-grabbing"
1048
  onPointerDown={onPointerDown}
1049
  onPointerMove={onPointerMove}
 
1096
  onChange={(e) => onUpdate(node.id, { targetAge: parseInt((e.target as HTMLInputElement).value) })}
1097
  />
1098
  </div>
1099
+ <Button
1100
  className="w-full"
1101
  onClick={() => onProcess(node.id)}
1102
  disabled={node.isRunning}
 
1155
  export function CameraNodeView({ node, onDelete, onUpdate, onStartConnection, onEndConnection, onProcess, onUpdatePosition, getNodeHistoryInfo, navigateNodeHistory, getCurrentNodeImage }: any) {
1156
  // Handle node dragging functionality
1157
  const { localPos, onPointerDown, onPointerMove, onPointerUp } = useNodeDrag(node, onUpdatePosition);
1158
+
1159
  // Camera lens focal length options (affects field of view and perspective)
1160
+ const focalLengths = ["None", "8mm", "12mm", "24mm", "35mm", "50mm", "85mm"];
1161
+
1162
  // Aperture settings (affects depth of field and exposure)
1163
+ const apertures = ["None", "f/0.95", "f/1.2", "f/1.4", "f/1.8", "f/2", "f/2.8", "f/4", "f/5.6", "f/11"];
1164
+
1165
  // Shutter speed options (affects motion blur and exposure)
1166
+ const shutterSpeeds = ["None", "1/1000s", "1/250s", "1/30s", "1/15", "5s",];
1167
+
1168
  // White balance presets for different lighting conditions
1169
  const whiteBalances = ["None", "2800K candlelight", "3200K tungsten", "4000K fluorescent", "5600K daylight", "6500K cloudy", "7000K shade", "8000K blue sky"];
1170
+
1171
  // Camera angle and perspective options
1172
  const angles = ["None", "eye level", "low angle", "high angle", "Dutch tilt", "bird's eye", "worm's eye", "over the shoulder", "POV"];
1173
+
1174
  // ISO sensitivity values (affects image noise and exposure)
1175
+ const isoValues = ["None", "ISO 100", "ISO 400", "ISO 1600", "ISO 6400"];
1176
+
1177
  // Film stock emulation for different photographic styles
1178
+ const filmStyles = ["None", "RAW", "Kodak Portra", "Fuji Velvia", "Kodak Gold 200", "Black & White", "Sepia", "Vintage", "Film Noir"];
1179
+
1180
  // Professional lighting setups and natural lighting conditions
1181
  const lightingTypes = ["None", "Natural Light", "Golden Hour", "Blue Hour", "Studio Lighting", "Rembrandt", "Split Lighting", "Butterfly Lighting", "Loop Lighting", "Rim Lighting", "Silhouette", "High Key", "Low Key"];
1182
+
1183
  // Bokeh (background blur) styles for different lens characteristics
1184
  const bokehStyles = ["None", "Smooth Bokeh", "Swirly Bokeh", "Hexagonal Bokeh", "Cat Eye Bokeh", "Bubble Bokeh"];
1185
 
 
1187
  const motionBlurOptions = ["None", "Light Motion Blur", "Medium Motion Blur", "Heavy Motion Blur", "Radial Blur", "Zoom Blur"];
1188
 
1189
  return (
1190
+ <div className="nb-node absolute w-[360px]" style={{ left: localPos.x, top: localPos.y }}>
1191
+ <div
1192
  className="nb-header px-3 py-2 flex items-center justify-between rounded-t-[14px] cursor-grab active:cursor-grabbing"
1193
  onPointerDown={onPointerDown}
1194
  onPointerMove={onPointerMove}
 
1231
  </div>
1232
  )}
1233
  {/* Basic Camera Settings Section */}
1234
+ <div className="text-xs text-muted-foreground font-semibold mb-1">Basic Settings</div>
1235
  <div className="grid grid-cols-2 gap-2"> {/* 2-column grid for compact layout */}
1236
  {/* Motion Blur Control - adds movement effects */}
1237
  <div>
1238
+ <label className="text-xs text-muted-foreground">Motion Blur</label>
1239
+ <Select
1240
  className="w-full"
1241
  value={node.motionBlur || "None"} // Default to "None" if not set
1242
  onChange={(e) => onUpdate(node.id, { motionBlur: (e.target as HTMLSelectElement).value })}
 
1247
  </div>
1248
  {/* Focal Length Control - affects field of view and perspective */}
1249
  <div>
1250
+ <label className="text-xs text-muted-foreground">Focal Length</label>
1251
+ <Select
1252
  className="w-full"
1253
  value={node.focalLength || "None"} // Default to "None" if not set
1254
  onChange={(e) => onUpdate(node.id, { focalLength: (e.target as HTMLSelectElement).value })}
 
1257
  {focalLengths.map(f => <option key={f} value={f}>{f}</option>)}
1258
  </Select>
1259
  </div>
1260
+
1261
  {/* Aperture Control - affects depth of field and exposure */}
1262
  <div>
1263
+ <label className="text-xs text-muted-foreground">Aperture</label>
1264
+ <Select
1265
  className="w-full"
1266
  value={node.aperture || "None"} // Default to "None" if not set
1267
  onChange={(e) => onUpdate(node.id, { aperture: (e.target as HTMLSelectElement).value })}
 
1270
  {apertures.map(a => <option key={a} value={a}>{a}</option>)}
1271
  </Select>
1272
  </div>
1273
+
1274
  {/* Shutter Speed Control - affects motion blur and exposure */}
1275
  <div>
1276
+ <label className="text-xs text-muted-foreground">Shutter Speed</label>
1277
+ <Select
1278
  className="w-full"
1279
  value={node.shutterSpeed || "None"} // Default to "None" if not set
1280
  onChange={(e) => onUpdate(node.id, { shutterSpeed: (e.target as HTMLSelectElement).value })}
 
1283
  {shutterSpeeds.map(s => <option key={s} value={s}>{s}</option>)}
1284
  </Select>
1285
  </div>
1286
+
1287
  {/* ISO Control - affects sensor sensitivity and image noise */}
1288
  <div>
1289
+ <label className="text-xs text-muted-foreground">ISO</label>
1290
+ <Select
1291
  className="w-full"
1292
  value={node.iso || "None"} // Default to "None" if not set
1293
  onChange={(e) => onUpdate(node.id, { iso: (e.target as HTMLSelectElement).value })}
 
1297
  </Select>
1298
  </div>
1299
  </div>
1300
+
1301
  {/* Creative Settings */}
1302
  <div className="text-xs text-white/50 font-semibold mb-1 mt-3">Creative Settings</div>
1303
  <div className="grid grid-cols-2 gap-2">
1304
  <div>
1305
+ <label className="text-xs text-muted-foreground">White Balance</label>
1306
+ <Select
1307
  className="w-full"
1308
  value={node.whiteBalance || "None"}
1309
  onChange={(e) => onUpdate(node.id, { whiteBalance: (e.target as HTMLSelectElement).value })}
 
1312
  </Select>
1313
  </div>
1314
  <div>
1315
+ <label className="text-xs text-muted-foreground">Film Style</label>
1316
+ <Select
1317
  className="w-full"
1318
  value={node.filmStyle || "None"}
1319
  onChange={(e) => onUpdate(node.id, { filmStyle: (e.target as HTMLSelectElement).value })}
 
1322
  </Select>
1323
  </div>
1324
  <div>
1325
+ <label className="text-xs text-muted-foreground">Lighting</label>
1326
+ <Select
1327
  className="w-full"
1328
  value={node.lighting || "None"}
1329
  onChange={(e) => onUpdate(node.id, { lighting: (e.target as HTMLSelectElement).value })}
 
1332
  </Select>
1333
  </div>
1334
  <div>
1335
+ <label className="text-xs text-muted-foreground">Bokeh Style</label>
1336
+ <Select
1337
  className="w-full"
1338
  value={node.bokeh || "None"}
1339
  onChange={(e) => onUpdate(node.id, { bokeh: (e.target as HTMLSelectElement).value })}
 
1342
  </Select>
1343
  </div>
1344
  </div>
1345
+
1346
  {/* Composition Settings */}
1347
+ <div className="text-xs text-muted-foreground font-semibold mb-1 mt-3">Composition</div>
1348
  <div className="grid grid-cols-2 gap-2">
1349
  <div>
1350
+ <label className="text-xs text-muted-foreground">Camera Angle</label>
1351
+ <Select
1352
  className="w-full"
1353
  value={node.angle || "None"}
1354
  onChange={(e) => onUpdate(node.id, { angle: (e.target as HTMLSelectElement).value })}
 
1357
  </Select>
1358
  </div>
1359
  </div>
1360
+ <Button
1361
  className="w-full"
1362
  onClick={() => onProcess(node.id)}
1363
  disabled={node.isRunning}
 
1416
  export function FaceNodeView({ node, onDelete, onUpdate, onStartConnection, onEndConnection, onProcess, onUpdatePosition, getNodeHistoryInfo, navigateNodeHistory, getCurrentNodeImage }: any) {
1417
  // Handle node dragging functionality
1418
  const { localPos, onPointerDown, onPointerMove, onPointerUp } = useNodeDrag(node, onUpdatePosition);
1419
+
1420
  // Available hairstyle options for hair modification
1421
  const hairstyles = ["None", "short", "long", "curly", "straight", "bald", "mohawk", "ponytail"];
1422
+
1423
  // Facial expression options for emotion changes
1424
  const expressions = ["None", "happy", "serious", "smiling", "laughing", "sad", "surprised", "angry"];
1425
+
1426
  // Beard and facial hair styling options
1427
  const beardStyles = ["None", "stubble", "goatee", "full beard", "mustache", "clean shaven"];
1428
 
1429
  return (
1430
+ <div className="nb-node absolute w-[340px]" style={{ left: localPos.x, top: localPos.y }}>
1431
+ <div
1432
  className="nb-header px-3 py-2 flex items-center justify-between rounded-t-[14px] cursor-grab active:cursor-grabbing"
1433
  onPointerDown={onPointerDown}
1434
  onPointerMove={onPointerMove}
 
1474
  <div className="space-y-2">
1475
  {/* Pimple removal option for skin enhancement */}
1476
  <label className="flex items-center gap-2 text-xs cursor-pointer">
1477
+ <Checkbox
1478
  checked={node.faceOptions?.removePimples || false} // Default to false if not set
1479
+ onChange={(e) => onUpdate(node.id, {
1480
+ faceOptions: {
1481
  ...node.faceOptions, // Preserve existing options
1482
  removePimples: (e.target as HTMLInputElement).checked // Update pimple removal setting
1483
  }
 
1485
  />
1486
  Remove pimples {/* Clean up skin imperfections */}
1487
  </label>
1488
+
1489
  {/* Sunglasses addition option */}
1490
  <label className="flex items-center gap-2 text-xs cursor-pointer">
1491
+ <Checkbox
1492
  checked={node.faceOptions?.addSunglasses || false} // Default to false if not set
1493
+ onChange={(e) => onUpdate(node.id, {
1494
+ faceOptions: {
1495
  ...node.faceOptions, // Preserve existing options
1496
  addSunglasses: (e.target as HTMLInputElement).checked // Update sunglasses setting
1497
  }
 
1499
  />
1500
  Add sunglasses {/* Add stylish sunglasses accessory */}
1501
  </label>
1502
+
1503
  {/* Hat addition option */}
1504
  <label className="flex items-center gap-2 text-xs cursor-pointer">
1505
+ <Checkbox
1506
  checked={node.faceOptions?.addHat || false} // Default to false if not set
1507
+ onChange={(e) => onUpdate(node.id, {
1508
+ faceOptions: {
1509
  ...node.faceOptions, // Preserve existing options
1510
  addHat: (e.target as HTMLInputElement).checked // Update hat setting
1511
  }
 
1514
  Add hat {/* Add hat accessory */}
1515
  </label>
1516
  </div>
1517
+
1518
  <div>
1519
  <label className="text-xs text-white/70">Hairstyle</label>
1520
+ <Select
1521
  className="w-full"
1522
  value={node.faceOptions?.changeHairstyle || "None"}
1523
+ onChange={(e) => onUpdate(node.id, {
1524
  faceOptions: { ...node.faceOptions, changeHairstyle: (e.target as HTMLSelectElement).value }
1525
  })}
1526
  >
1527
  {hairstyles.map(h => <option key={h} value={h}>{h}</option>)}
1528
  </Select>
1529
  </div>
1530
+
1531
  <div>
1532
  <label className="text-xs text-white/70">Expression</label>
1533
+ <Select
1534
  className="w-full"
1535
  value={node.faceOptions?.facialExpression || "None"}
1536
+ onChange={(e) => onUpdate(node.id, {
1537
  faceOptions: { ...node.faceOptions, facialExpression: (e.target as HTMLSelectElement).value }
1538
  })}
1539
  >
1540
  {expressions.map(e => <option key={e} value={e}>{e}</option>)}
1541
  </Select>
1542
  </div>
1543
+
1544
  <div>
1545
  <label className="text-xs text-white/70">Beard</label>
1546
+ <Select
1547
  className="w-full"
1548
  value={node.faceOptions?.beardStyle || "None"}
1549
+ onChange={(e) => onUpdate(node.id, {
1550
  faceOptions: { ...node.faceOptions, beardStyle: (e.target as HTMLSelectElement).value }
1551
  })}
1552
  >
 
1556
 
1557
  {/* Makeup Selection Section - allows users to choose makeup application */}
1558
  <div>
1559
+ <label className="text-xs text-muted-foreground">Makeup</label>
1560
  <div className="grid grid-cols-2 gap-2 mt-2"> {/* 2-column grid for makeup options */}
1561
+
1562
  {/* No Makeup Option - removes or prevents makeup application */}
1563
  <button
1564
+ className={`p-1 rounded border transition-colors ${!node.faceOptions?.selectedMakeup || node.faceOptions?.selectedMakeup === "None"
1565
+ ? "border-indigo-400 bg-indigo-500/20" // Highlighted when selected
1566
+ : "border-white/20 hover:border-white/40" // Default and hover states
1567
+ }`}
1568
+ onClick={() => onUpdate(node.id, {
1569
+ faceOptions: {
 
1570
  ...node.faceOptions, // Preserve other face options
1571
  selectedMakeup: "None", // Set makeup to none
1572
  makeupImage: null // Clear makeup image reference
 
1575
  title="No makeup application - natural look"
1576
  >
1577
  {/* Visual placeholder for no makeup option */}
1578
+ <div className="w-full h-24 flex items-center justify-center text-xs text-muted-foreground/60 border border-dashed border-border rounded mb-1">
1579
  No Makeup {/* Text indicator for no makeup */}
1580
  </div>
1581
  <div className="text-xs">None</div> {/* Option label */}
1582
  </button>
1583
+
1584
  {/* Makeup Application Option - applies preset makeup style */}
1585
  <button
1586
+ className={`p-1 rounded border transition-colors ${node.faceOptions?.selectedMakeup === "Makeup"
1587
+ ? "border-primary bg-primary/20" // Highlighted when selected
1588
+ : "border-border hover:border-primary/50" // Default and hover states
1589
+ }`}
1590
+ onClick={() => onUpdate(node.id, {
1591
+ faceOptions: {
 
1592
  ...node.faceOptions, // Preserve other face options
1593
  selectedMakeup: "Makeup", // Set makeup type
1594
  makeupImage: "/makeup/makeup1.png" // Reference image for makeup style
 
1597
  title="Apply makeup style - enhances facial features"
1598
  >
1599
  {/* Makeup preview image */}
1600
+ <img
1601
+ src="/makeup/makeup1.png"
1602
+ alt="Makeup Style Preview"
1603
  className="w-full h-24 object-contain rounded mb-1"
1604
  title="Preview of makeup style that will be applied"
1605
  />
 
1607
  </button>
1608
  </div>
1609
  </div>
1610
+
1611
+ <Button
1612
  className="w-full"
1613
  onClick={() => onProcess(node.id)}
1614
  disabled={node.isRunning}
 
1670
  export function StyleNodeView({ node, onDelete, onUpdate, onStartConnection, onEndConnection, onProcess, onUpdatePosition, getNodeHistoryInfo, navigateNodeHistory, getCurrentNodeImage }: any) {
1671
  // Handle node dragging functionality
1672
  const { localPos, onPointerDown, onPointerMove, onPointerUp } = useNodeDrag(node, onUpdatePosition);
1673
+
1674
  /**
1675
  * Available artistic style options with descriptive labels
1676
  * Each style represents a different artistic movement or pop culture aesthetic
 
1688
  { value: "pixar", label: "Pixar Style" },
1689
  { value: "manga", label: "Manga Style" },
1690
  ];
1691
+
1692
  return (
1693
+ <div
1694
+ className="nb-node absolute w-[320px]"
1695
  style={{ left: localPos.x, top: localPos.y }}
1696
  >
1697
+ <div
1698
  className="nb-header px-3 py-2 flex items-center justify-between rounded-t-[14px] cursor-grab active:cursor-grabbing"
1699
  onPointerDown={onPointerDown}
1700
  onPointerMove={onPointerMove}
 
1737
  </Button>
1738
  </div>
1739
  )}
1740
+ <div className="text-xs text-muted-foreground">Art Style</div>
1741
+ <div className="text-xs text-muted-foreground/50 mb-2">Select an artistic style to apply to your image</div>
1742
  <Select
1743
+ className="w-full bg-background border-border text-foreground focus:border-ring [&>option]:bg-background [&>option]:text-foreground"
1744
  value={node.stylePreset || ""}
1745
  onChange={(e) => onUpdate(node.id, { stylePreset: (e.target as HTMLSelectElement).value })}
1746
  >
1747
+ <option value="" className="bg-background">Select a style...</option>
1748
  {styleOptions.map(opt => (
1749
+ <option key={opt.value} value={opt.value} className="bg-background">
1750
  {opt.label}
1751
  </option>
1752
  ))}
 
1759
  min={0} // Minimum strength (subtle effect)
1760
  max={100} // Maximum strength (full style transfer)
1761
  value={node.styleStrength || 50} // Current value (default 50%)
1762
+ onChange={(e) => onUpdate(node.id, {
1763
  styleStrength: parseInt((e.target as HTMLInputElement).value) // Update strength value
1764
  })}
1765
  title="Adjust how strongly the artistic style is applied - lower values are more subtle"
1766
  />
1767
  </div>
1768
  {/* Style Processing Button - triggers the style transfer operation */}
1769
+ <Button
1770
  className="w-full"
1771
  onClick={() => onProcess(node.id)} // Start style transfer processing
1772
  disabled={node.isRunning || !node.stylePreset} // Disable if processing or no style selected
1773
  title={
1774
  !node.input ? "Connect an input first" : // No input connection
1775
+ !node.stylePreset ? "Select a style first" : // No style selected
1776
+ "Apply the selected artistic style to your input image" // Ready to process
1777
  }
1778
  >
1779
  {/* Dynamic button text based on processing state */}
 
1830
  export function LightningNodeView({ node, onDelete, onUpdate, onStartConnection, onEndConnection, onProcess, onUpdatePosition, getNodeHistoryInfo, navigateNodeHistory, getCurrentNodeImage }: any) {
1831
  // Handle node dragging functionality
1832
  const { localPos, onPointerDown, onPointerMove, onPointerUp } = useNodeDrag(node, onUpdatePosition);
1833
+
1834
  /**
1835
  * Available lighting preset options with text descriptions
1836
  * Each preset uses detailed lighting prompts instead of reference images
1837
  */
1838
  const presetLightings = [
1839
+ {
1840
+ name: "Moody Cinematic",
1841
  path: "/lighting/light1.png",
1842
  prompt: "Moody cinematic portrait lighting with a sharp vertical beam of warm orange-red light cutting across the face and neck, contrasted with cool teal ambient fill on the surrounding areas. Strong chiaroscuro effect, deep shadows, high contrast between warm and cool tones, dramatic spotlight strip"
1843
  },
1844
+ {
1845
+ name: "Dual-Tone Neon",
1846
  path: "/lighting/light2.png",
1847
  prompt: "Cinematic portrait lighting with strong dual-tone rim lights: deep blue light illuminating the front-left side of the face, intense red light as a rim light from the back-right, dark black background, high contrast, minimal fill light, dramatic neon glow"
1848
  },
1849
+ {
1850
+ name: "Natural Shadow Play",
1851
  path: "/lighting/light3.png",
1852
  prompt: "DRAMATIC natural shadow play with hard directional sunlight filtering through foliage, creating bold contrasting patterns of light and shadow across the subject. Strong chiaroscuro effect with deep blacks and bright highlights, dappled leaf shadows dancing across face and body, creating an artistic interplay of illumination and darkness. Emphasize the sculptural quality of light carving through shadow, with sharp shadow edges and brilliant sun-kissed highlights for maximum visual impact"
1853
  },
 
1858
  * Updates with the text prompt instead of reference image
1859
  */
1860
  const selectLighting = (lightingPath: string, lightingName: string, lightingPrompt: string) => {
1861
+ onUpdate(node.id, {
1862
  lightingPrompt: lightingPrompt, // Text prompt for lighting effect
1863
  selectedLighting: lightingName // Name of selected lighting preset
1864
  });
1865
  };
1866
+
1867
  return (
1868
  <div className="nb-node absolute text-white w-[320px]" style={{ left: localPos.x, top: localPos.y }}>
1869
+ <div
1870
  className="nb-header px-3 py-2 flex items-center justify-between rounded-t-[14px] cursor-grab active:cursor-grabbing"
1871
  onPointerDown={onPointerDown}
1872
  onPointerMove={onPointerMove}
 
1909
  </Button>
1910
  </div>
1911
  )}
1912
+ <div className="text-xs text-muted-foreground">Lighting Presets</div>
1913
+
1914
  <div className="grid grid-cols-2 gap-2">
1915
  {presetLightings.map((preset) => (
1916
  <button
1917
  key={preset.name}
1918
+ className={`p-2 rounded border ${node.selectedLighting === preset.name
1919
+ ? "border-primary bg-primary/20"
1920
+ : "border-border hover:border-primary/50"
1921
+ }`}
 
1922
  onClick={() => selectLighting(preset.path, preset.name, preset.prompt)}
1923
  >
1924
+ <img
1925
+ src={preset.path}
1926
+ alt={preset.name}
1927
  className="w-full h-24 object-contain rounded mb-1"
1928
  title="Click to select lighting"
1929
  />
 
1931
  </button>
1932
  ))}
1933
  </div>
1934
+
1935
+ <Button
1936
  className="w-full"
1937
  onClick={() => onProcess(node.id)}
1938
  disabled={node.isRunning || !node.selectedLighting}
 
1940
  >
1941
  {node.isRunning ? "Processing..." : "Apply Lighting"}
1942
  </Button>
1943
+
1944
  <NodeOutputSection
1945
  nodeId={node.id}
1946
  output={node.output}
 
1993
  export function PosesNodeView({ node, onDelete, onUpdate, onStartConnection, onEndConnection, onProcess, onUpdatePosition, getNodeHistoryInfo, navigateNodeHistory, getCurrentNodeImage }: any) {
1994
  // Handle node dragging functionality
1995
  const { localPos, onPointerDown, onPointerMove, onPointerUp } = useNodeDrag(node, onUpdatePosition);
1996
+
1997
  /**
1998
  * Available pose preset options with text descriptions
1999
  * Each preset uses detailed pose prompts instead of reference images
2000
  */
2001
  const presetPoses = [
2002
+ {
2003
+ name: "Dynamic Standing",
2004
  path: "/poses/stand1.png",
2005
  prompt: "A dynamic standing pose with the figure's weight shifted to one side. The right arm extends forward in a pointing gesture while the left arm hangs naturally. The figure has a slight hip tilt and appears to be in mid-movement, creating an energetic, directional composition."
2006
  },
2007
+ {
2008
+ name: "Arms Crossed",
2009
  path: "/poses/stand2.png",
2010
  prompt: "A relaxed standing pose with arms crossed over the torso. The weight is distributed fairly evenly, with one leg slightly forward. The figure's posture suggests a casual, confident stance with the head tilted slightly downward in a contemplative manner."
2011
  },
2012
+ {
2013
+ name: "Seated Composed",
2014
  path: "/poses/sit1.png",
2015
  prompt: "A seated pose on what appears to be a stool or high chair. The figure sits with legs crossed at the knee, creating an asymmetrical but balanced composition. The hands rest on the lap, and the overall posture is upright and composed."
2016
  },
2017
+ {
2018
+ name: "Relaxed Lean",
2019
  path: "/poses/sit2.png",
2020
  prompt: "A more relaxed seated pose with the figure leaning to one side. One leg is bent and raised while the other extends downward. The figure appears to be resting or in casual repose, with arms supporting the body and creating a diagonal flow through the composition."
2021
  },
 
2026
  * Updates with the text prompt instead of reference image
2027
  */
2028
  const selectPose = (posePath: string, poseName: string, posePrompt: string) => {
2029
+ onUpdate(node.id, {
2030
  posePrompt: posePrompt, // Text prompt for pose effect
2031
  selectedPose: poseName // Name of selected pose preset
2032
  });
2033
  };
2034
+
2035
  return (
2036
+ <div className="nb-node absolute w-[320px]" style={{ left: localPos.x, top: localPos.y }}>
2037
+ <div
2038
  className="nb-header px-3 py-2 flex items-center justify-between rounded-t-[14px] cursor-grab active:cursor-grabbing"
2039
  onPointerDown={onPointerDown}
2040
  onPointerMove={onPointerMove}
 
2078
  </div>
2079
  )}
2080
  <div className="text-xs text-white/70">Pose References</div>
2081
+
2082
  <div className="grid grid-cols-2 gap-2">
2083
  {presetPoses.map((preset) => (
2084
  <button
2085
  key={preset.name}
2086
+ className={`p-2 rounded border ${node.selectedPose === preset.name
2087
+ ? "border-indigo-400 bg-indigo-500/20"
2088
+ : "border-white/20 hover:border-white/40"
2089
+ }`}
 
2090
  onClick={() => selectPose(preset.path, preset.name, preset.prompt)}
2091
  >
2092
+ <img
2093
+ src={preset.path}
2094
+ alt={preset.name}
2095
  className="w-full h-24 object-contain rounded mb-1"
2096
  title="Click to select pose"
2097
  />
 
2099
  </button>
2100
  ))}
2101
  </div>
2102
+
2103
+ <Button
2104
  className="w-full"
2105
  onClick={() => onProcess(node.id)}
2106
  disabled={node.isRunning || !node.selectedPose}
 
2108
  >
2109
  {node.isRunning ? "Processing..." : "Apply Pose"}
2110
  </Button>
2111
+
2112
  <NodeOutputSection
2113
  nodeId={node.id}
2114
  output={node.output}
 
2157
  onUpdatePosition,
2158
  getNodeHistoryInfo,
2159
  navigateNodeHistory,
2160
+ getCurrentNodeImage
 
2161
  }: any) {
2162
  // Use custom hook for drag functionality - handles position updates during dragging
2163
  const { localPos, onPointerDown, onPointerMove, onPointerUp } = useNodeDrag(node, onUpdatePosition);
2164
+
2165
  /**
2166
  * Handle prompt improvement using Gemini API
2167
  * Takes the user's basic edit description and enhances it for better AI processing
 
2172
  alert('Please enter an edit description first');
2173
  return;
2174
  }
2175
+
2176
  try {
2177
  // Call the API to improve the prompt
2178
  const response = await fetch('/api/improve-prompt', {
 
2180
  headers: { 'Content-Type': 'application/json' },
2181
  body: JSON.stringify({
2182
  prompt: node.editPrompt.trim(),
2183
+ type: 'edit'
 
2184
  })
2185
  });
2186
+
2187
  if (response.ok) {
2188
  const { improvedPrompt } = await response.json();
2189
  onUpdate(node.id, { editPrompt: improvedPrompt });
 
2202
  const handleDeleteNode = (e: React.MouseEvent) => {
2203
  e.stopPropagation(); // Prevent triggering drag
2204
  e.preventDefault();
2205
+
2206
  if (confirm('Delete this node?')) {
2207
  onDelete(node.id);
2208
  }
 
2223
  };
2224
 
2225
  return (
2226
+ <div className="nb-node absolute w-[320px]" style={{ left: localPos.x, top: localPos.y }}>
2227
  {/* Node Header - Contains title, delete button, and connection ports */}
2228
+ <div
2229
  className="nb-header px-3 py-2 flex items-center justify-between rounded-t-[14px] cursor-grab active:cursor-grabbing"
2230
  onPointerDown={onPointerDown} // Start dragging
2231
  onPointerMove={onPointerMove} // Handle drag movement
 
2233
  >
2234
  {/* Input port (left side) - where connections come in */}
2235
  <Port className="in" nodeId={node.id} isOutput={false} onEndConnection={onEndConnection} onDisconnect={(nodeId) => onUpdate(nodeId, { input: undefined })} />
2236
+
2237
  {/* Node title */}
2238
  <div className="font-semibold text-sm flex-1 text-center">EDIT</div>
2239
+
2240
  <div className="flex items-center gap-1">
2241
  {/* Delete button */}
2242
  <Button
 
2250
  >
2251
  ×
2252
  </Button>
2253
+
2254
  {/* Output port (right side) - where connections go out */}
2255
  <Port className="out" nodeId={node.id} isOutput={true} onStartConnection={onStartConnection} />
2256
  </div>
2257
  </div>
2258
+
2259
  {/* Node Content - Contains all the controls and outputs */}
2260
  {/* Node Content Area - Contains all controls, inputs, and outputs */}
2261
  <div className="p-3 space-y-3">
 
2273
  </Button>
2274
  </div>
2275
  )}
2276
+
2277
  {/* Edit prompt input and improvement section */}
2278
  <div className="space-y-2">
2279
+ <div className="text-xs text-muted-foreground mb-1">Edit Instructions</div>
2280
  <Textarea
2281
  className="w-full"
2282
  placeholder="Describe what to edit (e.g., 'make it brighter', 'add more contrast', 'make it look vintage')"
 
2284
  onChange={handlePromptChange}
2285
  rows={3}
2286
  />
2287
+
2288
  {/* AI-powered prompt improvement button */}
2289
  <Button
2290
  variant="outline"
 
2297
  ✨ Improve with Gemini
2298
  </Button>
2299
  </div>
2300
+
2301
  {/* Process button - starts the editing operation */}
2302
+ <Button
2303
  className="w-full"
2304
  onClick={() => onProcess(node.id)}
2305
  disabled={node.isRunning || !node.editPrompt?.trim()}
2306
  title={
2307
+ !node.input ? "Connect an input first" :
2308
+ !node.editPrompt?.trim() ? "Enter edit instructions first" :
2309
+ "Apply the edit to the input image"
2310
  }
2311
  >
2312
  {node.isRunning ? "Processing..." : "Apply Edit"}
2313
  </Button>
2314
+
2315
  {/* Output section with history navigation and download */}
2316
  <NodeOutputSection
2317
  nodeId={node.id}
2318
  output={node.output}
2319
  downloadFileName={`edit-${Date.now()}.png`}
2320
  />
2321
+
2322
  {/* Error display */}
2323
  {node.error && (
2324
  <div className="text-xs text-red-400 mt-2 p-2 bg-red-900/20 rounded">
app/page.tsx CHANGED
@@ -66,10 +66,10 @@ const uid = () => Math.random().toString(36).slice(2, 9);
66
  */
67
  function generateMergePrompt(characterData: { image: string; label: string }[]): string {
68
  const count = characterData.length;
69
-
70
  // Create a summary of all images being processed
71
  const labels = characterData.map((d, i) => `Image ${i + 1} (${d.label})`).join(", ");
72
-
73
  // Return comprehensive prompt with specific instructions for natural-looking merge
74
  return `MERGE TASK: Create a natural, cohesive group photo combining ALL subjects from ${count} provided images.
75
 
@@ -109,13 +109,13 @@ async function copyImageToClipboard(dataUrl: string) {
109
  try {
110
  const response = await fetch(dataUrl);
111
  const blob = await response.blob();
112
-
113
  // Convert to PNG if not already PNG
114
  if (blob.type !== 'image/png') {
115
  const canvas = document.createElement('canvas');
116
  const ctx = canvas.getContext('2d');
117
  const img = new Image();
118
-
119
  await new Promise((resolve) => {
120
  img.onload = () => {
121
  canvas.width = img.width;
@@ -125,11 +125,11 @@ async function copyImageToClipboard(dataUrl: string) {
125
  };
126
  img.src = dataUrl;
127
  });
128
-
129
  const pngBlob = await new Promise<Blob>((resolve) => {
130
  canvas.toBlob((blob) => resolve(blob!), 'image/png');
131
  });
132
-
133
  await navigator.clipboard.write([
134
  new ClipboardItem({ 'image/png': pngBlob })
135
  ]);
@@ -198,24 +198,24 @@ type BackgroundNode = NodeBase & {
198
  output?: string; // Processed image with new background
199
  backgroundType: "color" | "gradient" | "image" | "city" | "photostudio" | "upload" | "custom"; // Type of background to apply
200
  backgroundColor?: string; // Hex color code for solid color backgrounds
201
-
202
  // Gradient background properties
203
  gradientDirection?: string; // Direction of gradient (to right, to bottom, radial, etc.)
204
  gradientStartColor?: string; // Starting color of gradient
205
  gradientEndColor?: string; // Ending color of gradient
206
-
207
  backgroundImage?: string; // URL/path for preset background images
208
-
209
  // City scene properties
210
  citySceneType?: string; // Type of city scene (busy_street, times_square, etc.)
211
  cityTimeOfDay?: string; // Time of day for city scene
212
-
213
  // Photo studio properties
214
  studioSetup?: string; // Studio background setup type
215
  studioBackgroundColor?: string; // Color for colored seamless background
216
  studioLighting?: string; // Studio lighting setup
217
  faceCamera?: boolean; // Whether to position character facing camera
218
-
219
  customBackgroundImage?: string; // User-uploaded background image data
220
  customPrompt?: string; // AI prompt for generating custom backgrounds
221
  isRunning?: boolean; // Processing state indicator
@@ -440,7 +440,7 @@ function screenToWorld(
440
 
441
  function useNodeDrag(
442
  nodeId: string,
443
- scaleRef: React.MutableRefObject<number>,
444
  initial: { x: number; y: number },
445
  onUpdatePosition: (id: string, x: number, y: number) => void
446
  ) {
@@ -449,11 +449,11 @@ function useNodeDrag(
449
  const start = useRef<{ sx: number; sy: number; ox: number; oy: number } | null>(
450
  null
451
  );
452
-
453
  useEffect(() => {
454
  setLocalPos(initial);
455
  }, [initial.x, initial.y]);
456
-
457
  const onPointerDown = (e: React.PointerEvent) => {
458
  e.stopPropagation();
459
  dragging.current = true;
@@ -477,13 +477,13 @@ function useNodeDrag(
477
  return { pos: localPos, onPointerDown, onPointerMove, onPointerUp };
478
  }
479
 
480
- function Port({
481
- className,
482
  nodeId,
483
  isOutput,
484
  onStartConnection,
485
  onEndConnection
486
- }: {
487
  className?: string;
488
  nodeId?: string;
489
  isOutput?: boolean;
@@ -496,7 +496,7 @@ function Port({
496
  onStartConnection(nodeId);
497
  }
498
  };
499
-
500
  const handlePointerUp = (e: React.PointerEvent) => {
501
  e.stopPropagation();
502
  if (!isOutput && nodeId && onEndConnection) {
@@ -505,8 +505,8 @@ function Port({
505
  };
506
 
507
  return (
508
- <div
509
- className={cx("nb-port", className)}
510
  onPointerDown={handlePointerDown}
511
  onPointerUp={handlePointerUp}
512
  onPointerEnter={handlePointerUp}
@@ -533,7 +533,7 @@ function CharacterNodeView({
533
  }) {
534
  const { pos, onPointerDown, onPointerMove, onPointerUp } = useNodeDrag(
535
  node.id,
536
- scaleRef,
537
  { x: node.x, y: node.y },
538
  onUpdatePosition
539
  );
@@ -570,7 +570,7 @@ function CharacterNodeView({
570
 
571
  return (
572
  <div
573
- className="nb-node absolute text-white w-[340px] select-none"
574
  style={{ left: pos.x, top: pos.y }}
575
  onDrop={onDrop}
576
  onDragOver={(e) => e.preventDefault()}
@@ -588,7 +588,7 @@ function CharacterNodeView({
588
  onChange={(e) => onChangeLabel(node.id, e.target.value)}
589
  />
590
  <div className="flex items-center gap-2">
591
- <Button
592
  variant="ghost" size="icon" className="text-destructive hover:bg-destructive/20 h-6 w-6"
593
  onClick={(e) => {
594
  e.stopPropagation();
@@ -603,8 +603,8 @@ function CharacterNodeView({
603
  >
604
  ×
605
  </Button>
606
- <Port
607
- className="out"
608
  nodeId={node.id}
609
  isOutput={true}
610
  onStartConnection={onStartConnection}
@@ -612,7 +612,7 @@ function CharacterNodeView({
612
  </div>
613
  </div>
614
  <div className="p-3 space-y-3">
615
- <div className="aspect-[4/5] w-full rounded-xl bg-black/40 grid place-items-center overflow-hidden">
616
  <img
617
  src={node.image}
618
  alt="character"
@@ -637,12 +637,12 @@ function CharacterNodeView({
637
  await navigator.clipboard.write([
638
  new ClipboardItem({ [blob.type]: blob })
639
  ]);
640
-
641
  // Show visual feedback
642
  const img = e.currentTarget;
643
  const originalFilter = img.style.filter;
644
  img.style.filter = "brightness(1.2)";
645
-
646
  setTimeout(() => {
647
  img.style.filter = originalFilter;
648
  }, 500);
@@ -654,7 +654,7 @@ function CharacterNodeView({
654
  />
655
  </div>
656
  <div className="flex gap-2">
657
- <label className="text-xs bg-white/10 hover:bg-white/20 rounded px-3 py-1 cursor-pointer">
658
  Upload
659
  <input
660
  type="file"
@@ -668,20 +668,20 @@ function CharacterNodeView({
668
  // Reset input safely
669
  try {
670
  e.currentTarget.value = "";
671
- } catch {}
672
  }
673
  }}
674
  />
675
  </label>
676
  <button
677
- className="text-xs bg-white/10 hover:bg-white/20 rounded px-3 py-1"
678
  onClick={async () => {
679
  try {
680
  const text = await navigator.clipboard.readText();
681
  if (text && (text.startsWith("http") || text.startsWith("data:image"))) {
682
  onChangeImage(node.id, text);
683
  }
684
- } catch {}
685
  }}
686
  >
687
  Paste URL
@@ -724,15 +724,15 @@ function MergeNodeView({
724
 
725
 
726
  return (
727
- <div className="nb-node absolute text-white w-[420px]" style={{ left: pos.x, top: pos.y }}>
728
  <div
729
  className="nb-header cursor-grab active:cursor-grabbing rounded-t-[14px] px-3 py-2 flex items-center justify-between"
730
  onPointerDown={onPointerDown}
731
  onPointerMove={onPointerMove}
732
  onPointerUp={onPointerUp}
733
  >
734
- <Port
735
- className="in"
736
  nodeId={node.id}
737
  isOutput={false}
738
  onEndConnection={onEndConnection}
@@ -756,8 +756,8 @@ function MergeNodeView({
756
  >
757
  ×
758
  </Button>
759
- <Port
760
- className="out"
761
  nodeId={node.id}
762
  isOutput={true}
763
  onStartConnection={onStartConnection}
@@ -765,16 +765,16 @@ function MergeNodeView({
765
  </div>
766
  </div>
767
  <div className="p-3 space-y-3">
768
- <div className="text-xs text-white/70">Inputs</div>
769
  <div className="flex flex-wrap gap-2">
770
  {node.inputs.map((id) => {
771
  const inputNode = allNodes.find((n) => n.id === id);
772
  if (!inputNode) return null;
773
-
774
  // Get image and label based on node type
775
  let image: string | null = null;
776
  let label = "";
777
-
778
  if (inputNode.type === "CHARACTER") {
779
  image = (inputNode as CharacterNode).image;
780
  label = (inputNode as CharacterNode).label || "Character";
@@ -789,14 +789,14 @@ function MergeNodeView({
789
  // Node without output yet
790
  label = `${inputNode.type} (pending)`;
791
  }
792
-
793
  return (
794
- <div key={id} className="flex items-center gap-2 bg-white/10 rounded px-2 py-1">
795
  {image && (
796
- <div className="w-6 h-6 rounded overflow-hidden bg-black/20">
797
- <img
798
- src={image}
799
- className="w-full h-full object-contain cursor-pointer hover:opacity-80"
800
  alt="inp"
801
  onClick={async () => {
802
  try {
@@ -817,12 +817,12 @@ function MergeNodeView({
817
  await navigator.clipboard.write([
818
  new ClipboardItem({ [blob.type]: blob })
819
  ]);
820
-
821
  // Show visual feedback
822
  const img = e.currentTarget;
823
  const originalFilter = img.style.filter;
824
  img.style.filter = "brightness(1.2)";
825
-
826
  setTimeout(() => {
827
  img.style.filter = originalFilter;
828
  }, 300);
@@ -874,9 +874,9 @@ function MergeNodeView({
874
  </div>
875
  <div className="w-full min-h-[200px] max-h-[400px] rounded-xl bg-black/40 grid place-items-center">
876
  {node.output ? (
877
- <img
878
- src={node.output}
879
- className="w-full h-auto max-h-[400px] object-contain rounded-xl cursor-pointer hover:opacity-80 transition-opacity"
880
  alt="output"
881
  onClick={async () => {
882
  if (node.output) {
@@ -900,12 +900,12 @@ function MergeNodeView({
900
  await navigator.clipboard.write([
901
  new ClipboardItem({ [blob.type]: blob })
902
  ]);
903
-
904
  // Show visual feedback
905
  const img = e.currentTarget;
906
  const originalFilter = img.style.filter;
907
  img.style.filter = "brightness(1.2)";
908
-
909
  setTimeout(() => {
910
  img.style.filter = originalFilter;
911
  }, 500);
@@ -1032,7 +1032,7 @@ export default function EditorPage() {
1032
  alert('OAuth client ID not configured. Please check environment variables.');
1033
  return;
1034
  }
1035
-
1036
  window.location.href = await oauthLoginUrl({
1037
  clientId,
1038
  redirectUrl: `${window.location.origin}/api/auth/callback`
@@ -1042,12 +1042,41 @@ export default function EditorPage() {
1042
 
1043
  // Connection dragging state
1044
  const [draggingFrom, setDraggingFrom] = useState<string | null>(null);
1045
- const [dragPos, setDragPos] = useState<{x: number, y: number} | null>(null);
1046
-
1047
  // API Token state (restored for manual review)
1048
  const [apiToken, setApiToken] = useState("");
1049
  const [showHelpSidebar, setShowHelpSidebar] = useState(false);
1050
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1051
  // HF PRO AUTHENTICATION
1052
  const [isHfProLoggedIn, setIsHfProLoggedIn] = useState(false);
1053
  const [isCheckingAuth, setIsCheckingAuth] = useState(true);
@@ -1143,9 +1172,9 @@ export default function EditorPage() {
1143
  // - MERGE nodes (can have output after merging)
1144
  // - Any processing node (BACKGROUND, CLOTHES, BLEND, etc.)
1145
  // - Even unprocessed nodes (for configuration chaining)
1146
-
1147
  // All nodes can be connected for chaining
1148
- setNodes(prev => prev.map(n =>
1149
  n.id === nodeId ? { ...n, input: draggingFrom } : n
1150
  ));
1151
  }
@@ -1161,14 +1190,14 @@ export default function EditorPage() {
1161
  const countPendingConfigurations = (startNodeId: string): number => {
1162
  let count = 0;
1163
  const visited = new Set<string>();
1164
-
1165
  const traverse = (nodeId: string) => {
1166
  if (visited.has(nodeId)) return;
1167
  visited.add(nodeId);
1168
-
1169
  const node = nodes.find(n => n.id === nodeId);
1170
  if (!node) return;
1171
-
1172
  // Check if this node has configuration but no output
1173
  if (!(node as any).output && node.type !== "CHARACTER" && node.type !== "MERGE") {
1174
  const config = getNodeConfiguration(node);
@@ -1176,22 +1205,22 @@ export default function EditorPage() {
1176
  count++;
1177
  }
1178
  }
1179
-
1180
  // Check upstream
1181
  const upstreamId = (node as any).input;
1182
  if (upstreamId) {
1183
  traverse(upstreamId);
1184
  }
1185
  };
1186
-
1187
  traverse(startNodeId);
1188
  return count;
1189
  };
1190
-
1191
  // Helper to extract configuration from a node
1192
  const getNodeConfiguration = (node: AnyNode): Record<string, unknown> => {
1193
  const config: Record<string, unknown> = {};
1194
-
1195
  switch (node.type) {
1196
  case "BACKGROUND":
1197
  if ((node as BackgroundNode).backgroundType) {
@@ -1201,20 +1230,20 @@ export default function EditorPage() {
1201
  config.backgroundImage = bgNode.backgroundImage;
1202
  config.customBackgroundImage = bgNode.customBackgroundImage;
1203
  config.customPrompt = bgNode.customPrompt;
1204
-
1205
  // Gradient properties
1206
  if (bgNode.backgroundType === "gradient") {
1207
  config.gradientDirection = bgNode.gradientDirection;
1208
  config.gradientStartColor = bgNode.gradientStartColor;
1209
  config.gradientEndColor = bgNode.gradientEndColor;
1210
  }
1211
-
1212
  // City scene properties
1213
  if (bgNode.backgroundType === "city") {
1214
  config.citySceneType = bgNode.citySceneType;
1215
  config.cityTimeOfDay = bgNode.cityTimeOfDay;
1216
  }
1217
-
1218
  // Photo studio properties
1219
  if (bgNode.backgroundType === "photostudio") {
1220
  config.studioSetup = bgNode.studioSetup;
@@ -1295,7 +1324,7 @@ export default function EditorPage() {
1295
  }
1296
  break;
1297
  }
1298
-
1299
  return config;
1300
  };
1301
 
@@ -1312,34 +1341,34 @@ export default function EditorPage() {
1312
  let accumulatedParams: any = {};
1313
  const processedNodes: string[] = []; // Track which nodes' configs we're applying
1314
  const inputId = (node as any).input;
1315
-
1316
  if (inputId) {
1317
  // Track unprocessed MERGE nodes that need to be executed
1318
  const unprocessedMerges: MergeNode[] = [];
1319
-
1320
  // Find the source image by traversing the chain backwards
1321
  const findSourceImage = (currentNodeId: string, visited: Set<string> = new Set()): string | null => {
1322
  if (visited.has(currentNodeId)) return null;
1323
  visited.add(currentNodeId);
1324
-
1325
  const currentNode = nodes.find(n => n.id === currentNodeId);
1326
  if (!currentNode) return null;
1327
-
1328
  // If this is a CHARACTER node, return its image
1329
  if (currentNode.type === "CHARACTER") {
1330
  return (currentNode as CharacterNode).image;
1331
  }
1332
-
1333
  // If this is a MERGE node with output, return its output
1334
  if (currentNode.type === "MERGE" && (currentNode as MergeNode).output) {
1335
  return (currentNode as MergeNode).output || null;
1336
  }
1337
-
1338
  // If any node has been processed, return its output
1339
  if ((currentNode as any).output) {
1340
  return (currentNode as any).output;
1341
  }
1342
-
1343
  // For MERGE nodes without output, we need to process them first
1344
  if (currentNode.type === "MERGE") {
1345
  const merge = currentNode as MergeNode;
@@ -1355,39 +1384,39 @@ export default function EditorPage() {
1355
  if (inputImage) return inputImage;
1356
  }
1357
  }
1358
-
1359
  // Otherwise, check upstream
1360
  const upstreamId = (currentNode as any).input;
1361
  if (upstreamId) {
1362
  return findSourceImage(upstreamId, visited);
1363
  }
1364
-
1365
  return null;
1366
  };
1367
-
1368
  // Collect all configurations from unprocessed nodes in the chain
1369
  const collectConfigurations = (currentNodeId: string, visited: Set<string> = new Set()): any => {
1370
  if (visited.has(currentNodeId)) return {};
1371
  visited.add(currentNodeId);
1372
-
1373
  const currentNode = nodes.find(n => n.id === currentNodeId);
1374
  if (!currentNode) return {};
1375
-
1376
  let configs: any = {};
1377
-
1378
  // First, collect from upstream nodes
1379
  const upstreamId = (currentNode as any).input;
1380
  if (upstreamId) {
1381
  configs = collectConfigurations(upstreamId, visited);
1382
  }
1383
-
1384
  // Add this node's configuration only if:
1385
  // 1. It's the current node being processed, OR
1386
  // 2. It hasn't been processed yet (no output) AND it's not the current node
1387
- const shouldIncludeConfig =
1388
  currentNodeId === nodeId || // Always include current node's config
1389
  (!(currentNode as any).output && currentNodeId !== nodeId); // Include unprocessed intermediate nodes
1390
-
1391
  if (shouldIncludeConfig) {
1392
  const nodeConfig = getNodeConfiguration(currentNode);
1393
  if (Object.keys(nodeConfig).length > 0) {
@@ -1398,66 +1427,66 @@ export default function EditorPage() {
1398
  }
1399
  }
1400
  }
1401
-
1402
  return configs;
1403
  };
1404
-
1405
  // Find the source image
1406
  inputImage = findSourceImage(inputId);
1407
-
1408
  // If we found unprocessed merges, we need to execute them first
1409
  if (unprocessedMerges.length > 0 && !inputImage) {
1410
-
1411
  // Process each merge node
1412
  for (const merge of unprocessedMerges) {
1413
  // Set loading state for the merge
1414
- setNodes(prev => prev.map(n =>
1415
  n.id === merge.id ? { ...n, isRunning: true, error: null } : n
1416
  ));
1417
-
1418
  try {
1419
  const mergeOutput = await executeMerge(merge);
1420
-
1421
  // Update the merge node with output
1422
- setNodes(prev => prev.map(n =>
1423
  n.id === merge.id ? { ...n, output: mergeOutput || undefined, isRunning: false, error: null } : n
1424
  ));
1425
-
1426
  // Track that we processed this merge as part of the chain
1427
  processedNodes.push(merge.id);
1428
-
1429
  // Now use this as our input image if it's the direct input
1430
  if (inputId === merge.id) {
1431
  inputImage = mergeOutput;
1432
  }
1433
  } catch (e: any) {
1434
  console.error("Auto-merge error:", e);
1435
- setNodes(prev => prev.map(n =>
1436
  n.id === merge.id ? { ...n, isRunning: false, error: e?.message || "Merge failed" } : n
1437
  ));
1438
  // Abort the main processing if merge failed
1439
- setNodes(prev => prev.map(n =>
1440
  n.id === nodeId ? { ...n, error: "Failed to process upstream MERGE node", isRunning: false } : n
1441
  ));
1442
  return;
1443
  }
1444
  }
1445
-
1446
  // After processing merges, try to find the source image again
1447
  if (!inputImage) {
1448
  inputImage = findSourceImage(inputId);
1449
  }
1450
  }
1451
-
1452
  // Collect configurations from the chain
1453
  accumulatedParams = collectConfigurations(inputId, new Set());
1454
  }
1455
 
1456
  if (!inputImage) {
1457
- const errorMsg = inputId
1458
  ? "No source image found in the chain. Connect to a CHARACTER node or processed node."
1459
  : "No input connected. Connect an image source to this node.";
1460
- setNodes(prev => prev.map(n =>
1461
  n.id === nodeId ? { ...n, error: errorMsg, isRunning: false } : n
1462
  ));
1463
  return;
@@ -1466,11 +1495,11 @@ export default function EditorPage() {
1466
  // Add current node's configuration
1467
  const currentNodeConfig = getNodeConfiguration(node);
1468
  const params = { ...accumulatedParams, ...currentNodeConfig };
1469
-
1470
  // Count how many unprocessed nodes we're combining
1471
- const unprocessedNodeCount = Object.keys(params).length > 0 ?
1472
  (processedNodes.length + 1) : 1;
1473
-
1474
  // Show info about batch processing
1475
  if (unprocessedNodeCount > 1) {
1476
  } else {
@@ -1489,7 +1518,7 @@ export default function EditorPage() {
1489
  if (inputImage && inputImage.length > 10 * 1024 * 1024) { // 10MB limit warning
1490
  console.warn("Large input image detected, size:", (inputImage.length / (1024 * 1024)).toFixed(2) + "MB");
1491
  }
1492
-
1493
  // Check if params contains custom images and validate them
1494
  if (params.clothesImage) {
1495
  // Validate it's a proper data URL
@@ -1497,49 +1526,48 @@ export default function EditorPage() {
1497
  throw new Error("Invalid clothes image format. Please upload a valid image.");
1498
  }
1499
  }
1500
-
1501
  if (params.customBackgroundImage) {
1502
  // Validate it's a proper data URL
1503
  if (!params.customBackgroundImage.startsWith('data:') && !params.customBackgroundImage.startsWith('http') && !params.customBackgroundImage.startsWith('/')) {
1504
  throw new Error("Invalid background image format. Please upload a valid image.");
1505
  }
1506
  }
1507
-
1508
  // Log request details for debugging
1509
-
1510
- // ORIGINAL PROCESSING LOGIC RESTORED (HF processing commented out)
1511
- /*
1512
- // Only use HF + fal.ai processing
1513
- if (!isHfProLoggedIn) {
1514
- setNodes(prev => prev.map(n =>
1515
- n.id === nodeId ? { ...n, error: "Please login with HF Pro to use fal.ai processing", isRunning: false } : n
1516
- ));
1517
- return;
1518
- }
1519
 
1520
- // Make a SINGLE API call with fal.ai processing
1521
- const res = await fetch("/api/hf-process", {
1522
- method: "POST",
1523
- headers: { "Content-Type": "application/json" },
1524
- body: JSON.stringify({
1525
- type: "COMBINED",
1526
- image: inputImage,
1527
- params
1528
- }),
1529
- });
1530
- */
1531
-
1532
- // Make a SINGLE API call with all accumulated parameters
1533
- const res = await fetch("/api/process", {
1534
- method: "POST",
1535
- headers: { "Content-Type": "application/json" },
1536
- body: JSON.stringify({
1537
- type: "COMBINED", // Indicate this is a combined processing
1538
- image: inputImage,
1539
- params,
1540
- apiToken: apiToken || undefined
1541
- }),
1542
- });
 
 
 
 
 
 
 
 
 
1543
 
1544
  // Check if response is actually JSON before parsing
1545
  const contentType = res.headers.get("content-type");
@@ -1552,8 +1580,8 @@ export default function EditorPage() {
1552
  const data = await res.json();
1553
  if (!res.ok) {
1554
  // Handle both string and object error formats
1555
- const errorMessage = typeof data.error === 'string'
1556
- ? data.error
1557
  : data.error?.message || JSON.stringify(data.error) || "Processing failed";
1558
  throw new Error(errorMessage);
1559
  }
@@ -1573,11 +1601,11 @@ export default function EditorPage() {
1573
  }));
1574
 
1575
  // Add to node's history
1576
- const description = unprocessedNodeCount > 1
1577
  ? `Combined ${unprocessedNodeCount} transformations`
1578
  : `${node.type} transformation`;
1579
-
1580
-
1581
  if (unprocessedNodeCount > 1) {
1582
  }
1583
  } catch (e: any) {
@@ -1660,13 +1688,13 @@ export default function EditorPage() {
1660
  // Get images from merge inputs - now accepts any node type
1661
  const mergeImages: string[] = [];
1662
  const inputData: { image: string; label: string }[] = [];
1663
-
1664
  for (const inputId of merge.inputs) {
1665
  const inputNode = nodes.find(n => n.id === inputId);
1666
  if (inputNode) {
1667
  let image: string | null = null;
1668
  let label = "";
1669
-
1670
  if (inputNode.type === "CHARACTER") {
1671
  image = (inputNode as CharacterNode).image;
1672
  label = (inputNode as CharacterNode).label || "";
@@ -1680,7 +1708,7 @@ export default function EditorPage() {
1680
  image = mergeOutput !== undefined ? mergeOutput : null;
1681
  label = "Merged Image";
1682
  }
1683
-
1684
  if (image) {
1685
  // Validate image format
1686
  if (!image.startsWith('data:') && !image.startsWith('http') && !image.startsWith('/')) {
@@ -1692,15 +1720,15 @@ export default function EditorPage() {
1692
  }
1693
  }
1694
  }
1695
-
1696
  if (mergeImages.length < 2) {
1697
  throw new Error("Not enough valid inputs for merge. Need at least 2 images.");
1698
  }
1699
-
1700
  // Log merge details for debugging
1701
-
1702
  const prompt = generateMergePrompt(inputData);
1703
-
1704
  // ORIGINAL MERGE LOGIC RESTORED (HF processing commented out)
1705
  /*
1706
  const res = await fetch("/api/hf-process", {
@@ -1713,19 +1741,19 @@ export default function EditorPage() {
1713
  }),
1714
  });
1715
  */
1716
-
1717
  // Use the process route instead of merge route
1718
  const res = await fetch("/api/process", {
1719
  method: "POST",
1720
  headers: { "Content-Type": "application/json" },
1721
- body: JSON.stringify({
1722
  type: "MERGE",
1723
- images: mergeImages,
1724
  prompt,
1725
  apiToken: apiToken || undefined
1726
  }),
1727
  });
1728
-
1729
  // Check if response is actually JSON before parsing
1730
  const contentType = res.headers.get("content-type");
1731
  if (!contentType || !contentType.includes("application/json")) {
@@ -1733,31 +1761,40 @@ export default function EditorPage() {
1733
  console.error("Non-JSON response received:", textResponse);
1734
  throw new Error("Server returned an error page instead of JSON. Check your API key configuration.");
1735
  }
1736
-
1737
  const data = await res.json();
1738
  if (!res.ok) {
1739
  throw new Error(data.error || "Merge failed");
1740
  }
1741
-
1742
  return data.image || (data.images?.[0] as string) || null;
1743
  };
1744
-
1745
  const runMerge = async (mergeId: string) => {
 
 
 
 
 
 
 
 
 
1746
  setNodes((prev) => prev.map((n) => (n.id === mergeId && n.type === "MERGE" ? { ...n, isRunning: true, error: null } : n)));
1747
  try {
1748
  const merge = (nodes.find((n) => n.id === mergeId) as MergeNode) || null;
1749
  if (!merge) return;
1750
-
1751
  // Get input nodes with their labels - now accepts any node type
1752
  const inputData = merge.inputs
1753
  .map((id, index) => {
1754
  const inputNode = nodes.find((n) => n.id === id);
1755
  if (!inputNode) return null;
1756
-
1757
  // Support CHARACTER nodes, processed nodes, and MERGE outputs
1758
  let image: string | null = null;
1759
  let label = "";
1760
-
1761
  if (inputNode.type === "CHARACTER") {
1762
  image = (inputNode as CharacterNode).image;
1763
  label = (inputNode as CharacterNode).label || `CHARACTER ${index + 1}`;
@@ -1771,17 +1808,17 @@ export default function EditorPage() {
1771
  image = mergeOutput !== undefined ? mergeOutput : null;
1772
  label = `Merged Image ${index + 1}`;
1773
  }
1774
-
1775
  if (!image) return null;
1776
-
1777
  return { image, label };
1778
  })
1779
  .filter(Boolean) as { image: string; label: string }[];
1780
-
1781
  if (inputData.length < 2) throw new Error("Connect at least two nodes with images (CHARACTER nodes or processed nodes).");
1782
-
1783
  // Debug: Log what we're sending
1784
-
1785
  // Generate dynamic prompt based on number of inputs
1786
  const prompt = generateMergePrompt(inputData);
1787
  const imgs = inputData.map(d => d.image);
@@ -1807,14 +1844,14 @@ export default function EditorPage() {
1807
  const res = await fetch("/api/process", {
1808
  method: "POST",
1809
  headers: { "Content-Type": "application/json" },
1810
- body: JSON.stringify({
1811
  type: "MERGE",
1812
- images: imgs,
1813
  prompt,
1814
  apiToken: apiToken || undefined
1815
  }),
1816
  });
1817
-
1818
  // Check if response is actually JSON before parsing
1819
  const contentType = res.headers.get("content-type");
1820
  if (!contentType || !contentType.includes("application/json")) {
@@ -1822,7 +1859,7 @@ export default function EditorPage() {
1822
  console.error("Non-JSON response received:", textResponse);
1823
  throw new Error("Server returned an error page instead of JSON. Check your API key configuration.");
1824
  }
1825
-
1826
  const js = await res.json();
1827
  if (!res.ok) {
1828
  // Show more helpful error messages
@@ -1834,7 +1871,7 @@ export default function EditorPage() {
1834
  }
1835
  const out = js.image || (js.images?.[0] as string) || null;
1836
  setNodes((prev) => prev.map((n) => (n.id === mergeId && n.type === "MERGE" ? { ...n, output: out, isRunning: false } : n)));
1837
-
1838
  // Add merge result to node's history
1839
  if (out) {
1840
  const inputLabels = merge.inputs.map((id, index) => {
@@ -1844,7 +1881,7 @@ export default function EditorPage() {
1844
  }
1845
  return `${inputNode?.type || 'Node'} ${index + 1}`;
1846
  });
1847
-
1848
  }
1849
  } catch (e: any) {
1850
  console.error("Merge error:", e);
@@ -1861,11 +1898,11 @@ export default function EditorPage() {
1861
  maxX = Math.max(maxX, node.x + 500);
1862
  maxY = Math.max(maxY, node.y + 500);
1863
  });
1864
- return {
1865
- x: minX,
1866
- y: minY,
1867
- width: maxX - minX,
1868
- height: maxY - minY
1869
  };
1870
  }, [nodes]);
1871
 
@@ -1887,9 +1924,9 @@ export default function EditorPage() {
1887
  const width = widths[n.type] || 320;
1888
  return { x: n.x + width - 10, y: n.y + 25 };
1889
  };
1890
-
1891
  const getNodeInputPort = (n: AnyNode) => ({ x: n.x + 10, y: n.y + 25 });
1892
-
1893
  const createPath = (x1: number, y1: number, x2: number, y2: number) => {
1894
  const dx = x2 - x1;
1895
  const dy = y2 - y1;
@@ -1897,9 +1934,9 @@ export default function EditorPage() {
1897
  const controlOffset = Math.min(200, Math.max(50, distance * 0.3));
1898
  return `M ${x1} ${y1} C ${x1 + controlOffset} ${y1}, ${x2 - controlOffset} ${y2}, ${x2} ${y2}`;
1899
  };
1900
-
1901
  const paths: { path: string; active?: boolean; processing?: boolean }[] = [];
1902
-
1903
  // Handle all connections
1904
  for (const node of nodes) {
1905
  if (node.type === "MERGE") {
@@ -1911,7 +1948,7 @@ export default function EditorPage() {
1911
  const start = getNodeOutputPort(inputNode);
1912
  const end = getNodeInputPort(node);
1913
  const isProcessing = merge.isRunning; // Only animate to the currently processing merge node
1914
- paths.push({
1915
  path: createPath(start.x, start.y, end.x, end.y),
1916
  processing: isProcessing
1917
  });
@@ -1925,26 +1962,26 @@ export default function EditorPage() {
1925
  const start = getNodeOutputPort(inputNode);
1926
  const end = getNodeInputPort(node);
1927
  const isProcessing = (node as any).isRunning; // Only animate to the currently processing node
1928
- paths.push({
1929
  path: createPath(start.x, start.y, end.x, end.y),
1930
  processing: isProcessing
1931
  });
1932
  }
1933
  }
1934
  }
1935
-
1936
  // Dragging path
1937
  if (draggingFrom && dragPos) {
1938
  const sourceNode = nodes.find(n => n.id === draggingFrom);
1939
  if (sourceNode) {
1940
  const start = getNodeOutputPort(sourceNode);
1941
- paths.push({
1942
- path: createPath(start.x, start.y, dragPos.x, dragPos.y),
1943
- active: true
1944
  });
1945
  }
1946
  }
1947
-
1948
  return paths;
1949
  }, [nodes, draggingFrom, dragPos]);
1950
 
@@ -1997,29 +2034,29 @@ export default function EditorPage() {
1997
  const rect = containerRef.current!.getBoundingClientRect();
1998
  const world = screenToWorld(e.clientX, e.clientY, rect, tx, ty, scale);
1999
  setMenuWorld(world);
2000
-
2001
  // Menu dimensions
2002
  const menuWidth = 224; // w-56 = 224px
2003
  const menuHeight = 320; // Approximate height with max-h-[300px] + padding
2004
-
2005
  // Calculate position relative to container
2006
  let x = e.clientX - rect.left;
2007
  let y = e.clientY - rect.top;
2008
-
2009
  // Adjust if menu would go off right edge
2010
  if (x + menuWidth > rect.width) {
2011
  x = rect.width - menuWidth - 10;
2012
  }
2013
-
2014
  // Adjust if menu would go off bottom edge
2015
  if (y + menuHeight > rect.height) {
2016
  y = rect.height - menuHeight - 10;
2017
  }
2018
-
2019
  // Ensure minimum margins from edges
2020
  x = Math.max(10, x);
2021
  y = Math.max(10, y);
2022
-
2023
  setMenuPos({ x, y });
2024
  setMenuOpen(true);
2025
  };
@@ -2030,8 +2067,8 @@ export default function EditorPage() {
2030
  x: menuWorld.x,
2031
  y: menuWorld.y,
2032
  };
2033
-
2034
- switch(kind) {
2035
  case "CHARACTER":
2036
  addCharacter(menuWorld);
2037
  break;
@@ -2074,24 +2111,89 @@ export default function EditorPage() {
2074
 
2075
  return (
2076
  <div className="min-h-[100svh] bg-background text-foreground">
2077
- <header className="flex items-center justify-between px-6 py-4 border-b border-border/60 bg-card/70 backdrop-blur">
2078
  <h1 className="text-lg font-semibold tracking-wide">
2079
  <span className="mr-2" aria-hidden>🍌</span>Nano Banana Editor
2080
  </h1>
2081
  <div className="flex items-center gap-3">
2082
- {/* ORIGINAL API TOKEN INPUT RESTORED */}
2083
- <label htmlFor="api-token" className="text-sm font-medium text-muted-foreground">
2084
- API Token:
2085
- </label>
2086
- <Input
2087
- id="api-token"
2088
- type="password"
2089
- placeholder="Enter your Google Gemini API token"
2090
- value={apiToken}
2091
- onChange={(e) => setApiToken(e.target.value)}
2092
- className="w-64"
2093
- />
2094
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2095
  <Button
2096
  variant="outline"
2097
  size="sm"
@@ -2101,8 +2203,6 @@ export default function EditorPage() {
2101
  >
2102
  Help
2103
  </Button>
2104
-
2105
-
2106
  </div>
2107
  </header>
2108
 
@@ -2110,7 +2210,7 @@ export default function EditorPage() {
2110
  {showHelpSidebar && (
2111
  <>
2112
  {/* Backdrop */}
2113
- <div
2114
  className="fixed inset-0 bg-black/50 z-[9998]"
2115
  onClick={() => setShowHelpSidebar(false)}
2116
  />
@@ -2128,73 +2228,72 @@ export default function EditorPage() {
2128
  <span className="text-lg">×</span>
2129
  </Button>
2130
  </div>
2131
-
2132
  <div className="space-y-6">
2133
- {/* ORIGINAL HELP CONTENT RESTORED (HF help commented out) */}
2134
- {/*
2135
  <div>
2136
- <h3 className="font-semibold mb-3 text-foreground">🤗 HF Pro Login</h3>
2137
  <div className="text-sm text-muted-foreground space-y-3">
2138
  <div className="p-3 bg-primary/10 border border-primary/20 rounded-lg">
2139
- <p className="font-medium text-primary mb-2">Step 1: Login with Hugging Face</p>
2140
- <p>Click "Login HF PRO" to authenticate with your Hugging Face account.</p>
 
2141
  </div>
2142
  <div className="p-3 bg-secondary border border-border rounded-lg">
2143
- <p className="font-medium text-secondary-foreground mb-2">Step 2: Access fal.ai Models</p>
2144
- <p>Once logged in, you'll have access to fal.ai's Gemini 2.5 Flash Image models.</p>
2145
- </div>
2146
- <div className="p-3 bg-accent border border-border rounded-lg">
2147
- <p className="font-medium text-accent-foreground mb-2">Step 3: Start Creating</p>
2148
- <p>Use the powerful fal.ai models for image generation, merging, editing, and style transfers.</p>
2149
  </div>
2150
  </div>
2151
  </div>
2152
- */}
2153
-
 
 
 
 
 
 
 
 
2154
  <div>
2155
- <h3 className="font-semibold mb-3 text-foreground">🔑 API Token Setup</h3>
2156
- <div className="text-sm text-muted-foreground space-y-3">
2157
- <div className="p-3 bg-primary/10 border border-primary/20 rounded-lg">
2158
- <p className="font-medium text-primary mb-2">Step 1: Get Your API Key</p>
2159
- <p>Visit <a href="https://aistudio.google.com/app/apikey" target="_blank" rel="noopener noreferrer" className="text-primary hover:underline font-medium">Google AI Studio</a> to create your free Gemini API key.</p>
2160
  </div>
2161
- <div className="p-3 bg-secondary border border-border rounded-lg">
2162
- <p className="font-medium text-secondary-foreground mb-2">Step 2: Add Your Token</p>
2163
- <p>Paste your API key in the "API Token" field in the top navigation bar.</p>
2164
  </div>
2165
- <div className="p-3 bg-accent border border-border rounded-lg">
2166
- <p className="font-medium text-accent-foreground mb-2">Step 3: Start Creating</p>
2167
- <p>Your token enables all AI features: image generation, merging, editing, and style transfers.</p>
2168
  </div>
2169
  </div>
2170
  </div>
2171
-
 
2172
  <div>
2173
- <h3 className="font-semibold mb-3 text-foreground">🎨 How to Use the Editor</h3>
2174
- <div className="text-sm text-muted-foreground space-y-2">
2175
- <p>• <strong>Adding Nodes:</strong> Right-click on the editor canvas and choose the node type you want, then drag and drop to position it</p>
2176
- <p>• <strong>Character Nodes:</strong> Upload or drag images to create character nodes</p>
2177
- <p>• <strong>Merge Nodes:</strong> Connect multiple characters to create group photos</p>
2178
- <p>• <strong>Style Nodes:</strong> Apply artistic styles and filters</p>
2179
- <p>• <strong>Background Nodes:</strong> Change or generate new backgrounds</p>
2180
- <p>• <strong>Edit Nodes:</strong> Make specific modifications with text prompts</p>
2181
- </div>
2182
- </div>
2183
-
2184
  <div className="p-4 bg-muted border border-border rounded-lg">
2185
  <h4 className="font-semibold text-foreground mb-2">🔒 Privacy & Security</h4>
2186
  <div className="text-sm text-muted-foreground space-y-1">
2187
- {/* ORIGINAL PRIVACY INFO RESTORED (HF privacy info commented out) */}
2188
- {/*
2189
- <p>• Your HF token is stored securely in HTTP-only cookies</p>
2190
- <p>• Authentication happens through Hugging Face OAuth</p>
2191
- <p>• You can logout anytime to revoke access</p>
2192
- <p>• Processing happens via fal.ai's secure infrastructure</p>
2193
- */}
2194
- <p>• Your API token is stored locally in your browser</p>
2195
- <p>• Tokens are never sent to our servers</p>
2196
- <p>• Keep your API key secure and don't share it</p>
2197
- <p>• You can revoke keys anytime in Google AI Studio</p>
2198
  </div>
2199
  </div>
2200
  </div>
@@ -2229,17 +2328,17 @@ export default function EditorPage() {
2229
  >
2230
  <div
2231
  className="absolute left-0 top-0 will-change-transform"
2232
- style={{
2233
- transform: `translate3d(${tx}px, ${ty}px, 0) scale(${scale})`,
2234
  transformOrigin: "0 0",
2235
  transformStyle: "preserve-3d",
2236
  backfaceVisibility: "hidden"
2237
  }}
2238
  >
2239
- <svg
2240
- className="absolute pointer-events-none z-0"
2241
- style={{
2242
- left: `${svgBounds.x}px`,
2243
  top: `${svgBounds.y}px`,
2244
  width: `${svgBounds.width}px`,
2245
  height: `${svgBounds.height}px`
@@ -2248,10 +2347,10 @@ export default function EditorPage() {
2248
  >
2249
  <defs>
2250
  <filter id="glow">
2251
- <feGaussianBlur stdDeviation="3" result="coloredBlur"/>
2252
  <feMerge>
2253
- <feMergeNode in="coloredBlur"/>
2254
- <feMergeNode in="SourceGraphic"/>
2255
  </feMerge>
2256
  </filter>
2257
  </defs>
@@ -2312,7 +2411,6 @@ export default function EditorPage() {
2312
  onEndConnection={handleEndSingleConnection}
2313
  onProcess={processNode}
2314
  onUpdatePosition={updateNodePosition}
2315
- apiToken={apiToken}
2316
  />
2317
  );
2318
  case "CLOTHES":
@@ -2352,7 +2450,6 @@ export default function EditorPage() {
2352
  onEndConnection={handleEndSingleConnection}
2353
  onProcess={processNode}
2354
  onUpdatePosition={updateNodePosition}
2355
- apiToken={apiToken}
2356
  />
2357
  );
2358
  case "CAMERA":
@@ -2429,26 +2526,26 @@ export default function EditorPage() {
2429
 
2430
  {menuOpen && (
2431
  <div
2432
- className="absolute z-50 rounded-xl border border-white/10 bg-[#111]/95 backdrop-blur p-1 w-56 shadow-2xl"
2433
  style={{ left: menuPos.x, top: menuPos.y }}
2434
  onMouseLeave={() => setMenuOpen(false)}
2435
  >
2436
- <div className="px-3 py-2 text-xs text-white/60">Add node</div>
2437
- <div
2438
  className="max-h-[300px] overflow-y-auto scrollbar-thin pr-1"
2439
  onWheel={(e) => e.stopPropagation()}
2440
  >
2441
- <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("CHARACTER")}>CHARACTER</button>
2442
- <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("MERGE")}>MERGE</button>
2443
- <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("BACKGROUND")}>BACKGROUND</button>
2444
- <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("CLOTHES")}>CLOTHES</button>
2445
- <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("STYLE")}>STYLE</button>
2446
- <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("EDIT")}>EDIT</button>
2447
- <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("CAMERA")}>CAMERA</button>
2448
- <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("AGE")}>AGE</button>
2449
- <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("FACE")}>FACE</button>
2450
- <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("LIGHTNING")}>LIGHTNING</button>
2451
- <button className="w-full text-left px-3 py-2 text-sm hover:bg-white/10 rounded-lg" onClick={() => addFromMenu("POSES")}>POSES</button>
2452
  </div>
2453
  </div>
2454
  )}
 
66
  */
67
  function generateMergePrompt(characterData: { image: string; label: string }[]): string {
68
  const count = characterData.length;
69
+
70
  // Create a summary of all images being processed
71
  const labels = characterData.map((d, i) => `Image ${i + 1} (${d.label})`).join(", ");
72
+
73
  // Return comprehensive prompt with specific instructions for natural-looking merge
74
  return `MERGE TASK: Create a natural, cohesive group photo combining ALL subjects from ${count} provided images.
75
 
 
109
  try {
110
  const response = await fetch(dataUrl);
111
  const blob = await response.blob();
112
+
113
  // Convert to PNG if not already PNG
114
  if (blob.type !== 'image/png') {
115
  const canvas = document.createElement('canvas');
116
  const ctx = canvas.getContext('2d');
117
  const img = new Image();
118
+
119
  await new Promise((resolve) => {
120
  img.onload = () => {
121
  canvas.width = img.width;
 
125
  };
126
  img.src = dataUrl;
127
  });
128
+
129
  const pngBlob = await new Promise<Blob>((resolve) => {
130
  canvas.toBlob((blob) => resolve(blob!), 'image/png');
131
  });
132
+
133
  await navigator.clipboard.write([
134
  new ClipboardItem({ 'image/png': pngBlob })
135
  ]);
 
198
  output?: string; // Processed image with new background
199
  backgroundType: "color" | "gradient" | "image" | "city" | "photostudio" | "upload" | "custom"; // Type of background to apply
200
  backgroundColor?: string; // Hex color code for solid color backgrounds
201
+
202
  // Gradient background properties
203
  gradientDirection?: string; // Direction of gradient (to right, to bottom, radial, etc.)
204
  gradientStartColor?: string; // Starting color of gradient
205
  gradientEndColor?: string; // Ending color of gradient
206
+
207
  backgroundImage?: string; // URL/path for preset background images
208
+
209
  // City scene properties
210
  citySceneType?: string; // Type of city scene (busy_street, times_square, etc.)
211
  cityTimeOfDay?: string; // Time of day for city scene
212
+
213
  // Photo studio properties
214
  studioSetup?: string; // Studio background setup type
215
  studioBackgroundColor?: string; // Color for colored seamless background
216
  studioLighting?: string; // Studio lighting setup
217
  faceCamera?: boolean; // Whether to position character facing camera
218
+
219
  customBackgroundImage?: string; // User-uploaded background image data
220
  customPrompt?: string; // AI prompt for generating custom backgrounds
221
  isRunning?: boolean; // Processing state indicator
 
440
 
441
  function useNodeDrag(
442
  nodeId: string,
443
+ scaleRef: React.MutableRefObject<number>,
444
  initial: { x: number; y: number },
445
  onUpdatePosition: (id: string, x: number, y: number) => void
446
  ) {
 
449
  const start = useRef<{ sx: number; sy: number; ox: number; oy: number } | null>(
450
  null
451
  );
452
+
453
  useEffect(() => {
454
  setLocalPos(initial);
455
  }, [initial.x, initial.y]);
456
+
457
  const onPointerDown = (e: React.PointerEvent) => {
458
  e.stopPropagation();
459
  dragging.current = true;
 
477
  return { pos: localPos, onPointerDown, onPointerMove, onPointerUp };
478
  }
479
 
480
+ function Port({
481
+ className,
482
  nodeId,
483
  isOutput,
484
  onStartConnection,
485
  onEndConnection
486
+ }: {
487
  className?: string;
488
  nodeId?: string;
489
  isOutput?: boolean;
 
496
  onStartConnection(nodeId);
497
  }
498
  };
499
+
500
  const handlePointerUp = (e: React.PointerEvent) => {
501
  e.stopPropagation();
502
  if (!isOutput && nodeId && onEndConnection) {
 
505
  };
506
 
507
  return (
508
+ <div
509
+ className={cx("nb-port", className)}
510
  onPointerDown={handlePointerDown}
511
  onPointerUp={handlePointerUp}
512
  onPointerEnter={handlePointerUp}
 
533
  }) {
534
  const { pos, onPointerDown, onPointerMove, onPointerUp } = useNodeDrag(
535
  node.id,
536
+ scaleRef,
537
  { x: node.x, y: node.y },
538
  onUpdatePosition
539
  );
 
570
 
571
  return (
572
  <div
573
+ className="nb-node absolute w-[340px] select-none"
574
  style={{ left: pos.x, top: pos.y }}
575
  onDrop={onDrop}
576
  onDragOver={(e) => e.preventDefault()}
 
588
  onChange={(e) => onChangeLabel(node.id, e.target.value)}
589
  />
590
  <div className="flex items-center gap-2">
591
+ <Button
592
  variant="ghost" size="icon" className="text-destructive hover:bg-destructive/20 h-6 w-6"
593
  onClick={(e) => {
594
  e.stopPropagation();
 
603
  >
604
  ×
605
  </Button>
606
+ <Port
607
+ className="out"
608
  nodeId={node.id}
609
  isOutput={true}
610
  onStartConnection={onStartConnection}
 
612
  </div>
613
  </div>
614
  <div className="p-3 space-y-3">
615
+ <div className="aspect-[4/5] w-full rounded-xl bg-muted/30 grid place-items-center overflow-hidden border border-border/10">
616
  <img
617
  src={node.image}
618
  alt="character"
 
637
  await navigator.clipboard.write([
638
  new ClipboardItem({ [blob.type]: blob })
639
  ]);
640
+
641
  // Show visual feedback
642
  const img = e.currentTarget;
643
  const originalFilter = img.style.filter;
644
  img.style.filter = "brightness(1.2)";
645
+
646
  setTimeout(() => {
647
  img.style.filter = originalFilter;
648
  }, 500);
 
654
  />
655
  </div>
656
  <div className="flex gap-2">
657
+ <label className="text-xs bg-secondary hover:bg-secondary/80 text-secondary-foreground transition-colors rounded px-3 py-1 cursor-pointer">
658
  Upload
659
  <input
660
  type="file"
 
668
  // Reset input safely
669
  try {
670
  e.currentTarget.value = "";
671
+ } catch { }
672
  }
673
  }}
674
  />
675
  </label>
676
  <button
677
+ className="text-xs bg-secondary hover:bg-secondary/80 text-secondary-foreground transition-colors rounded px-3 py-1"
678
  onClick={async () => {
679
  try {
680
  const text = await navigator.clipboard.readText();
681
  if (text && (text.startsWith("http") || text.startsWith("data:image"))) {
682
  onChangeImage(node.id, text);
683
  }
684
+ } catch { }
685
  }}
686
  >
687
  Paste URL
 
724
 
725
 
726
  return (
727
+ <div className="nb-node absolute w-[420px]" style={{ left: pos.x, top: pos.y }}>
728
  <div
729
  className="nb-header cursor-grab active:cursor-grabbing rounded-t-[14px] px-3 py-2 flex items-center justify-between"
730
  onPointerDown={onPointerDown}
731
  onPointerMove={onPointerMove}
732
  onPointerUp={onPointerUp}
733
  >
734
+ <Port
735
+ className="in"
736
  nodeId={node.id}
737
  isOutput={false}
738
  onEndConnection={onEndConnection}
 
756
  >
757
  ×
758
  </Button>
759
+ <Port
760
+ className="out"
761
  nodeId={node.id}
762
  isOutput={true}
763
  onStartConnection={onStartConnection}
 
765
  </div>
766
  </div>
767
  <div className="p-3 space-y-3">
768
+ <div className="text-xs text-muted-foreground font-medium">Inputs</div>
769
  <div className="flex flex-wrap gap-2">
770
  {node.inputs.map((id) => {
771
  const inputNode = allNodes.find((n) => n.id === id);
772
  if (!inputNode) return null;
773
+
774
  // Get image and label based on node type
775
  let image: string | null = null;
776
  let label = "";
777
+
778
  if (inputNode.type === "CHARACTER") {
779
  image = (inputNode as CharacterNode).image;
780
  label = (inputNode as CharacterNode).label || "Character";
 
789
  // Node without output yet
790
  label = `${inputNode.type} (pending)`;
791
  }
792
+
793
  return (
794
+ <div key={id} className="flex items-center gap-2 bg-secondary/50 border border-border/50 text-secondary-foreground rounded px-2 py-1">
795
  {image && (
796
+ <div className="w-6 h-6 rounded overflow-hidden bg-muted">
797
+ <img
798
+ src={image}
799
+ className="w-full h-full object-contain cursor-pointer hover:opacity-80"
800
  alt="inp"
801
  onClick={async () => {
802
  try {
 
817
  await navigator.clipboard.write([
818
  new ClipboardItem({ [blob.type]: blob })
819
  ]);
820
+
821
  // Show visual feedback
822
  const img = e.currentTarget;
823
  const originalFilter = img.style.filter;
824
  img.style.filter = "brightness(1.2)";
825
+
826
  setTimeout(() => {
827
  img.style.filter = originalFilter;
828
  }, 300);
 
874
  </div>
875
  <div className="w-full min-h-[200px] max-h-[400px] rounded-xl bg-black/40 grid place-items-center">
876
  {node.output ? (
877
+ <img
878
+ src={node.output}
879
+ className="w-full h-auto max-h-[400px] object-contain rounded-xl cursor-pointer hover:opacity-80 transition-opacity"
880
  alt="output"
881
  onClick={async () => {
882
  if (node.output) {
 
900
  await navigator.clipboard.write([
901
  new ClipboardItem({ [blob.type]: blob })
902
  ]);
903
+
904
  // Show visual feedback
905
  const img = e.currentTarget;
906
  const originalFilter = img.style.filter;
907
  img.style.filter = "brightness(1.2)";
908
+
909
  setTimeout(() => {
910
  img.style.filter = originalFilter;
911
  }, 500);
 
1032
  alert('OAuth client ID not configured. Please check environment variables.');
1033
  return;
1034
  }
1035
+
1036
  window.location.href = await oauthLoginUrl({
1037
  clientId,
1038
  redirectUrl: `${window.location.origin}/api/auth/callback`
 
1042
 
1043
  // Connection dragging state
1044
  const [draggingFrom, setDraggingFrom] = useState<string | null>(null);
1045
+ const [dragPos, setDragPos] = useState<{ x: number, y: number } | null>(null);
1046
+
1047
  // API Token state (restored for manual review)
1048
  const [apiToken, setApiToken] = useState("");
1049
  const [showHelpSidebar, setShowHelpSidebar] = useState(false);
1050
+
1051
+ // Processing Mode: 'nanobananapro' uses Gemini API, 'huggingface' uses HF models
1052
+ type ProcessingMode = 'nanobananapro' | 'huggingface';
1053
+ const [processingMode, setProcessingMode] = useState<ProcessingMode>('nanobananapro');
1054
+
1055
+ // Available HF models
1056
+ const HF_MODELS = {
1057
+ "FLUX.1-Kontext-dev": {
1058
+ id: "black-forest-labs/FLUX.1-Kontext-dev",
1059
+ name: "FLUX.1 Kontext",
1060
+ type: "image-to-image",
1061
+ description: "Advanced image editing with context understanding",
1062
+ },
1063
+ "Qwen-Image-Edit": {
1064
+ id: "Qwen/Qwen-Image-Edit",
1065
+ name: "Qwen Image Edit",
1066
+ type: "image-to-image",
1067
+ description: "Powerful image editing and manipulation",
1068
+ },
1069
+ "FLUX.1-dev": {
1070
+ id: "black-forest-labs/FLUX.1-dev",
1071
+ name: "FLUX.1 Dev",
1072
+ type: "text-to-image",
1073
+ description: "High-quality text-to-image generation",
1074
+ },
1075
+ };
1076
+
1077
+ const [selectedHfModel, setSelectedHfModel] = useState<keyof typeof HF_MODELS>("FLUX.1-Kontext-dev");
1078
+
1079
+
1080
  // HF PRO AUTHENTICATION
1081
  const [isHfProLoggedIn, setIsHfProLoggedIn] = useState(false);
1082
  const [isCheckingAuth, setIsCheckingAuth] = useState(true);
 
1172
  // - MERGE nodes (can have output after merging)
1173
  // - Any processing node (BACKGROUND, CLOTHES, BLEND, etc.)
1174
  // - Even unprocessed nodes (for configuration chaining)
1175
+
1176
  // All nodes can be connected for chaining
1177
+ setNodes(prev => prev.map(n =>
1178
  n.id === nodeId ? { ...n, input: draggingFrom } : n
1179
  ));
1180
  }
 
1190
  const countPendingConfigurations = (startNodeId: string): number => {
1191
  let count = 0;
1192
  const visited = new Set<string>();
1193
+
1194
  const traverse = (nodeId: string) => {
1195
  if (visited.has(nodeId)) return;
1196
  visited.add(nodeId);
1197
+
1198
  const node = nodes.find(n => n.id === nodeId);
1199
  if (!node) return;
1200
+
1201
  // Check if this node has configuration but no output
1202
  if (!(node as any).output && node.type !== "CHARACTER" && node.type !== "MERGE") {
1203
  const config = getNodeConfiguration(node);
 
1205
  count++;
1206
  }
1207
  }
1208
+
1209
  // Check upstream
1210
  const upstreamId = (node as any).input;
1211
  if (upstreamId) {
1212
  traverse(upstreamId);
1213
  }
1214
  };
1215
+
1216
  traverse(startNodeId);
1217
  return count;
1218
  };
1219
+
1220
  // Helper to extract configuration from a node
1221
  const getNodeConfiguration = (node: AnyNode): Record<string, unknown> => {
1222
  const config: Record<string, unknown> = {};
1223
+
1224
  switch (node.type) {
1225
  case "BACKGROUND":
1226
  if ((node as BackgroundNode).backgroundType) {
 
1230
  config.backgroundImage = bgNode.backgroundImage;
1231
  config.customBackgroundImage = bgNode.customBackgroundImage;
1232
  config.customPrompt = bgNode.customPrompt;
1233
+
1234
  // Gradient properties
1235
  if (bgNode.backgroundType === "gradient") {
1236
  config.gradientDirection = bgNode.gradientDirection;
1237
  config.gradientStartColor = bgNode.gradientStartColor;
1238
  config.gradientEndColor = bgNode.gradientEndColor;
1239
  }
1240
+
1241
  // City scene properties
1242
  if (bgNode.backgroundType === "city") {
1243
  config.citySceneType = bgNode.citySceneType;
1244
  config.cityTimeOfDay = bgNode.cityTimeOfDay;
1245
  }
1246
+
1247
  // Photo studio properties
1248
  if (bgNode.backgroundType === "photostudio") {
1249
  config.studioSetup = bgNode.studioSetup;
 
1324
  }
1325
  break;
1326
  }
1327
+
1328
  return config;
1329
  };
1330
 
 
1341
  let accumulatedParams: any = {};
1342
  const processedNodes: string[] = []; // Track which nodes' configs we're applying
1343
  const inputId = (node as any).input;
1344
+
1345
  if (inputId) {
1346
  // Track unprocessed MERGE nodes that need to be executed
1347
  const unprocessedMerges: MergeNode[] = [];
1348
+
1349
  // Find the source image by traversing the chain backwards
1350
  const findSourceImage = (currentNodeId: string, visited: Set<string> = new Set()): string | null => {
1351
  if (visited.has(currentNodeId)) return null;
1352
  visited.add(currentNodeId);
1353
+
1354
  const currentNode = nodes.find(n => n.id === currentNodeId);
1355
  if (!currentNode) return null;
1356
+
1357
  // If this is a CHARACTER node, return its image
1358
  if (currentNode.type === "CHARACTER") {
1359
  return (currentNode as CharacterNode).image;
1360
  }
1361
+
1362
  // If this is a MERGE node with output, return its output
1363
  if (currentNode.type === "MERGE" && (currentNode as MergeNode).output) {
1364
  return (currentNode as MergeNode).output || null;
1365
  }
1366
+
1367
  // If any node has been processed, return its output
1368
  if ((currentNode as any).output) {
1369
  return (currentNode as any).output;
1370
  }
1371
+
1372
  // For MERGE nodes without output, we need to process them first
1373
  if (currentNode.type === "MERGE") {
1374
  const merge = currentNode as MergeNode;
 
1384
  if (inputImage) return inputImage;
1385
  }
1386
  }
1387
+
1388
  // Otherwise, check upstream
1389
  const upstreamId = (currentNode as any).input;
1390
  if (upstreamId) {
1391
  return findSourceImage(upstreamId, visited);
1392
  }
1393
+
1394
  return null;
1395
  };
1396
+
1397
  // Collect all configurations from unprocessed nodes in the chain
1398
  const collectConfigurations = (currentNodeId: string, visited: Set<string> = new Set()): any => {
1399
  if (visited.has(currentNodeId)) return {};
1400
  visited.add(currentNodeId);
1401
+
1402
  const currentNode = nodes.find(n => n.id === currentNodeId);
1403
  if (!currentNode) return {};
1404
+
1405
  let configs: any = {};
1406
+
1407
  // First, collect from upstream nodes
1408
  const upstreamId = (currentNode as any).input;
1409
  if (upstreamId) {
1410
  configs = collectConfigurations(upstreamId, visited);
1411
  }
1412
+
1413
  // Add this node's configuration only if:
1414
  // 1. It's the current node being processed, OR
1415
  // 2. It hasn't been processed yet (no output) AND it's not the current node
1416
+ const shouldIncludeConfig =
1417
  currentNodeId === nodeId || // Always include current node's config
1418
  (!(currentNode as any).output && currentNodeId !== nodeId); // Include unprocessed intermediate nodes
1419
+
1420
  if (shouldIncludeConfig) {
1421
  const nodeConfig = getNodeConfiguration(currentNode);
1422
  if (Object.keys(nodeConfig).length > 0) {
 
1427
  }
1428
  }
1429
  }
1430
+
1431
  return configs;
1432
  };
1433
+
1434
  // Find the source image
1435
  inputImage = findSourceImage(inputId);
1436
+
1437
  // If we found unprocessed merges, we need to execute them first
1438
  if (unprocessedMerges.length > 0 && !inputImage) {
1439
+
1440
  // Process each merge node
1441
  for (const merge of unprocessedMerges) {
1442
  // Set loading state for the merge
1443
+ setNodes(prev => prev.map(n =>
1444
  n.id === merge.id ? { ...n, isRunning: true, error: null } : n
1445
  ));
1446
+
1447
  try {
1448
  const mergeOutput = await executeMerge(merge);
1449
+
1450
  // Update the merge node with output
1451
+ setNodes(prev => prev.map(n =>
1452
  n.id === merge.id ? { ...n, output: mergeOutput || undefined, isRunning: false, error: null } : n
1453
  ));
1454
+
1455
  // Track that we processed this merge as part of the chain
1456
  processedNodes.push(merge.id);
1457
+
1458
  // Now use this as our input image if it's the direct input
1459
  if (inputId === merge.id) {
1460
  inputImage = mergeOutput;
1461
  }
1462
  } catch (e: any) {
1463
  console.error("Auto-merge error:", e);
1464
+ setNodes(prev => prev.map(n =>
1465
  n.id === merge.id ? { ...n, isRunning: false, error: e?.message || "Merge failed" } : n
1466
  ));
1467
  // Abort the main processing if merge failed
1468
+ setNodes(prev => prev.map(n =>
1469
  n.id === nodeId ? { ...n, error: "Failed to process upstream MERGE node", isRunning: false } : n
1470
  ));
1471
  return;
1472
  }
1473
  }
1474
+
1475
  // After processing merges, try to find the source image again
1476
  if (!inputImage) {
1477
  inputImage = findSourceImage(inputId);
1478
  }
1479
  }
1480
+
1481
  // Collect configurations from the chain
1482
  accumulatedParams = collectConfigurations(inputId, new Set());
1483
  }
1484
 
1485
  if (!inputImage) {
1486
+ const errorMsg = inputId
1487
  ? "No source image found in the chain. Connect to a CHARACTER node or processed node."
1488
  : "No input connected. Connect an image source to this node.";
1489
+ setNodes(prev => prev.map(n =>
1490
  n.id === nodeId ? { ...n, error: errorMsg, isRunning: false } : n
1491
  ));
1492
  return;
 
1495
  // Add current node's configuration
1496
  const currentNodeConfig = getNodeConfiguration(node);
1497
  const params = { ...accumulatedParams, ...currentNodeConfig };
1498
+
1499
  // Count how many unprocessed nodes we're combining
1500
+ const unprocessedNodeCount = Object.keys(params).length > 0 ?
1501
  (processedNodes.length + 1) : 1;
1502
+
1503
  // Show info about batch processing
1504
  if (unprocessedNodeCount > 1) {
1505
  } else {
 
1518
  if (inputImage && inputImage.length > 10 * 1024 * 1024) { // 10MB limit warning
1519
  console.warn("Large input image detected, size:", (inputImage.length / (1024 * 1024)).toFixed(2) + "MB");
1520
  }
1521
+
1522
  // Check if params contains custom images and validate them
1523
  if (params.clothesImage) {
1524
  // Validate it's a proper data URL
 
1526
  throw new Error("Invalid clothes image format. Please upload a valid image.");
1527
  }
1528
  }
1529
+
1530
  if (params.customBackgroundImage) {
1531
  // Validate it's a proper data URL
1532
  if (!params.customBackgroundImage.startsWith('data:') && !params.customBackgroundImage.startsWith('http') && !params.customBackgroundImage.startsWith('/')) {
1533
  throw new Error("Invalid background image format. Please upload a valid image.");
1534
  }
1535
  }
1536
+
1537
  // Log request details for debugging
 
 
 
 
 
 
 
 
 
 
1538
 
1539
+ // Conditionally use HuggingFace or Gemini API based on processing mode
1540
+ let res: Response;
1541
+
1542
+ if (processingMode === 'huggingface') {
1543
+ // Use HuggingFace models
1544
+ if (!isHfProLoggedIn) {
1545
+ throw new Error("Please login with HuggingFace to use HF models. Click 'Login with HuggingFace' in the header.");
1546
+ }
1547
+
1548
+ res = await fetch("/api/hf-process", {
1549
+ method: "POST",
1550
+ headers: { "Content-Type": "application/json" },
1551
+ body: JSON.stringify({
1552
+ type: "COMBINED",
1553
+ model: selectedHfModel,
1554
+ image: inputImage,
1555
+ params
1556
+ }),
1557
+ });
1558
+ } else {
1559
+ // Use Nano Banana Pro (Gemini API)
1560
+ res = await fetch("/api/process", {
1561
+ method: "POST",
1562
+ headers: { "Content-Type": "application/json" },
1563
+ body: JSON.stringify({
1564
+ type: "COMBINED",
1565
+ image: inputImage,
1566
+ params,
1567
+ apiToken: apiToken || undefined
1568
+ }),
1569
+ });
1570
+ }
1571
 
1572
  // Check if response is actually JSON before parsing
1573
  const contentType = res.headers.get("content-type");
 
1580
  const data = await res.json();
1581
  if (!res.ok) {
1582
  // Handle both string and object error formats
1583
+ const errorMessage = typeof data.error === 'string'
1584
+ ? data.error
1585
  : data.error?.message || JSON.stringify(data.error) || "Processing failed";
1586
  throw new Error(errorMessage);
1587
  }
 
1601
  }));
1602
 
1603
  // Add to node's history
1604
+ const description = unprocessedNodeCount > 1
1605
  ? `Combined ${unprocessedNodeCount} transformations`
1606
  : `${node.type} transformation`;
1607
+
1608
+
1609
  if (unprocessedNodeCount > 1) {
1610
  }
1611
  } catch (e: any) {
 
1688
  // Get images from merge inputs - now accepts any node type
1689
  const mergeImages: string[] = [];
1690
  const inputData: { image: string; label: string }[] = [];
1691
+
1692
  for (const inputId of merge.inputs) {
1693
  const inputNode = nodes.find(n => n.id === inputId);
1694
  if (inputNode) {
1695
  let image: string | null = null;
1696
  let label = "";
1697
+
1698
  if (inputNode.type === "CHARACTER") {
1699
  image = (inputNode as CharacterNode).image;
1700
  label = (inputNode as CharacterNode).label || "";
 
1708
  image = mergeOutput !== undefined ? mergeOutput : null;
1709
  label = "Merged Image";
1710
  }
1711
+
1712
  if (image) {
1713
  // Validate image format
1714
  if (!image.startsWith('data:') && !image.startsWith('http') && !image.startsWith('/')) {
 
1720
  }
1721
  }
1722
  }
1723
+
1724
  if (mergeImages.length < 2) {
1725
  throw new Error("Not enough valid inputs for merge. Need at least 2 images.");
1726
  }
1727
+
1728
  // Log merge details for debugging
1729
+
1730
  const prompt = generateMergePrompt(inputData);
1731
+
1732
  // ORIGINAL MERGE LOGIC RESTORED (HF processing commented out)
1733
  /*
1734
  const res = await fetch("/api/hf-process", {
 
1741
  }),
1742
  });
1743
  */
1744
+
1745
  // Use the process route instead of merge route
1746
  const res = await fetch("/api/process", {
1747
  method: "POST",
1748
  headers: { "Content-Type": "application/json" },
1749
+ body: JSON.stringify({
1750
  type: "MERGE",
1751
+ images: mergeImages,
1752
  prompt,
1753
  apiToken: apiToken || undefined
1754
  }),
1755
  });
1756
+
1757
  // Check if response is actually JSON before parsing
1758
  const contentType = res.headers.get("content-type");
1759
  if (!contentType || !contentType.includes("application/json")) {
 
1761
  console.error("Non-JSON response received:", textResponse);
1762
  throw new Error("Server returned an error page instead of JSON. Check your API key configuration.");
1763
  }
1764
+
1765
  const data = await res.json();
1766
  if (!res.ok) {
1767
  throw new Error(data.error || "Merge failed");
1768
  }
1769
+
1770
  return data.image || (data.images?.[0] as string) || null;
1771
  };
1772
+
1773
  const runMerge = async (mergeId: string) => {
1774
+ // Check if using HuggingFace mode - MERGE is not supported
1775
+ if (processingMode === 'huggingface') {
1776
+ setNodes((prev) => prev.map((n) => (n.id === mergeId && n.type === "MERGE" ? {
1777
+ ...n,
1778
+ error: "MERGE requires Nano Banana Pro mode. HuggingFace models only accept single images. Please switch to '🍌 Nano Banana Pro' in the header and enter your Gemini API key."
1779
+ } : n)));
1780
+ return;
1781
+ }
1782
+
1783
  setNodes((prev) => prev.map((n) => (n.id === mergeId && n.type === "MERGE" ? { ...n, isRunning: true, error: null } : n)));
1784
  try {
1785
  const merge = (nodes.find((n) => n.id === mergeId) as MergeNode) || null;
1786
  if (!merge) return;
1787
+
1788
  // Get input nodes with their labels - now accepts any node type
1789
  const inputData = merge.inputs
1790
  .map((id, index) => {
1791
  const inputNode = nodes.find((n) => n.id === id);
1792
  if (!inputNode) return null;
1793
+
1794
  // Support CHARACTER nodes, processed nodes, and MERGE outputs
1795
  let image: string | null = null;
1796
  let label = "";
1797
+
1798
  if (inputNode.type === "CHARACTER") {
1799
  image = (inputNode as CharacterNode).image;
1800
  label = (inputNode as CharacterNode).label || `CHARACTER ${index + 1}`;
 
1808
  image = mergeOutput !== undefined ? mergeOutput : null;
1809
  label = `Merged Image ${index + 1}`;
1810
  }
1811
+
1812
  if (!image) return null;
1813
+
1814
  return { image, label };
1815
  })
1816
  .filter(Boolean) as { image: string; label: string }[];
1817
+
1818
  if (inputData.length < 2) throw new Error("Connect at least two nodes with images (CHARACTER nodes or processed nodes).");
1819
+
1820
  // Debug: Log what we're sending
1821
+
1822
  // Generate dynamic prompt based on number of inputs
1823
  const prompt = generateMergePrompt(inputData);
1824
  const imgs = inputData.map(d => d.image);
 
1844
  const res = await fetch("/api/process", {
1845
  method: "POST",
1846
  headers: { "Content-Type": "application/json" },
1847
+ body: JSON.stringify({
1848
  type: "MERGE",
1849
+ images: imgs,
1850
  prompt,
1851
  apiToken: apiToken || undefined
1852
  }),
1853
  });
1854
+
1855
  // Check if response is actually JSON before parsing
1856
  const contentType = res.headers.get("content-type");
1857
  if (!contentType || !contentType.includes("application/json")) {
 
1859
  console.error("Non-JSON response received:", textResponse);
1860
  throw new Error("Server returned an error page instead of JSON. Check your API key configuration.");
1861
  }
1862
+
1863
  const js = await res.json();
1864
  if (!res.ok) {
1865
  // Show more helpful error messages
 
1871
  }
1872
  const out = js.image || (js.images?.[0] as string) || null;
1873
  setNodes((prev) => prev.map((n) => (n.id === mergeId && n.type === "MERGE" ? { ...n, output: out, isRunning: false } : n)));
1874
+
1875
  // Add merge result to node's history
1876
  if (out) {
1877
  const inputLabels = merge.inputs.map((id, index) => {
 
1881
  }
1882
  return `${inputNode?.type || 'Node'} ${index + 1}`;
1883
  });
1884
+
1885
  }
1886
  } catch (e: any) {
1887
  console.error("Merge error:", e);
 
1898
  maxX = Math.max(maxX, node.x + 500);
1899
  maxY = Math.max(maxY, node.y + 500);
1900
  });
1901
+ return {
1902
+ x: minX,
1903
+ y: minY,
1904
+ width: maxX - minX,
1905
+ height: maxY - minY
1906
  };
1907
  }, [nodes]);
1908
 
 
1924
  const width = widths[n.type] || 320;
1925
  return { x: n.x + width - 10, y: n.y + 25 };
1926
  };
1927
+
1928
  const getNodeInputPort = (n: AnyNode) => ({ x: n.x + 10, y: n.y + 25 });
1929
+
1930
  const createPath = (x1: number, y1: number, x2: number, y2: number) => {
1931
  const dx = x2 - x1;
1932
  const dy = y2 - y1;
 
1934
  const controlOffset = Math.min(200, Math.max(50, distance * 0.3));
1935
  return `M ${x1} ${y1} C ${x1 + controlOffset} ${y1}, ${x2 - controlOffset} ${y2}, ${x2} ${y2}`;
1936
  };
1937
+
1938
  const paths: { path: string; active?: boolean; processing?: boolean }[] = [];
1939
+
1940
  // Handle all connections
1941
  for (const node of nodes) {
1942
  if (node.type === "MERGE") {
 
1948
  const start = getNodeOutputPort(inputNode);
1949
  const end = getNodeInputPort(node);
1950
  const isProcessing = merge.isRunning; // Only animate to the currently processing merge node
1951
+ paths.push({
1952
  path: createPath(start.x, start.y, end.x, end.y),
1953
  processing: isProcessing
1954
  });
 
1962
  const start = getNodeOutputPort(inputNode);
1963
  const end = getNodeInputPort(node);
1964
  const isProcessing = (node as any).isRunning; // Only animate to the currently processing node
1965
+ paths.push({
1966
  path: createPath(start.x, start.y, end.x, end.y),
1967
  processing: isProcessing
1968
  });
1969
  }
1970
  }
1971
  }
1972
+
1973
  // Dragging path
1974
  if (draggingFrom && dragPos) {
1975
  const sourceNode = nodes.find(n => n.id === draggingFrom);
1976
  if (sourceNode) {
1977
  const start = getNodeOutputPort(sourceNode);
1978
+ paths.push({
1979
+ path: createPath(start.x, start.y, dragPos.x, dragPos.y),
1980
+ active: true
1981
  });
1982
  }
1983
  }
1984
+
1985
  return paths;
1986
  }, [nodes, draggingFrom, dragPos]);
1987
 
 
2034
  const rect = containerRef.current!.getBoundingClientRect();
2035
  const world = screenToWorld(e.clientX, e.clientY, rect, tx, ty, scale);
2036
  setMenuWorld(world);
2037
+
2038
  // Menu dimensions
2039
  const menuWidth = 224; // w-56 = 224px
2040
  const menuHeight = 320; // Approximate height with max-h-[300px] + padding
2041
+
2042
  // Calculate position relative to container
2043
  let x = e.clientX - rect.left;
2044
  let y = e.clientY - rect.top;
2045
+
2046
  // Adjust if menu would go off right edge
2047
  if (x + menuWidth > rect.width) {
2048
  x = rect.width - menuWidth - 10;
2049
  }
2050
+
2051
  // Adjust if menu would go off bottom edge
2052
  if (y + menuHeight > rect.height) {
2053
  y = rect.height - menuHeight - 10;
2054
  }
2055
+
2056
  // Ensure minimum margins from edges
2057
  x = Math.max(10, x);
2058
  y = Math.max(10, y);
2059
+
2060
  setMenuPos({ x, y });
2061
  setMenuOpen(true);
2062
  };
 
2067
  x: menuWorld.x,
2068
  y: menuWorld.y,
2069
  };
2070
+
2071
+ switch (kind) {
2072
  case "CHARACTER":
2073
  addCharacter(menuWorld);
2074
  break;
 
2111
 
2112
  return (
2113
  <div className="min-h-[100svh] bg-background text-foreground">
2114
+ <header className="flex items-center justify-between px-6 py-3 border-b border-border/60 bg-card/70 backdrop-blur">
2115
  <h1 className="text-lg font-semibold tracking-wide">
2116
  <span className="mr-2" aria-hidden>🍌</span>Nano Banana Editor
2117
  </h1>
2118
  <div className="flex items-center gap-3">
2119
+ {/* Processing Mode Toggle */}
2120
+ <div className="flex items-center gap-2 p-1 bg-muted/50 rounded-lg">
2121
+ <button
2122
+ className={`px-3 py-1.5 text-sm font-medium rounded-md transition-colors ${processingMode === 'nanobananapro'
2123
+ ? 'bg-primary text-primary-foreground shadow-sm'
2124
+ : 'text-muted-foreground hover:text-foreground'
2125
+ }`}
2126
+ onClick={() => setProcessingMode('nanobananapro')}
2127
+ title="Use Google Gemini API - supports all features including MERGE"
2128
+ >
2129
+ 🍌 Nano Banana Pro
2130
+ </button>
2131
+ <button
2132
+ className={`px-3 py-1.5 text-sm font-medium rounded-md transition-colors ${processingMode === 'huggingface'
2133
+ ? 'bg-primary text-primary-foreground shadow-sm'
2134
+ : 'text-muted-foreground hover:text-foreground'
2135
+ }`}
2136
+ onClick={() => setProcessingMode('huggingface')}
2137
+ title="Use HuggingFace models - requires HF login"
2138
+ >
2139
+ 🤗 HuggingFace
2140
+ </button>
2141
+ </div>
2142
+
2143
+ {/* Conditional UI based on processing mode */}
2144
+ {processingMode === 'nanobananapro' ? (
2145
+ <>
2146
+ <div className="h-6 w-px bg-border" />
2147
+ <label htmlFor="api-token" className="text-sm font-medium text-muted-foreground">
2148
+ Gemini API Key:
2149
+ </label>
2150
+ <Input
2151
+ id="api-token"
2152
+ type="password"
2153
+ placeholder="Enter your Google Gemini API key"
2154
+ value={apiToken}
2155
+ onChange={(e) => setApiToken(e.target.value)}
2156
+ className="w-56"
2157
+ />
2158
+ </>
2159
+ ) : (
2160
+ <>
2161
+ <div className="h-6 w-px bg-border" />
2162
+ {/* HF Login Button */}
2163
+ <Button
2164
+ variant={isHfProLoggedIn ? "outline" : "default"}
2165
+ size="sm"
2166
+ className="h-8"
2167
+ onClick={handleHfProLogin}
2168
+ disabled={isCheckingAuth}
2169
+ >
2170
+ {isCheckingAuth ? "Checking..." : isHfProLoggedIn ? "✓ HF Connected" : "Login with HuggingFace"}
2171
+ </Button>
2172
+
2173
+ {/* Model Selector - only show when logged in */}
2174
+ {isHfProLoggedIn && (
2175
+ <>
2176
+ <label htmlFor="hf-model" className="text-sm font-medium text-muted-foreground">
2177
+ Model:
2178
+ </label>
2179
+ <select
2180
+ id="hf-model"
2181
+ value={selectedHfModel}
2182
+ onChange={(e) => setSelectedHfModel(e.target.value as keyof typeof HF_MODELS)}
2183
+ className="h-8 px-2 text-sm bg-background border border-border rounded-md focus:outline-none focus:ring-2 focus:ring-ring"
2184
+ >
2185
+ {Object.entries(HF_MODELS).map(([key, model]) => (
2186
+ <option key={key} value={key}>
2187
+ {model.name} ({model.type})
2188
+ </option>
2189
+ ))}
2190
+ </select>
2191
+ </>
2192
+ )}
2193
+ </>
2194
+ )}
2195
+
2196
+ <div className="h-6 w-px bg-border" />
2197
  <Button
2198
  variant="outline"
2199
  size="sm"
 
2203
  >
2204
  Help
2205
  </Button>
 
 
2206
  </div>
2207
  </header>
2208
 
 
2210
  {showHelpSidebar && (
2211
  <>
2212
  {/* Backdrop */}
2213
+ <div
2214
  className="fixed inset-0 bg-black/50 z-[9998]"
2215
  onClick={() => setShowHelpSidebar(false)}
2216
  />
 
2228
  <span className="text-lg">×</span>
2229
  </Button>
2230
  </div>
2231
+
2232
  <div className="space-y-6">
2233
+ {/* Processing Modes Explanation */}
 
2234
  <div>
2235
+ <h3 className="font-semibold mb-3 text-foreground">⚙️ Processing Modes</h3>
2236
  <div className="text-sm text-muted-foreground space-y-3">
2237
  <div className="p-3 bg-primary/10 border border-primary/20 rounded-lg">
2238
+ <p className="font-medium text-primary mb-2">🍌 Nano Banana Pro (Gemini API)</p>
2239
+ <p>Uses Google's Gemini API. <strong>Supports ALL nodes</strong> including MERGE for combining multiple images into group photos.</p>
2240
+ <p className="mt-1 text-xs">Requires a Google Gemini API key from <a href="https://aistudio.google.com/app/apikey" target="_blank" rel="noopener noreferrer" className="text-primary hover:underline">AI Studio</a>.</p>
2241
  </div>
2242
  <div className="p-3 bg-secondary border border-border rounded-lg">
2243
+ <p className="font-medium text-secondary-foreground mb-2">🤗 HuggingFace Models</p>
2244
+ <p>Uses HuggingFace inference API with models like FLUX.1-Kontext and Qwen-Image-Edit. Supports single-image editing nodes.</p>
2245
+ <p className="mt-1 text-xs">Requires HuggingFace login. Uses your HF inference credits.</p>
 
 
 
2246
  </div>
2247
  </div>
2248
  </div>
2249
+
2250
+ {/* MERGE Warning */}
2251
+ <div className="p-4 bg-destructive/10 border border-destructive/30 rounded-lg">
2252
+ <h4 className="font-semibold text-destructive mb-2">⚠️ MERGE Node Limitation</h4>
2253
+ <p className="text-sm text-muted-foreground">
2254
+ The <strong>MERGE</strong> node requires <strong>Nano Banana Pro</strong> because it combines multiple images into one cohesive group photo. HuggingFace models only accept single images, so MERGE won't work in HuggingFace mode.
2255
+ </p>
2256
+ </div>
2257
+
2258
+ {/* Available HF Models */}
2259
  <div>
2260
+ <h3 className="font-semibold mb-3 text-foreground">🤖 HuggingFace Models</h3>
2261
+ <div className="text-sm text-muted-foreground space-y-2">
2262
+ <div className="p-2 bg-muted/50 rounded">
2263
+ <p className="font-medium">FLUX.1 Kontext</p>
2264
+ <p className="text-xs">Image editing with context understanding</p>
2265
  </div>
2266
+ <div className="p-2 bg-muted/50 rounded">
2267
+ <p className="font-medium">Qwen Image Edit</p>
2268
+ <p className="text-xs">Powerful image editing and manipulation</p>
2269
  </div>
2270
+ <div className="p-2 bg-muted/50 rounded">
2271
+ <p className="font-medium">FLUX.1 Dev</p>
2272
+ <p className="text-xs">Text-to-image generation (for CHARACTER nodes)</p>
2273
  </div>
2274
  </div>
2275
  </div>
2276
+
2277
+ {/* How to Use */}
2278
  <div>
2279
+ <h3 className="font-semibold mb-3 text-foreground">🎨 How to Use the Editor</h3>
2280
+ <div className="text-sm text-muted-foreground space-y-2">
2281
+ <p>• <strong>Adding Nodes:</strong> Right-click on the canvas to add nodes</p>
2282
+ <p>• <strong>Character Nodes:</strong> Upload or drag images as starting points</p>
2283
+ <p>• <strong>Merge Nodes:</strong> Connect multiple characters (Nano Banana Pro only)</p>
2284
+ <p>• <strong>Editing Nodes:</strong> Background, Style, Face, Age, Camera, etc.</p>
2285
+ <p>• <strong>Connecting:</strong> Drag from output port to input port</p>
2286
+ </div>
2287
+ </div>
2288
+
2289
+ {/* Privacy */}
2290
  <div className="p-4 bg-muted border border-border rounded-lg">
2291
  <h4 className="font-semibold text-foreground mb-2">🔒 Privacy & Security</h4>
2292
  <div className="text-sm text-muted-foreground space-y-1">
2293
+ <p>• Gemini API keys are stored locally in your browser</p>
2294
+ <p>• HuggingFace tokens are stored in secure HTTP-only cookies</p>
2295
+ <p>• All processing happens through official APIs</p>
2296
+ <p>• No data is stored on our servers</p>
 
 
 
 
 
 
 
2297
  </div>
2298
  </div>
2299
  </div>
 
2328
  >
2329
  <div
2330
  className="absolute left-0 top-0 will-change-transform"
2331
+ style={{
2332
+ transform: `translate3d(${tx}px, ${ty}px, 0) scale(${scale})`,
2333
  transformOrigin: "0 0",
2334
  transformStyle: "preserve-3d",
2335
  backfaceVisibility: "hidden"
2336
  }}
2337
  >
2338
+ <svg
2339
+ className="absolute pointer-events-none z-0"
2340
+ style={{
2341
+ left: `${svgBounds.x}px`,
2342
  top: `${svgBounds.y}px`,
2343
  width: `${svgBounds.width}px`,
2344
  height: `${svgBounds.height}px`
 
2347
  >
2348
  <defs>
2349
  <filter id="glow">
2350
+ <feGaussianBlur stdDeviation="3" result="coloredBlur" />
2351
  <feMerge>
2352
+ <feMergeNode in="coloredBlur" />
2353
+ <feMergeNode in="SourceGraphic" />
2354
  </feMerge>
2355
  </filter>
2356
  </defs>
 
2411
  onEndConnection={handleEndSingleConnection}
2412
  onProcess={processNode}
2413
  onUpdatePosition={updateNodePosition}
 
2414
  />
2415
  );
2416
  case "CLOTHES":
 
2450
  onEndConnection={handleEndSingleConnection}
2451
  onProcess={processNode}
2452
  onUpdatePosition={updateNodePosition}
 
2453
  />
2454
  );
2455
  case "CAMERA":
 
2526
 
2527
  {menuOpen && (
2528
  <div
2529
+ className="absolute z-50 rounded-xl border border-border bg-popover/95 backdrop-blur p-1 w-56 shadow-2xl text-popover-foreground"
2530
  style={{ left: menuPos.x, top: menuPos.y }}
2531
  onMouseLeave={() => setMenuOpen(false)}
2532
  >
2533
+ <div className="px-3 py-2 text-xs text-muted-foreground">Add node</div>
2534
+ <div
2535
  className="max-h-[300px] overflow-y-auto scrollbar-thin pr-1"
2536
  onWheel={(e) => e.stopPropagation()}
2537
  >
2538
+ <button className="w-full text-left px-3 py-2 text-sm hover:bg-accent hover:text-accent-foreground rounded-lg transition-colors" onClick={() => addFromMenu("CHARACTER")}>CHARACTER</button>
2539
+ <button className="w-full text-left px-3 py-2 text-sm hover:bg-accent hover:text-accent-foreground rounded-lg transition-colors" onClick={() => addFromMenu("MERGE")}>MERGE</button>
2540
+ <button className="w-full text-left px-3 py-2 text-sm hover:bg-accent hover:text-accent-foreground rounded-lg transition-colors" onClick={() => addFromMenu("BACKGROUND")}>BACKGROUND</button>
2541
+ <button className="w-full text-left px-3 py-2 text-sm hover:bg-accent hover:text-accent-foreground rounded-lg transition-colors" onClick={() => addFromMenu("CLOTHES")}>CLOTHES</button>
2542
+ <button className="w-full text-left px-3 py-2 text-sm hover:bg-accent hover:text-accent-foreground rounded-lg transition-colors" onClick={() => addFromMenu("STYLE")}>STYLE</button>
2543
+ <button className="w-full text-left px-3 py-2 text-sm hover:bg-accent hover:text-accent-foreground rounded-lg transition-colors" onClick={() => addFromMenu("EDIT")}>EDIT</button>
2544
+ <button className="w-full text-left px-3 py-2 text-sm hover:bg-accent hover:text-accent-foreground rounded-lg transition-colors" onClick={() => addFromMenu("CAMERA")}>CAMERA</button>
2545
+ <button className="w-full text-left px-3 py-2 text-sm hover:bg-accent hover:text-accent-foreground rounded-lg transition-colors" onClick={() => addFromMenu("AGE")}>AGE</button>
2546
+ <button className="w-full text-left px-3 py-2 text-sm hover:bg-accent hover:text-accent-foreground rounded-lg transition-colors" onClick={() => addFromMenu("FACE")}>FACE</button>
2547
+ <button className="w-full text-left px-3 py-2 text-sm hover:bg-accent hover:text-accent-foreground rounded-lg transition-colors" onClick={() => addFromMenu("LIGHTNING")}>LIGHTNING</button>
2548
+ <button className="w-full text-left px-3 py-2 text-sm hover:bg-accent hover:text-accent-foreground rounded-lg transition-colors" onClick={() => addFromMenu("POSES")}>POSES</button>
2549
  </div>
2550
  </div>
2551
  )}
components/mode-toggle.tsx ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "use client"
2
+
3
+ import * as React from "react"
4
+ import { Moon, Sun } from "lucide-react"
5
+ import { useTheme } from "next-themes"
6
+
7
+ import { Button } from "@/components/ui/button"
8
+
9
+ export function ModeToggle() {
10
+ const { setTheme, theme } = useTheme()
11
+
12
+ return (
13
+ <Button
14
+ variant="outline"
15
+ size="icon"
16
+ className="fixed bottom-4 right-4 z-50 rounded-full h-12 w-12 shadow-lg bg-background border-border"
17
+ onClick={() => setTheme(theme === "dark" ? "light" : "dark")}
18
+ title="Toggle theme"
19
+ >
20
+ <Sun className="h-[1.2rem] w-[1.2rem] rotate-0 scale-100 transition-all dark:-rotate-90 dark:scale-0 text-orange-500" />
21
+ <Moon className="absolute h-[1.2rem] w-[1.2rem] rotate-90 scale-0 transition-all dark:rotate-0 dark:scale-100 text-blue-400" />
22
+ <span className="sr-only">Toggle theme</span>
23
+ </Button>
24
+ )
25
+ }
components/theme-provider.tsx ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "use client"
2
+
3
+ import * as React from "react"
4
+ import { ThemeProvider as NextThemesProvider } from "next-themes"
5
+
6
+ export function ThemeProvider({
7
+ children,
8
+ ...props
9
+ }: React.ComponentProps<typeof NextThemesProvider>) {
10
+ return <NextThemesProvider {...props}>{children}</NextThemesProvider>
11
+ }
next.config.ts CHANGED
@@ -8,6 +8,16 @@ const nextConfig: NextConfig = {
8
  serverRuntimeConfig: {
9
  bodySizeLimit: '50mb',
10
  },
 
 
 
 
 
 
 
 
 
 
11
  };
12
 
13
  export default nextConfig;
 
8
  serverRuntimeConfig: {
9
  bodySizeLimit: '50mb',
10
  },
11
+ // Redirect /editor to main page
12
+ async redirects() {
13
+ return [
14
+ {
15
+ source: '/editor',
16
+ destination: '/',
17
+ permanent: true,
18
+ },
19
+ ];
20
+ },
21
  };
22
 
23
  export default nextConfig;
package-lock.json CHANGED
@@ -16,6 +16,7 @@
16
  "clsx": "^2.1.1",
17
  "lucide-react": "^0.542.0",
18
  "next": "15.5.2",
 
19
  "react": "19.1.0",
20
  "react-dom": "19.1.0",
21
  "tailwind-merge": "^2.5.3"
@@ -5032,6 +5033,16 @@
5032
  }
5033
  }
5034
  },
 
 
 
 
 
 
 
 
 
 
5035
  "node_modules/next/node_modules/postcss": {
5036
  "version": "8.4.31",
5037
  "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz",
 
16
  "clsx": "^2.1.1",
17
  "lucide-react": "^0.542.0",
18
  "next": "15.5.2",
19
+ "next-themes": "^0.4.6",
20
  "react": "19.1.0",
21
  "react-dom": "19.1.0",
22
  "tailwind-merge": "^2.5.3"
 
5033
  }
5034
  }
5035
  },
5036
+ "node_modules/next-themes": {
5037
+ "version": "0.4.6",
5038
+ "resolved": "https://registry.npmjs.org/next-themes/-/next-themes-0.4.6.tgz",
5039
+ "integrity": "sha512-pZvgD5L0IEvX5/9GWyHMf3m8BKiVQwsCMHfoFosXtXBMnaS0ZnIJ9ST4b4NqLVKDEm8QBxoNNGNaBv2JNF6XNA==",
5040
+ "license": "MIT",
5041
+ "peerDependencies": {
5042
+ "react": "^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc",
5043
+ "react-dom": "^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc"
5044
+ }
5045
+ },
5046
  "node_modules/next/node_modules/postcss": {
5047
  "version": "8.4.31",
5048
  "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz",
package.json CHANGED
@@ -17,6 +17,7 @@
17
  "clsx": "^2.1.1",
18
  "lucide-react": "^0.542.0",
19
  "next": "15.5.2",
 
20
  "react": "19.1.0",
21
  "react-dom": "19.1.0",
22
  "tailwind-merge": "^2.5.3"
 
17
  "clsx": "^2.1.1",
18
  "lucide-react": "^0.542.0",
19
  "next": "15.5.2",
20
+ "next-themes": "^0.4.6",
21
  "react": "19.1.0",
22
  "react-dom": "19.1.0",
23
  "tailwind-merge": "^2.5.3"