Hshsh0877 committed
Commit 12e33e8 · verified · 1 Parent(s): fb9ca87

Create inference.py

Files changed (1)
  1. inference.py  +185 -0
inference.py ADDED
@@ -0,0 +1,185 @@
+ import os
+ import json
+ import subprocess
+ from pathlib import Path
+ from urllib.parse import urlparse, parse_qs
+
+ import requests
+ import yt_dlp
+
+ # ====== CONFIG ====== #
+
+ HF_API_KEY = os.getenv("HF_API_KEY")  # you will set this in Space Settings
+ HF_VLM_MODEL = os.getenv("HF_VLM_MODEL", "Qwen/Qwen2.5-VL-3B-Instruct")
+ HF_API_URL = f"https://api-inference.huggingface.co/models/{HF_VLM_MODEL}"
+
+ # ====== YOUTUBE HELPERS ====== #
+
+ def get_youtube_video_id(url: str) -> str | None:
+     try:
+         p = urlparse(url)
+         if p.netloc in ("youtu.be", "www.youtu.be"):
+             return p.path.lstrip("/")
+         if "youtube.com" in p.netloc:
+             qs = parse_qs(p.query)
+             return qs.get("v", [None])[0]
+     except Exception:
+         return None
+     return None
+
+ def download_video(url: str, out_path: str = "video.mp4") -> tuple[str, float]:
+     """
+     Download the YouTube video to out_path and return (path, duration_seconds).
+     """
+     ydl_opts = {
+         "format": "mp4",
+         "outtmpl": out_path,
+         "quiet": True,
+     }
+     with yt_dlp.YoutubeDL(ydl_opts) as ydl:
+         info = ydl.extract_info(url, download=True)
+     duration = float(info.get("duration") or 60.0)  # default 60 s if unknown or None
+     return out_path, duration
+
+ # ====== VLM CALL ====== #
+
+ def call_vlm_with_thumbnail(thumb_url: str, product_text: str) -> dict:
+     """
+     Ask the VLM where to put the product in the frame.
+     Returns a dict with bbox etc.
+     """
+     if not HF_API_KEY:
+         raise RuntimeError("HF_API_KEY is not set in Space variables.")
+
+     system_prompt = """
+ You are an AI video ad editor.
+
+ You see a single video thumbnail and a product/brand description.
+ Choose where in the image the product could naturally appear.
+
+ Return ONLY JSON with this schema:
+ {
+   "naturalness_score": float,
+   "reason": "short reason",
+   "bbox": {
+     "x": float,  // center x between 0 and 1
+     "y": float,  // center y between 0 and 1
+     "w": float,  // width between 0 and 1
+     "h": float   // height between 0 and 1
+   },
+   "object_description": "short description",
+   "new_subtitle": "optional subtitle text or empty string"
+ }
+ """
+
+     user_prompt = f"Product / brand description:\n{product_text}\n\nImage URL: {thumb_url}"
+
+     payload = {
+         "inputs": {
+             "prompt": system_prompt + "\n\n" + user_prompt,
+             "image": thumb_url,
+         },
+         "parameters": {
+             "max_new_tokens": 320
+         },
+         "options": {
+             "wait_for_model": True
+         }
+     }
+
+     headers = {"Authorization": f"Bearer {HF_API_KEY}"}
+     r = requests.post(HF_API_URL, headers=headers, json=payload, timeout=180)
+
+     if r.status_code >= 400:
+         raise RuntimeError(f"Hugging Face error {r.status_code}: {r.text[:200]}")
+
+     data = r.json()
+
+     # Many HF endpoints return [{"generated_text": "..."}]
+     if isinstance(data, list) and len(data) > 0 and isinstance(data[0], dict) and "generated_text" in data[0]:
+         text = data[0]["generated_text"]
+     elif isinstance(data, dict) and "generated_text" in data:
+         text = data["generated_text"]
+     elif isinstance(data, dict):
+         return data
+     else:
+         text = str(data)
+
+     # Try to parse JSON inside the text
+     try:
+         return json.loads(text)
+     except json.JSONDecodeError:
+         start = text.find("{")
+         end = text.rfind("}")
+         if start != -1 and end != -1 and end > start:
+             return json.loads(text[start:end + 1])
+         raise RuntimeError(f"VLM did not return valid JSON: {text[:200]}")
+
+ # ====== FFMPEG OVERLAY (simple box) ====== #
+
+ def overlay_product_box(
+     input_video: str,
+     output_video: str,
+     bbox: dict,
+     t_start: float,
+     t_end: float,
+ ) -> str:
+     """
+     Draw a semi-transparent colored box where the product should appear
+     between t_start and t_end seconds.
+     bbox coords are normalized [0,1] and treated as the box CENTER.
+     """
+     x = float(bbox.get("x", 0.5))
+     y = float(bbox.get("y", 0.5))
+     w = float(bbox.get("w", 0.2))
+     h = float(bbox.get("h", 0.2))
+
+     # ffmpeg expressions: iw/ih are the input width/height.
+     # Center-based box. NOTE: the original color value was lost in the source;
+     # red@0.5 (red at 50% opacity) is an assumed placeholder.
+     drawbox = (
+         f"drawbox=x=iw*({x}-{w}/2):y=ih*({y}-{h}/2):"
+         f"w=iw*{w}:h=ih*{h}:color=red@0.5:t=fill:"
+         f"enable='between(t,{t_start},{t_end})'"
+     )
+
+     cmd = [
+         "ffmpeg",
+         "-y",
+         "-i", input_video,
+         "-vf", drawbox,
+         "-c:a", "copy",
+         output_video,
+     ]
+     subprocess.run(cmd, check=True)
+     return output_video
+
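For reference, with the default bbox ({"x": 0.5, "y": 0.5, "w": 0.2, "h": 0.2}) and a 3-second window from t=28.5 to t=31.5, overlay_product_box builds a filter string along these lines (red@0.5 being the assumed placeholder color noted in the comment above):

drawbox=x=iw*(0.5-0.2/2):y=ih*(0.5-0.2/2):w=iw*0.2:h=ih*0.2:color=red@0.5:t=fill:enable='between(t,28.5,31.5)'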
+ # ====== MAIN PIPELINE ====== #
+
+ def run_halftime_pipeline(youtube_url: str, product_text: str) -> str:
+     """
+     1. Figure out YouTube ID and thumbnail URL.
+     2. Ask VLM for bbox.
+     3. Download video.
+     4. Draw colored box where product should appear for 3 seconds.
+     5. Return path to edited video.
+     """
+     vid = get_youtube_video_id(youtube_url)
+     if not vid:
+         raise RuntimeError("Could not parse YouTube video ID.")
+
+     thumb_url = f"https://img.youtube.com/vi/{vid}/maxresdefault.jpg"
+
+     placement = call_vlm_with_thumbnail(thumb_url, product_text)
+     bbox = placement.get("bbox", {"x": 0.5, "y": 0.5, "w": 0.2, "h": 0.2})
+
+     video_path, duration = download_video(youtube_url, "input.mp4")
+
+     # Choose middle of the video as the 'ad moment'
+     center = duration / 2.0
+     t_start = max(center - 1.5, 0.0)
+     t_end = min(center + 1.5, duration)
+
+     output_path = "edited.mp4"
+     overlay_product_box(video_path, output_path, bbox, t_start, t_end)
+
+     return output_path
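
A minimal usage sketch, assuming the Space exposes this pipeline through a Gradio app; the app.py below and its edit_ad wrapper are illustrative assumptions, not part of this file:

# app.py (hypothetical): thin Gradio wrapper around run_halftime_pipeline
import gradio as gr

from inference import run_halftime_pipeline

def edit_ad(youtube_url: str, product_text: str) -> str:
    # run_halftime_pipeline returns a local file path, which the Video output renders.
    return run_halftime_pipeline(youtube_url, product_text)

demo = gr.Interface(
    fn=edit_ad,
    inputs=[
        gr.Textbox(label="YouTube URL"),
        gr.Textbox(label="Product / brand description", lines=3),
    ],
    outputs=gr.Video(label="Edited video"),
)

if __name__ == "__main__":
    demo.launch()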