Update app.py
app.py (CHANGED)
@@ -14,19 +14,23 @@ from transformers import pipeline
 import gradio as gr
 import os
 from huggingface_hub import login
+import random
+
+# Constant definitions
+MAX_SEED = 2**32 - 1
 
-#
+# Hugging Face token setup and login
 HF_TOKEN = os.getenv("HF_TOKEN")
 if HF_TOKEN is None:
     raise ValueError("Please set the HF_TOKEN environment variable")
 login(token=HF_TOKEN)
 
-# Model configuration
+# Model configuration (declared only once)
 base_model = "black-forest-labs/FLUX.1-dev"
 model_lora_repo = "Motas/Flux_Fashion_Photography_Style"
 clothes_lora_repo = "prithivMLmods/Canopus-Clothing-Flux-LoRA"
 
-# Initialize the FLUX model (only once)
+# Initialize the FLUX model (initialized only once)
 fashion_pipe = DiffusionPipeline.from_pretrained(
     base_model,
     torch_dtype=torch.bfloat16,
@@ -34,16 +38,13 @@ fashion_pipe = DiffusionPipeline.from_pretrained(
 )
 fashion_pipe.to("cuda")
 
-# Initialize the translator
+# Initialize the translator (only once)
 translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
 
-#
-import random
-MAX_SEED = 2**32 - 1
-
-# Download checkpoints
+# Download the Leffa checkpoints
 snapshot_download(repo_id="franciszzj/Leffa", local_dir="./ckpts")
 
+# Initialize the Leffa models
 mask_predictor = AutoMasker(
     densepose_path="./ckpts/densepose",
     schp_path="./ckpts/schp",
@@ -66,16 +67,6 @@ pt_model = LeffaModel(
 )
 pt_inference = LeffaInference(model=pt_model)
 
-
-
-translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
-base_model = "black-forest-labs/FLUX.1-dev"
-model_lora_repo = "Motas/Flux_Fashion_Photography_Style"
-clothes_lora_repo = "prithivMLmods/Canopus-Clothing-Flux-LoRA"
-
-fashion_pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
-fashion_pipe.to("cuda")
-
 def contains_korean(text):
     return any(ord('가') <= ord(char) <= ord('힣') for char in text)
 
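Since the commit consolidates the seed constant, the translator, and the FLUX pipeline into single top-level definitions, a generation path would plausibly use them together as sketched below. This is a minimal sketch, not code from the commit: the function name generate_fashion_image, the sampling parameters, and the load_lora_weights call are illustrative assumptions about the rest of app.py.

import random
import torch

def generate_fashion_image(prompt, seed=None):
    # Translate Korean prompts to English before handing them to FLUX
    # (contains_korean is defined further down in app.py).
    if contains_korean(prompt):
        prompt = translator(prompt)[0]["translation_text"]

    # Draw a reproducible seed bounded by the MAX_SEED constant defined above.
    if seed is None:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator("cuda").manual_seed(seed)

    # Apply the fashion-photography LoRA on top of the shared base pipeline
    # (assumes the pipeline exposes diffusers' load_lora_weights).
    fashion_pipe.load_lora_weights(model_lora_repo)

    result = fashion_pipe(
        prompt=prompt,
        generator=generator,
        num_inference_steps=28,  # illustrative values, not from the commit
        guidance_scale=3.5,
    )
    return result.images[0], seed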
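Because the commit keeps a single fashion_pipe while declaring two LoRA repositories (model_lora_repo for fashion photography, clothes_lora_repo for clothing), switching adapters per request could look like the sketch below. It is an assumption about usage, not part of the diff; it relies on diffusers' load_lora_weights/unload_lora_weights, which FLUX pipelines expose in recent diffusers releases.

def apply_lora(mode):
    # Drop whatever adapter is currently attached so LoRA weights do not stack.
    fashion_pipe.unload_lora_weights()
    if mode == "fashion":
        fashion_pipe.load_lora_weights(model_lora_repo)
    elif mode == "clothes":
        fashion_pipe.load_lora_weights(clothes_lora_repo)
    return fashion_pipe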