diff --git a/genXL.py b/genXL.py
index bd62258..3e0ace8 100644
--- a/genXL.py
+++ b/genXL.py
@@ -8,7 +8,7 @@ import os
 
 def loadllama():
     llm = Llama(
-        model_path="models/llama3.gguf",
+        model_path="models/.gguf",
         n_ctx=4096,
         n_threads=6,
         n_gpu_layers=-1,
@@ -19,7 +19,7 @@ def loadllama():
 
 def loadtts():
     model_config = outetts.GGUFModelConfig_v1(
-        model_path="models/tts.gguf",
+        model_path="models/.gguf",
         language="en",
         n_gpu_layers=-1,
         verbose=False,
@@ -30,7 +30,7 @@ def loadtts():
     return interface, speaker
 
 def loadsdxl():
-    pipe = StableDiffusionXLPipeline.from_single_file("models/sdxlReal.safetensors", torch_dtype=torch.float16, variant="fp16", requires_safety_checker=True)
+    pipe = StableDiffusionXLPipeline.from_single_file("models/.safetensors", torch_dtype=torch.float16, variant="fp16", requires_safety_checker=True)
     pipe.to("cuda")
     pipe.enable_xformers_memory_efficient_attention()
     pipe.enable_model_cpu_offload()