ariG23498 (HF Staff) committed · verified
Commit 55bd634 · 1 Parent(s): 45b0a86

Upload Lightricks_LTX-Video_0.txt with huggingface_hub

Files changed (1)
  1. Lightricks_LTX-Video_0.txt +20 -5
Lightricks_LTX-Video_0.txt CHANGED
@@ -18,7 +18,7 @@ export_to_video(output, "output.mp4")
 
  ERROR:
  Traceback (most recent call last):
-   File "/tmp/Lightricks_LTX-Video_0QlFD3T.py", line 26, in <module>
+   File "/tmp/Lightricks_LTX-Video_0PrdE45.py", line 28, in <module>
     pipe = DiffusionPipeline.from_pretrained("Lightricks/LTX-Video", dtype=torch.bfloat16, device_map="cuda")
   File "/tmp/.cache/uv/environments-v2/cb88948adb0dfc46/lib/python3.13/site-packages/huggingface_hub/utils/_validators.py", line 114, in _inner_fn
     return fn(*args, **kwargs)
@@ -28,8 +28,23 @@ Traceback (most recent call last):
     ...<21 lines>...
     quantization_config=quantization_config,
   )
-   File "/tmp/.cache/uv/environments-v2/cb88948adb0dfc46/lib/python3.13/site-packages/diffusers/pipelines/pipeline_loading_utils.py", line 778, in load_sub_model
-     raise ValueError(
-     ...<2 lines>...
+   File "/tmp/.cache/uv/environments-v2/cb88948adb0dfc46/lib/python3.13/site-packages/diffusers/pipelines/pipeline_loading_utils.py", line 860, in load_sub_model
+     loaded_sub_model = load_method(os.path.join(cached_folder, name), **loading_kwargs)
+   File "/tmp/.cache/uv/environments-v2/cb88948adb0dfc46/lib/python3.13/site-packages/huggingface_hub/utils/_validators.py", line 114, in _inner_fn
+     return fn(*args, **kwargs)
+   File "/tmp/.cache/uv/environments-v2/cb88948adb0dfc46/lib/python3.13/site-packages/diffusers/models/modeling_utils.py", line 1288, in from_pretrained
+     ) = cls._load_pretrained_model(
+         ~~~~~~~~~~~~~~~~~~~~~~~~~~^
+         model,
+         ^^^^^^
+     ...<13 lines>...
+         is_parallel_loading_enabled=is_parallel_loading_enabled,
+         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
   )
- ValueError: The component <class 'transformers.models.t5.tokenization_t5._LazyModule.__getattr__.<locals>.Placeholder'> of <class 'diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline'> cannot be loaded as it does not seem to have any of the loading methods defined in {'ModelMixin': ['save_pretrained', 'from_pretrained'], 'SchedulerMixin': ['save_pretrained', 'from_pretrained'], 'DiffusionPipeline': ['save_pretrained', 'from_pretrained'], 'OnnxRuntimeModel': ['save_pretrained', 'from_pretrained'], 'PreTrainedTokenizer': ['save_pretrained', 'from_pretrained'], 'PreTrainedTokenizerFast': ['save_pretrained', 'from_pretrained'], 'PreTrainedModel': ['save_pretrained', 'from_pretrained'], 'FeatureExtractionMixin': ['save_pretrained', 'from_pretrained'], 'ProcessorMixin': ['save_pretrained', 'from_pretrained'], 'ImageProcessingMixin': ['save_pretrained', 'from_pretrained'], 'ORTModule': ['save_pretrained', 'from_pretrained']}.
+     ^
+   File "/tmp/.cache/uv/environments-v2/cb88948adb0dfc46/lib/python3.13/site-packages/diffusers/models/modeling_utils.py", line 1537, in _load_pretrained_model
+     _caching_allocator_warmup(model, expanded_device_map, dtype, hf_quantizer)
+     ~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   File "/tmp/.cache/uv/environments-v2/cb88948adb0dfc46/lib/python3.13/site-packages/diffusers/models/model_loading_utils.py", line 754, in _caching_allocator_warmup
+     _ = torch.empty(warmup_elems, dtype=dtype, device=device, requires_grad=False)
+ torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 3.58 GiB. GPU 0 has a total capacity of 22.03 GiB of which 2.49 GiB is free. Including non-PyTorch memory, this process has 19.53 GiB memory in use. Of the allocated memory 19.33 GiB is allocated by PyTorch, and 27.19 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
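
For context, the new failure is a plain CUDA out-of-memory hit during the caching-allocator warmup, and the message itself suggests PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True. Below is a minimal sketch of a lower-memory load path; it is an assumption, not part of the logged script, and it swaps device_map="cuda" for diffusers' generic enable_model_cpu_offload().

# Hypothetical lower-memory variant of the failing load (not taken from this log).
import os
# Allocator hint suggested by the OOM message; must be set before CUDA is initialized.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

import torch
from diffusers import DiffusionPipeline

# Load without device_map="cuda" so weights land on CPU first, then let diffusers
# move submodules to the GPU only when they are needed, trading speed for peak VRAM.
pipe = DiffusionPipeline.from_pretrained("Lightricks/LTX-Video", dtype=torch.bfloat16)
pipe.enable_model_cpu_offload()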