ariG23498 (HF Staff) committed
Commit cb54450 · verified · 1 Parent(s): 6e6306b

Upload Qwen_Qwen-Image_0.txt with huggingface_hub

Files changed (1)
  1. Qwen_Qwen-Image_0.txt +22 -13
Qwen_Qwen-Image_0.txt CHANGED
@@ -11,7 +11,7 @@ image = pipe(prompt).images[0]
 
 ERROR:
 Traceback (most recent call last):
-  File "/tmp/Qwen_Qwen-Image_0wu1agr.py", line 27, in <module>
+  File "/tmp/Qwen_Qwen-Image_0HCSIfj.py", line 27, in <module>
     pipe = DiffusionPipeline.from_pretrained("Qwen/Qwen-Image", dtype=torch.bfloat16, device_map="cuda")
   File "/tmp/.cache/uv/environments-v2/29dc976ff856b6da/lib/python3.13/site-packages/huggingface_hub/utils/_validators.py", line 114, in _inner_fn
     return fn(*args, **kwargs)
@@ -23,21 +23,30 @@ Traceback (most recent call last):
   )
   File "/tmp/.cache/uv/environments-v2/29dc976ff856b6da/lib/python3.13/site-packages/diffusers/pipelines/pipeline_loading_utils.py", line 860, in load_sub_model
     loaded_sub_model = load_method(os.path.join(cached_folder, name), **loading_kwargs)
-  File "/tmp/.cache/uv/environments-v2/29dc976ff856b6da/lib/python3.13/site-packages/huggingface_hub/utils/_validators.py", line 114, in _inner_fn
-    return fn(*args, **kwargs)
-  File "/tmp/.cache/uv/environments-v2/29dc976ff856b6da/lib/python3.13/site-packages/diffusers/models/modeling_utils.py", line 1288, in from_pretrained
+  File "/tmp/.cache/uv/environments-v2/29dc976ff856b6da/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
+    return func(*args, **kwargs)
+  File "/tmp/.cache/uv/environments-v2/29dc976ff856b6da/lib/python3.13/site-packages/transformers/modeling_utils.py", line 5048, in from_pretrained
     ) = cls._load_pretrained_model(
         ~~~~~~~~~~~~~~~~~~~~~~~~~~^
         model,
         ^^^^^^
-        ...<13 lines>...
-        is_parallel_loading_enabled=is_parallel_loading_enabled,
-        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+        ...<12 lines>...
+        weights_only=weights_only,
+        ^^^^^^^^^^^^^^^^^^^^^^^^^^
     )
     ^
-  File "/tmp/.cache/uv/environments-v2/29dc976ff856b6da/lib/python3.13/site-packages/diffusers/models/modeling_utils.py", line 1537, in _load_pretrained_model
-    _caching_allocator_warmup(model, expanded_device_map, dtype, hf_quantizer)
-    ~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-  File "/tmp/.cache/uv/environments-v2/29dc976ff856b6da/lib/python3.13/site-packages/diffusers/models/model_loading_utils.py", line 754, in _caching_allocator_warmup
-    _ = torch.empty(warmup_elems, dtype=dtype, device=device, requires_grad=False)
-torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 38.05 GiB. GPU 0 has a total capacity of 22.03 GiB of which 21.34 GiB is free. Including non-PyTorch memory, this process has 700.00 MiB memory in use. Of the allocated memory 494.18 MiB is allocated by PyTorch, and 19.82 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+  File "/tmp/.cache/uv/environments-v2/29dc976ff856b6da/lib/python3.13/site-packages/transformers/modeling_utils.py", line 5468, in _load_pretrained_model
+    _error_msgs, disk_offload_index = load_shard_file(args)
+                                      ~~~~~~~~~~~~~~~^^^^^^
+  File "/tmp/.cache/uv/environments-v2/29dc976ff856b6da/lib/python3.13/site-packages/transformers/modeling_utils.py", line 843, in load_shard_file
+    disk_offload_index = _load_state_dict_into_meta_model(
+        model,
+        ...<8 lines>...
+        device_mesh=device_mesh,
+    )
+  File "/tmp/.cache/uv/environments-v2/29dc976ff856b6da/lib/python3.13/site-packages/torch/utils/_contextlib.py", line 120, in decorate_context
+    return func(*args, **kwargs)
+  File "/tmp/.cache/uv/environments-v2/29dc976ff856b6da/lib/python3.13/site-packages/transformers/modeling_utils.py", line 770, in _load_state_dict_into_meta_model
+    _load_parameter_into_model(model, param_name, param.to(param_device))
+                                                  ~~~~~~~~^^^^^^^^^^^^^^
+torch.OutOfMemoryError: CUDA out of memory. Tried to allocate 260.00 MiB. GPU 0 has a total capacity of 22.03 GiB of which 183.12 MiB is free. Including non-PyTorch memory, this process has 21.85 GiB memory in use. Of the allocated memory 21.56 GiB is allocated by PyTorch, and 112.29 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
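Note on the failure both versions of the log record: Qwen/Qwen-Image in bfloat16 needs far more memory than the 22.03 GiB GPU offers (the old log's warmup alone tries to allocate 38.05 GiB), so mapping every component to "cuda" cannot fit. The snippet below is a minimal, hypothetical sketch of a lower-memory loading path using the standard diffusers CPU-offload API; it is not taken from this repository, and the prompt and output filename are illustrative.

# Hypothetical sketch, not the fix recorded in this log: avoid placing the
# whole pipeline on the GPU at once. Assumes diffusers with accelerate installed.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "Qwen/Qwen-Image",
    torch_dtype=torch.bfloat16,  # keep bf16 weights, but omit device_map="cuda"
)

# Move each sub-model to the GPU only while it is needed, instead of holding
# all of the bf16 weights in 22 GiB of VRAM at the same time.
pipe.enable_model_cpu_offload()

image = pipe("a coffee shop entrance with a chalkboard sign").images[0]  # illustrative prompt
image.save("qwen_image_offload.png")

The PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True setting quoted in the error message targets fragmentation of already-allocated memory rather than total capacity, so on a 22 GiB card offloading (or a quantized checkpoint) is the more relevant lever.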