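# Python dependencies (pip requirements format); a mix of pinned and unpinned packages.
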
git+https://github.com/huggingface/diffusers.git

torchvision==0.20.1

pipeline

transformers==4.46.3

git+https://github.com/huggingface/accelerate.git

safetensors==0.4.5

peft==0.13.2

gradio[oauth]==5.7.1

paramiko

huggingface_hub


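# Pinned NVIDIA CUDA runtime wheels. The indented "# via" annotations below look like
# pip-compile output noting which packages (primarily torch) pulled each wheel in.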
nvidia-cublas-cu12==12.4.5.8
    # via
    #   nvidia-cudnn-cu12
    #   nvidia-cusolver-cu12
    #   torch
nvidia-cuda-cupti-cu12==12.4.127
    # via torch
nvidia-cuda-nvrtc-cu12==12.4.127
    # via torch
nvidia-cuda-runtime-cu12==12.4.127
    # via torch
nvidia-cudnn-cu12==9.1.0.70
    # via torch
nvidia-cufft-cu12==11.2.1.3
    # via torch
nvidia-curand-cu12==10.3.5.147
    # via torch
nvidia-cusolver-cu12==11.6.1.9
    # via torch
nvidia-cusparse-cu12==12.1.0.106
    # via
    #   nvidia-cusolver-cu12
    #   torch
nvidia-nccl-cu12==2.20.5
    # via torch
nvidia-nvjitlink-cu12==12.6.77
    # via
    #   nvidia-cusolver-cu12
    #   nvidia-cusparse-cu12
nvidia-nvtx-cu12==12.4.127

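# Remaining unpinned dependencies.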
triton

tqdm

pandas

pillow