Tomtom84 committed
Commit ad94d02 · 0 Parent(s)
Files changed (7)
  1. .gitattributes +35 -0
  2. Dockerfile +15 -0
  3. README.md +10 -0
  4. __pycache__/app.cpython-312.pyc +0 -0
  5. _bu.md +38 -0
  6. app.py +46 -0
  7. requirements.txt +4 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,15 @@
+ FROM python:3.12
+
+ RUN useradd -m -u 1000 user
+ USER user
+ ENV PATH="/home/user/.local/bin:$PATH"
+ # ^ when run as `user`, pip installs executables there
+
+ WORKDIR /app
+
+ COPY --chown=user ./requirements.txt requirements.txt
+ RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+ COPY --chown=user . /app
+
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
README.md ADDED
@@ -0,0 +1,10 @@
+ ---
+ title: "Example: Python"
+ emoji: 🔥
+ colorFrom: green
+ colorTo: purple
+ sdk: docker
+ pinned: false
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
__pycache__/app.cpython-312.pyc ADDED
Binary file (381 Bytes)
_bu.md ADDED
@@ -0,0 +1,38 @@
+ import outetts
+ import os
+
+ # Initialize the interface
+ interface = outetts.Interface(
+     config=outetts.ModelConfig.auto_config(
+         model=outetts.Models.VERSION_1_0_SIZE_1B,
+         # For llama.cpp backend
+         # backend=outetts.Backend.LLAMACPP,
+         # quantization=outetts.LlamaCppQuantization.FP16
+         # For transformers backend
+         backend=outetts.Backend.HF,
+     )
+ )
+
+ # Load the default speaker profile
+ speaker = interface.load_default_speaker("EN-FEMALE-1-NEUTRAL")
+
+ # Or create your own speaker profiles in seconds and reuse them instantly
+ # speaker = interface.create_speaker("path/to/audio.wav")
+ # interface.save_speaker(speaker, "speaker.json")
+ # speaker = interface.load_speaker("speaker.json")
+
+ # Generate speech
+ output = interface.generate(
+     config=outetts.GenerationConfig(
+         text="Hello, how are you doing?",
+         generation_type=outetts.GenerationType.CHUNKED,
+         speaker=speaker,
+         sampler_config=outetts.SamplerConfig(
+             temperature=0.4
+         ),
+     )
+ )
+
+ # Save to file
+ output_path = os.path.join(os.getcwd(), "output.wav")
+ output.save(output_path)
app.py ADDED
@@ -0,0 +1,46 @@
+ from fastapi import FastAPI
+
+ import outetts
+ import os
+
+ # Initialize the interface
+ interface = outetts.Interface(
+     config=outetts.ModelConfig.auto_config(
+         model=outetts.Models.VERSION_1_0_SIZE_1B,
+         # For llama.cpp backend
+         backend=outetts.Backend.LLAMACPP,
+         quantization=outetts.LlamaCppQuantization.FP16
+         # For transformers backend
+         # backend=outetts.Backend.HF,
+     )
+ )
+
+ # Load the default speaker profile
+ speaker = interface.load_default_speaker("EN-FEMALE-1-NEUTRAL")
+
+ # Or create your own speaker profiles in seconds and reuse them instantly
+ # speaker = interface.create_speaker("path/to/audio.wav")
+ # interface.save_speaker(speaker, "speaker.json")
+ # speaker = interface.load_speaker("speaker.json")
+
+ # Generate speech
+ output = interface.generate(
+     config=outetts.GenerationConfig(
+         text="Hello, how are you doing?",
+         generation_type=outetts.GenerationType.CHUNKED,
+         speaker=speaker,
+         sampler_config=outetts.SamplerConfig(
+             temperature=0.4
+         ),
+     )
+ )
+
+ # Save to file
+ output_path = os.path.join(os.getcwd(), "output.wav")
+ output.save(output_path)
+
+ app = FastAPI()
+
+ @app.get("/")
+ def greet_json():
+     return {"Hello": "World!"}
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ uvicorn[standard]==0.30.1
+ fastapi==0.111.0
+ outetts>=0.4
+ transformers==4.48.3