Upload 4 files
- Dockerfile +28 -0
- app.py +182 -0
- download_model.py +20 -0
- requirements.txt +5 -0
Dockerfile
ADDED
@@ -0,0 +1,28 @@
FROM python:3.9.9-slim

# Copy static ffmpeg binaries
COPY --from=mwader/static-ffmpeg:6.0 /ffmpeg /usr/local/bin/
COPY --from=mwader/static-ffmpeg:6.0 /ffprobe /usr/local/bin/

# Set working directory
WORKDIR /app

# Copy requirements first to leverage Docker cache
COPY requirements.txt .

# Install Python dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Download the models and clean up cache in the same layer
COPY download_model.py .
RUN python download_model.py && \
    rm -rf /root/.cache/huggingface

# Copy the rest of the application
COPY . .

# Expose the port
EXPOSE 7860

# Run the application
CMD gunicorn --bind 0.0.0.0:${PORT:-7860} --workers 1 --threads 8 --timeout 0 "app:app"
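Two details in this Dockerfile do the heavy lifting: the static ffmpeg/ffprobe binaries come from the mwader/static-ffmpeg image instead of an apt install, and the models are fetched at build time with the Hugging Face cache deleted in the same RUN layer, so the layer carries only the copied model directories. A minimal smoke-test sketch (hypothetical, not part of this commit) that can be run inside the built container to confirm the binaries are on PATH:

# smoke_test.py (hypothetical helper): verify the static ffmpeg/ffprobe
# binaries copied from mwader/static-ffmpeg:6.0 resolve inside the container.
import shutil
import subprocess

for tool in ("ffmpeg", "ffprobe"):
    path = shutil.which(tool)
    assert path is not None, f"{tool} not found on PATH"
    # Print the first line of the version banner, e.g. "ffmpeg version 6.0 ..."
    banner = subprocess.run([tool, "-version"], capture_output=True, text=True).stdout
    print(path, "->", banner.splitlines()[0])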
app.py
ADDED
@@ -0,0 +1,182 @@
import os
import time
from faster_whisper import WhisperModel
import logging
from flask import Flask, render_template, request, send_file, after_this_request
from werkzeug.utils import secure_filename

app = Flask(__name__)
app.logger.setLevel(logging.INFO)
app.config['UPLOAD_FOLDER'] = 'uploads'
app.config['OUTPUT_FOLDER'] = 'outputs'
app.config['ALLOWED_EXTENSIONS'] = {'mp3', 'wav', 'flac', 'mp4', 'mkv', 'mov', 'm4a', 'ogg', 'webm'}

os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
os.makedirs(app.config['OUTPUT_FOLDER'], exist_ok=True)

# Model cache to avoid reloading the same model
model_cache = {}

def get_model(model_type):
    if model_type not in model_cache:
        model_path = f"/app/models/{model_type}"
        # Fallback for local development if /app/models doesn't exist
        if not os.path.exists(model_path):
            model_path = os.path.join(os.getcwd(), "models", model_type)

        app.logger.info(f"Loading model: {model_type} from {model_path}")
        model_cache[model_type] = WhisperModel(model_path, device="cpu", compute_type="int8")
    return model_cache[model_type]

def allowed_file(filename):
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in app.config['ALLOWED_EXTENSIONS']

def format_srt_time(seconds):
    hours = int(seconds // 3600)
    minutes = int((seconds % 3600) // 60)
    secs = int(seconds % 60)
    millis = int((seconds * 1000) % 1000)
    return f"{hours:02}:{minutes:02}:{secs:02},{millis:03}"

def transcribe_with_whisper(input_file, output_dir, language, model_type, max_duration):
    model = get_model(model_type)

    # Perform transcription
    transcribe_start = time.time()
    # faster-whisper returns a generator of segments and info
    segments, info = model.transcribe(
        input_file,
        language=language,
        word_timestamps=True
    )

    # Process segments into short chunks
    processed_segments = []

    for segment in segments:
        # If segment is already short enough or has no word timestamps, keep it as is
        if (segment.end - segment.start <= max_duration) or not segment.words:
            processed_segments.append({
                'start': segment.start,
                'end': segment.end,
                'text': segment.text.strip()
            })
        else:
            # Split segment into smaller chunks based on word timestamps
            current_chunk_words = []
            chunk_start = None

            for word in segment.words:
                if chunk_start is None:
                    chunk_start = word.start

                # If adding this word exceeds max_duration, finalize current chunk
                if current_chunk_words and (word.end - chunk_start > max_duration):
                    processed_segments.append({
                        'start': chunk_start,
                        'end': current_chunk_words[-1].end,
                        'text': " ".join([w.word.strip() for w in current_chunk_words])
                    })
                    current_chunk_words = [word]
                    chunk_start = word.start
                else:
                    current_chunk_words.append(word)

            # Add the last chunk
            if current_chunk_words:
                processed_segments.append({
                    'start': chunk_start,
                    'end': current_chunk_words[-1].end,
                    'text': " ".join([w.word.strip() for w in current_chunk_words])
                })

    transcribe_duration = time.time() - transcribe_start
    app.logger.info(f"[PROFILING] Transcribing file with {model_type} model took: {transcribe_duration:.2f} seconds")
    app.logger.info(f"[PROFILING] Detected language: {info.language} with probability {info.language_probability:.2f}")

    # Save to an SRT file
    srt_filename = "output.srt"
    srt_file = os.path.join(output_dir, srt_filename)

    srt_save_start = time.time()
    with open(srt_file, "w", encoding="utf-8") as f:
        for idx, segment in enumerate(processed_segments):
            start_time_srt = format_srt_time(segment['start'])
            end_time_srt = format_srt_time(segment['end'])

            f.write(f"{idx + 1}\n")
            f.write(f"{start_time_srt} --> {end_time_srt}\n")
            f.write(f"{segment['text']}\n\n")

    srt_save_duration = time.time() - srt_save_start
    app.logger.info(f"[PROFILING] Saving to SRT file took: {srt_save_duration:.2f} seconds")

    return srt_file

@app.route('/', methods=['GET'])
def index():
    return render_template('index.html')

@app.route('/transcribe', methods=['POST'])
def transcribe():
    if 'file' not in request.files:
        return 'No file uploaded', 400

    file = request.files['file']
    if file.filename == '':
        return 'No selected file', 400

    if not allowed_file(file.filename):
        return 'Invalid file type. Allowed types: ' + ', '.join(app.config['ALLOWED_EXTENSIONS']), 400

    language = request.form.get('language', 'en')
    model_type = request.form.get('model_type', 'accurate')
    try:
        max_duration = float(request.form.get('max_duration', 2.0))
        if not (1 <= max_duration <= 5):
            max_duration = 2.0
    except (ValueError, TypeError):
        max_duration = 2.0

    if file:
        filename = secure_filename(file.filename)
        input_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)

        # Save uploaded file
        save_start = time.time()
        file.save(input_path)
        save_duration = time.time() - save_start
        app.logger.info(f"[PROFILING] Saving uploaded file took: {save_duration:.2f} seconds")

        try:
            srt_path = transcribe_with_whisper(input_path, app.config['OUTPUT_FOLDER'], language, model_type, max_duration)

            @after_this_request
            def remove_files(response):
                try:
                    remove_start = time.time()
                    os.remove(input_path)
                    os.remove(srt_path)
                    remove_duration = time.time() - remove_start
                    app.logger.info(f"[PROFILING] Removing files took: {remove_duration:.2f} seconds")
                except Exception as e:
                    app.logger.error(f"Error removing files: {e}")
                return response

            return send_file(srt_path, as_attachment=True, download_name=f"{os.path.splitext(filename)[0]}.srt")

        except Exception as e:
            app.logger.error(f"Transcription error: {str(e)}")
            return f"An error occurred: {str(e)}", 500

if __name__ == '__main__':
    port = int(os.environ.get('PORT', 7860))
    app.run(host='0.0.0.0', port=port)


#############

#if __name__ == "__main__":
#    import uvicorn
#    uvicorn.run(app, host="0.0.0.0", port=7860)
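For reference, a minimal client sketch (not part of the commit) for exercising the /transcribe endpoint; it assumes the app is reachable at localhost:7860 and that the requests package is installed. The form fields mirror what transcribe() reads above: file, language, model_type, and max_duration.

# client_example.py (hypothetical): POST a media file and save the returned SRT.
import requests

url = "http://localhost:7860/transcribe"
with open("sample.mp3", "rb") as f:          # any allowed extension works
    resp = requests.post(
        url,
        files={"file": ("sample.mp3", f)},
        data={
            "language": "en",        # passed straight through to model.transcribe
            "model_type": "fast",    # "fast" (base) or "accurate" (large-v3)
            "max_duration": "2.0",   # clamped to [1, 5] seconds per subtitle
        },
        timeout=600,
    )
resp.raise_for_status()
with open("sample.srt", "wb") as out:
    out.write(resp.content)          # the server sends the SRT as an attachment
print("Saved sample.srt")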
download_model.py
ADDED
@@ -0,0 +1,20 @@
from faster_whisper import download_model
import shutil
import os

def fetch_and_move(model_name, target_dir):
    print(f"Downloading model: {model_name}...")
    downloaded_path = download_model(model_name)

    if os.path.exists(target_dir):
        shutil.rmtree(target_dir)

    os.makedirs(os.path.dirname(target_dir), exist_ok=True)
    shutil.copytree(downloaded_path, target_dir)
    print(f"Model {model_name} successfully moved to: {target_dir}")

# Fast model
fetch_and_move("base", "/app/models/fast")

# Accurate model
fetch_and_move("Systran/faster-whisper-large-v3", "/app/models/accurate")
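As a quick sanity check (hypothetical, not part of the commit), the pre-downloaded directories can be loaded exactly the way get_model() in app.py does, by pointing WhisperModel at the local path with int8 CPU compute:

# check_model.py (hypothetical): assumes the build step above has populated
# /app/models/fast; sample.mp3 is a placeholder input file.
from faster_whisper import WhisperModel

model = WhisperModel("/app/models/fast", device="cpu", compute_type="int8")
segments, info = model.transcribe("sample.mp3", language="en", word_timestamps=True)
print(f"Detected {info.language} (p={info.language_probability:.2f})")
for segment in segments:
    print(f"[{segment.start:.2f} -> {segment.end:.2f}] {segment.text.strip()}")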
requirements.txt
ADDED
@@ -0,0 +1,5 @@
Flask==2.0.2
Werkzeug==2.2.3
faster-whisper
numpy<2.0.0
gunicorn