import io
import os

import pandas as pd
from dotenv import load_dotenv
from huggingface_hub import HfApi, hf_hub_download

# Load environment variables (expects HF_TOKEN) from a .env file
load_dotenv()
# ASR_model = "openai/whisper-large-v2"  # Replace with your ASR model
# csv_path = "test.csv"  # read from local file
# csv_transcript = f"test_with_{ASR_model.replace('/', '_')}.csv"  # transcript CSV to save in the dataset repo
# csv_result = f"test_with_{ASR_model.replace('/', '_')}_WER.csv"  # WER results CSV to save in the dataset repo
# df = pd.read_csv(csv_path)
# print(f"CSV loaded with {len(df)} rows")
def upload_csv(df, csv_filename):
    """Serialize a DataFrame to CSV in memory and upload it to the results dataset repo."""
    # Write the CSV into an in-memory binary buffer (binary handles are supported by pandas >= 1.2)
    csv_buffer = io.BytesIO()
    df.to_csv(csv_buffer, index=False)
    csv_buffer.seek(0)

    try:
        # Upload the generated CSV to the Hugging Face Hub
        api = HfApi(token=os.getenv("HF_TOKEN"))
        print(f"✅ Uploading CSV: {csv_filename}")
        api.upload_file(
            path_or_fileobj=csv_buffer,
            path_in_repo=csv_filename,
            repo_id="satyamr196/asr_fairness_results",
            repo_type="dataset",
        )
        return True
    except Exception as e:
        print(f"⚠️ Could not upload CSV: {csv_filename} — {e}")
        return False
# upload_csv(df, f"test_with_{ASR_model.replace('/', '_')}_WER.csv")
def download_csv(csv_filename):
    """Fetch a CSV from the results dataset repo and return it as a DataFrame, or None if unavailable."""
    repo_id = "satyamr196/asr_fairness_results"
    try:
        # Download the CSV file from the dataset repo (cached locally by huggingface_hub)
        csv_path = hf_hub_download(repo_id=repo_id, filename=csv_filename, repo_type="dataset")
        # Load into pandas
        return pd.read_csv(csv_path)
    except Exception as e:
        # File missing or network error; callers treat None as "CSV not found"
        # print(f"⚠️ Could not load CSV: {csv_filename} — {e}")
        return None
# # Load the CSV from the Hugging Face Hub
# df = download_csv(csv_result)
# if df is None:
#     print("CSV not found in the dataset repo. Please upload the file first.")
# else:
#     print(f"CSV loaded with {len(df)} rows")
#     print(df)
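
# Minimal usage sketch (hypothetical): round-trips a tiny DataFrame through the
# dataset repo. It assumes HF_TOKEN in the environment grants write access to
# satyamr196/asr_fairness_results; the filename and column names below are
# illustrative only, and running this will write a throwaway file to the repo.
if __name__ == "__main__":
    demo_df = pd.DataFrame({"audio_id": [1, 2], "transcript": ["hello", "world"]})
    if upload_csv(demo_df, "demo_roundtrip.csv"):
        fetched = download_csv("demo_roundtrip.csv")
        print(fetched if fetched is not None else "Download failed")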