# 0_1_output_feature_pt.py
import argparse
import json
import os
import time
from dataclasses import dataclass, field, replace
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Sequence, Tuple, Union

import torch
import torch.nn.functional as F
from torch import Tensor

import whisper_openAI.whisper as whisper
from whisper_openAI.whisper.tokenizer import Tokenizer, get_tokenizer

start_time = time.time()
data_folder = 'Inference/gs_inferences/baseline_data_tiny_hypo_v2'

parser = argparse.ArgumentParser(description="Process and save data checkpoints")
parser.add_argument('--speaker_id', type=str, required=True, help="Speaker ID")
parser.add_argument('--model_name', type=str, default='',
                    help="Optional model-name suffix used in the data file names (default: none)")
args = parser.parse_args()
speaker_id = args.speaker_id
model_name = args.model_name
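# Example invocation (the speaker ID and model suffix below are hypothetical; adjust to your data):
#   python 0_1_output_feature_pt.py --speaker_id F01 --model_name tiny.en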
# We get the acoustic embeddings from Whisper large-v2
model, processor = whisper.load_model("large-v2")
print("Whisper large-v2 model loaded")
model.eval()
# The JSON splits below can be generated with the "To generatn-best hyporhesis.ipynb" notebook;
# the hypotheses still need to be tokenized here.
if model_name:
    # Change the file paths and names here if your layout differs
    with open(f'{data_folder}/torgo_train_{speaker_id}_{model_name}.json', "r") as file:
        train_data = json.load(file)
    with open(f'{data_folder}/torgo_val_{speaker_id}_{model_name}.json', "r") as valid_file:
        val_data = json.load(valid_file)
    # Load the test set
    with open(f'{data_folder}/torgo_test_{speaker_id}_{model_name}.json', "r") as test_file:
        test_data = json.load(test_file)
else:
    with open(f'{data_folder}/torgo_train_{speaker_id}.json', "r") as file:
        train_data = json.load(file)
    with open(f'{data_folder}/torgo_val_{speaker_id}.json', "r") as valid_file:
        val_data = json.load(valid_file)
    # Load the test set
    with open(f'{data_folder}/torgo_test_{speaker_id}.json', "r") as test_file:
        test_data = json.load(test_file)
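# Expected structure of each split (inferred from the fields accessed in process_train_data below;
# the utterance id and values are illustrative):
#   [{"utt_0001": {"inference": ["hypothesis 1", "hypothesis 2", ...],
#                  "ground_truth": "reference transcript",
#                  "path": "/path/to/audio.wav"}}, ...]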
"""Implementation derived from https://github.com/tloen/alpaca-lora"""
import sys
from pathlib import Path
import torch
import requests
import json
import os
from lit_llama.tokenizer import Tokenizer
from tqdm import tqdm
tokenizer_path: Path = Path("weights/tokenizer.model")
tokenizer = Tokenizer(tokenizer_path)
print(f"train has {len(train_data):,} samples")
print("Processing train split ...")
def tokenize(tokenizer: Tokenizer, string: str, max_length: int, eos=True) -> torch.Tensor:
return tokenizer.encode(string, bos=True, eos=eos, max_length=max_length)
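# Illustrative check of the tokenizer (commented out; mirrors the encode call used below,
# the sample string is hypothetical):
#   sample_ids = tokenize(tokenizer, "hello world", max_length=2048, eos=False)
#   print(sample_ids.shape, sample_ids.dtype)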
def process_train_data(train_data):
    instruction = 'You are an ASR transcript selector. You have a few transcripts generated by an automatic speech recognition model. Your task is to generate the most likely transcript from them. If the generated transcripts have grammatical or logical errors, you will modify them accordingly to produce the most accurate and coherent transcript.'
    result = []
    for i in tqdm(range(len(train_data))):
        for name in train_data[i].keys():
            ip = train_data[i][name]
            inference = ip['inference']
            gt = ip['ground_truth']
            # Remove the ground truth if it appears among the inferences used for the prompt
            if gt in inference:
                inference.remove(gt)
            # Join up to 15 hypotheses with '\n' to form the prompt input
            for_input = '\n'.join(inference[:15])
            # The prompt follows the Alpaca template
            full_prompt = f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Input:\n{for_input}\n\n### Response:"""
            full_prompt_and_response = full_prompt + gt
            encoded_full_prompt = tokenize(tokenizer, full_prompt, max_length=2048, eos=False)
            encoded_full_prompt_and_response = tokenize(tokenizer, full_prompt_and_response, eos=True, max_length=2048)
            labels = encoded_full_prompt_and_response.clone()
            labels_with_masked_input = encoded_full_prompt_and_response.clone()
            # Mask the prompt tokens so the loss is computed only on the response
            labels_with_masked_input[:len(encoded_full_prompt)] = -1
            # Extract Whisper encoder features for the corresponding audio
            path = ip['path']
            audio = whisper.load_audio(path)
            audio = whisper.pad_or_trim(audio)
            mel = whisper.log_mel_spectrogram(audio).to(model.device)  # Adjust as needed for your model
            mel = mel.unsqueeze(0)
            with torch.no_grad():
                audio_features = model.encoder(mel)
            result.append({**ip,
                           'index': name,
                           "input_ids": encoded_full_prompt_and_response,
                           "input_ids_no_response": encoded_full_prompt,
                           "labels": labels,
                           'labels_with_masked_input': labels_with_masked_input,
                           'audio_features': audio_features.bfloat16()})
    return result
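# Each saved entry carries the original JSON fields plus (summary of the dict built above):
#   input_ids                - tokenized prompt + ground truth (with EOS)
#   input_ids_no_response    - tokenized prompt only
#   labels                   - unmasked copy of input_ids
#   labels_with_masked_input - input_ids with the prompt positions set to -1
#   audio_features           - Whisper large-v2 encoder output, cast to bfloat16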
split = "train"
result = process_train_data(train_data)
torch.save(result, f'{data_folder}/torgo_{speaker_id}_{model_name}_{split}.pt')
print(f"Processed {split} data and saved checkpoint for {speaker_id}")
# split = "val"
# result = process_train_data(val_data)
# torch.save(result,f'{data_folder}/torgo_{speaker_id}_{model_name}_{split}.pt')
# print(f"Processed {split} data and saved checkpoint for {speaker_id}")
# split = "test"
# result = process_train_data(test_data)
# torch.save(result,f'{data_folder}/torgo_{speaker_id}_{model_name}_{split}.pt')
# print(f"Processed {split} data and saved checkpoint for {speaker_id}")
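# Minimal sketch for sanity-checking a saved checkpoint afterwards (commented out;
# the path mirrors the torch.save call above):
#   ckpt = torch.load(f'{data_folder}/torgo_{speaker_id}_{model_name}_train.pt')
#   print(len(ckpt), ckpt[0].keys(), ckpt[0]['audio_features'].shape)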
end_time = time.time()
elapsed_time = end_time - start_time
elapsed_time_minutes = elapsed_time / 60
print(f"script runtime {elapsed_time_minutes:.2f}")