first commit

This commit is contained in:
Pat Wendorf 2024-06-21 12:52:10 -04:00
commit 69d269d2f0
7 changed files with 394 additions and 0 deletions

137
.gitignore vendored Normal file
View file

@ -0,0 +1,137 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Config and history files
*.json
# No audio files or summaries
*.wav
*.md
*.tns
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/

2
meetings.bat Normal file
View file

@ -0,0 +1,2 @@
@echo off
REM Windows launcher: start the meeting recorder UI (meetings.py)
python meetings.py

119
meetings.py Normal file
View file

@ -0,0 +1,119 @@
import sys
import os
import subprocess
import pyaudio
import wave
import threading
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QVBoxLayout, QInputDialog
class RecordingApp(QWidget):
    """Small PyQt5 window with Record / Transcribe / Clean buttons.

    Records microphone audio to a user-named WAV file on a background
    thread, launches summarize.py for transcription/summarization, and
    deletes the intermediate .wav/.tns files on request.
    """

    def __init__(self):
        super().__init__()
        self.initUI()
        self.is_recording = False  # flag polled by the capture thread
        self.audio_thread = None   # background thread running record_audio
        self.stream = None         # pyaudio input stream
        self.p = None              # pyaudio.PyAudio instance
        self.wf = None             # wave file currently being written

    def initUI(self):
        """Build the three-button vertical layout."""
        self.setWindowTitle('Meeting Recorder')
        self.setGeometry(500, 500, 500, 150)

        layout = QVBoxLayout()

        self.record_button = QPushButton('Record', self)
        self.record_button.clicked.connect(self.toggle_recording)
        layout.addWidget(self.record_button)

        self.transcribe_button = QPushButton('Transcribe', self)
        self.transcribe_button.clicked.connect(self.transcribe)
        layout.addWidget(self.transcribe_button)

        self.clean_button = QPushButton('Clean', self)
        self.clean_button.clicked.connect(self.clean)
        layout.addWidget(self.clean_button)

        self.setLayout(layout)

    def toggle_recording(self):
        """Start recording if idle, stop if currently recording."""
        if not self.is_recording:
            self.start_recording()
        else:
            self.stop_recording()

    def start_recording(self):
        """Ask the user for a base filename and start the capture thread."""
        filename, ok = QInputDialog.getText(self, 'Input Dialog', 'Enter filename:')
        if ok and filename:
            self.is_recording = True
            self.record_button.setText('Stop Recording')
            self.audio_thread = threading.Thread(target=self.record_audio, args=(filename,))
            self.audio_thread.start()

    def stop_recording(self):
        """Signal the capture thread to finish, then release audio resources."""
        if self.is_recording:
            self.is_recording = False
            self.record_button.setText('Record')
            # Join first so the thread has stopped writing before we close anything.
            if self.audio_thread:
                self.audio_thread.join()
            if self.stream:
                self.stream.stop_stream()
                self.stream.close()
            if self.p:
                self.p.terminate()
            if self.wf:
                self.wf.close()

    def record_audio(self, filename):
        """Capture mono 16 kHz 16-bit audio into '<filename>.wav' until stopped.

        Runs on the background thread started by start_recording; loops
        while self.is_recording is True.
        """
        chunk_size = 1024
        sampling_rate = 16000
        num_channels = 1

        self.p = pyaudio.PyAudio()

        # BUG FIX: use the user-supplied name for the output path.  The
        # `filename` parameter was previously unused and every recording
        # went to one hard-coded placeholder file.
        file_path = f"{filename}.wav"
        self.wf = wave.open(file_path, 'wb')
        self.wf.setnchannels(num_channels)
        self.wf.setsampwidth(self.p.get_sample_size(pyaudio.paInt16))
        self.wf.setframerate(sampling_rate)

        self.stream = self.p.open(
            format=pyaudio.paInt16,
            channels=num_channels,
            rate=sampling_rate,
            input=True,
            frames_per_buffer=chunk_size,
            input_device_index=0  # NOTE(review): assumes device 0 is the mic — confirm
        )

        print(f"Recording to {file_path}. Press 'Stop Recording' to stop...")
        while self.is_recording:
            data = self.stream.read(chunk_size)
            self.wf.writeframes(data)
        print(f"Audio saved to {file_path}")

    def transcribe(self):
        """Run summarize.py as a separate, non-blocking process."""
        subprocess.Popen(['python', 'summarize.py'])

    def clean(self):
        """Delete all .wav and .tns files in the working directory."""
        print("Cleaning files...")
        try:
            for file in os.listdir('.'):
                if file.endswith(('.wav', '.tns')):
                    os.remove(file)
                    print(f"Deleted: {file}")
            print("Cleaning complete.")
        except Exception as e:
            # Best-effort cleanup: report and continue rather than crash the UI.
            print(f"An error occurred while cleaning files: {e}")

    def closeEvent(self, event):
        """Qt close hook: make sure recording resources are released on exit."""
        self.stop_recording()
        event.accept()
def _main():
    """Create the Qt application and run the recorder window's event loop."""
    app = QApplication(sys.argv)
    window = RecordingApp()
    window.show()
    sys.exit(app.exec_())


if __name__ == '__main__':
    _main()

2
meetings.sh Normal file
View file

@ -0,0 +1,2 @@
#!/bin/sh
# POSIX launcher: start the meeting recorder UI (meetings.py)
python3 meetings.py

10
requirements.txt Normal file
View file

@ -0,0 +1,10 @@
# This file is used by pip to install required python packages
# Usage: pip install -r requirements.txt
# UI
pyqt5
pyaudio
# Transcriber
requests
python-dotenv

9
sample.env Normal file
View file

@ -0,0 +1,9 @@
WHISPERCPP_URL="http://localhost:8088/inference"
LLAMACPP_URL="http://localhost:8080/completion"
SYSTEM_MESSAGE="You are a friendly chatbot that summarizes call transcripts"
SUMMARY_PROMPT="Call Transcript: {chunk}\n\nInstruction: Summarize the above call transcript but DO NOT MENTION THE TRANSCRIPT"
SENTIMENT_PROMPT="Call Transcript: {chunk}\n\nInstruction: Summarize the sentiment for topics in the above call transcript but DO NOT MENTION THE TRANSCRIPT"
PROMPT_FORMAT="<|im_start|>system\n{system}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant\n"
STOP_TOKEN="<|im_end|>"
CHUNK_SIZE=12288
TEMPERATURE=0.1

115
summarize.py Normal file
View file

@ -0,0 +1,115 @@
import os
import requests
import datetime
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()

# Service endpoints and prompt templates, all supplied via the environment
# (see sample.env for the expected keys).
WHISPERCPP_URL = os.getenv("WHISPERCPP_URL")
LLAMACPP_URL = os.getenv("LLAMACPP_URL")
SYSTEM_MESSAGE = os.getenv("SYSTEM_MESSAGE")
SUMMARY_PROMPT = os.getenv("SUMMARY_PROMPT")
SENTIMENT_PROMPT = os.getenv("SENTIMENT_PROMPT")
PROMPT_FORMAT = os.getenv("PROMPT_FORMAT")
STOP_TOKEN = os.getenv("STOP_TOKEN")
# Numeric settings get explicit defaults (matching sample.env) so a missing
# key no longer raises an opaque TypeError from int(None)/float(None).
CHUNK_SIZE = int(os.getenv("CHUNK_SIZE", "12288"))
TEMPERATURE = float(os.getenv("TEMPERATURE", "0.1"))
def whisper_api(file):
    """Send an open WAV file handle to the whisper.cpp server; return the transcript text."""
    # The endpoint accepts multiple uploads, but we always send exactly one file.
    request_fields = {
        "temperature": "0.0",
        "response_format": "json",
    }
    response = requests.post(WHISPERCPP_URL, data=request_fields, files={"file": file})
    return response.json()["text"]
def llama_api(prompt):
    """Wrap *prompt* in the configured chat template and return the model's completion text."""
    request_body = {
        "prompt": PROMPT_FORMAT.format(system=SYSTEM_MESSAGE, prompt=prompt),
        "n_predict": -1,
        "temperature": TEMPERATURE,
        "stop": [STOP_TOKEN],
        "tokens_cached": 0,
    }
    response = requests.post(
        LLAMACPP_URL,
        headers={"Content-Type": "application/json"},
        json=request_body,
    )
    return response.json()['content']
# Transcribe every WAV file in the working directory via the whisper API,
# writing each transcript next to its source with a .tns extension.
for wav_file in [f for f in os.listdir(".") if f.endswith(".wav")]:
    with open(wav_file, "rb") as audio:
        print("Transcribing: " + wav_file)
        transcript_text = whisper_api(audio)
    # Same base name, .tns extension
    tns_path = os.path.splitext(wav_file)[0] + ".tns"
    with open(tns_path, "w") as out:
        out.write(transcript_text)
def chunk_transcript(string, chunk_size):
    """Split *string* into chunks of at least *chunk_size* characters.

    Chunks break only at line boundaries, so no line is ever cut in half —
    this keeps each chunk coherent for the LLM's context window.  Newline
    separators are preserved (previously they were dropped while rebuilding
    chunks, which glued the last word of one line to the first word of the
    next).

    Returns a list of chunk strings; empty input yields an empty list.
    """
    if not string:
        return []
    chunks = []
    current_chunk = ""
    for line in string.split("\n"):
        current_chunk += line + "\n"  # keep the line separator
        if len(current_chunk) >= chunk_size:
            chunks.append(current_chunk)
            current_chunk = ""
    if current_chunk:  # flush the trailing partial chunk
        chunks.append(current_chunk)
    return chunks
# Output file is date-stamped: summary-yyyymmdd.md
today = datetime.datetime.now().strftime('%Y%m%d')
summary_filename = "summary-" + today + ".md"

# Summarize every transcript in the working directory, appending each
# chunk's summary and sentiment to the single markdown output file.
for transcript in [f for f in os.listdir(".") if f.endswith(".tns")]:
    print("Summarizing: " + transcript)
    with open(transcript, "r") as fh:
        transcript_data = fh.read()

    # Chunk the transcript so each request fits the model's context window.
    for i, chunk in enumerate(chunk_transcript(transcript_data, CHUNK_SIZE)):
        summary = llama_api(SUMMARY_PROMPT.format(chunk=chunk))
        sentiment = llama_api(SENTIMENT_PROMPT.format(chunk=chunk))
        with open(summary_filename, "a") as md_file:
            md_file.write(
                f"# Summary - {transcript} - Part {i + 1}\n\n"
                f"{summary}\n\n{sentiment}\n\n---\n"
            )

print("Summarizing complete")