commit 69d269d2f09d55b483756709c58cd21e5ca02230
Author: Pat Wendorf <dungeons@gmail.com>
Date:   Fri Jun 21 12:52:10 2024 -0400

    first commit

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..a553afc
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,137 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Config and history files
+*.json
+
+# No audio files or summaries
+*.wav
+*.md
+*.tns
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+#   However, in case of collaboration, if having platform-specific dependencies or dependencies
+#   having no cross-platform support, pipenv may install dependencies that don't work, or not
+#   install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
diff --git a/meetings.bat b/meetings.bat
new file mode 100644
index 0000000..e13255e
--- /dev/null
+++ b/meetings.bat
@@ -0,0 +1,2 @@
@echo off
REM Launch the meeting recorder UI, forwarding any command-line arguments.
python meetings.py %*
\ No newline at end of file
diff --git a/meetings.py b/meetings.py
new file mode 100644
index 0000000..8a7668c
--- /dev/null
+++ b/meetings.py
@@ -0,0 +1,119 @@
+import sys
+import os
+import subprocess
+import pyaudio
+import wave
+import threading
+from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QVBoxLayout, QInputDialog
+
class RecordingApp(QWidget):
    """Minimal Qt front-end that records meetings to WAV files and launches
    summarize.py (transcription + summarization) as a child process."""

    def __init__(self):
        super().__init__()
        self.initUI()
        self.is_recording = False   # flag polled by the recording thread
        self.audio_thread = None    # background thread running record_audio
        self.stream = None          # pyaudio input stream
        self.p = None               # pyaudio.PyAudio instance
        self.wf = None              # wave file currently being written

    def initUI(self):
        """Build the three-button window (Record / Transcribe / Clean)."""
        self.setWindowTitle('Meeting Recorder')
        self.setGeometry(500, 500, 500, 150)

        layout = QVBoxLayout()

        self.record_button = QPushButton('Record', self)
        self.record_button.clicked.connect(self.toggle_recording)
        layout.addWidget(self.record_button)

        self.transcribe_button = QPushButton('Transcribe', self)
        self.transcribe_button.clicked.connect(self.transcribe)
        layout.addWidget(self.transcribe_button)

        self.clean_button = QPushButton('Clean', self)
        self.clean_button.clicked.connect(self.clean)
        layout.addWidget(self.clean_button)

        self.setLayout(layout)

    def toggle_recording(self):
        """Record button handler: start on first click, stop on the next."""
        if not self.is_recording:
            self.start_recording()
        else:
            self.stop_recording()

    def start_recording(self):
        """Ask the user for a base filename and spawn the recording thread."""
        filename, ok = QInputDialog.getText(self, 'Input Dialog', 'Enter filename:')
        if ok and filename:
            self.is_recording = True
            self.record_button.setText('Stop Recording')
            self.audio_thread = threading.Thread(target=self.record_audio, args=(filename,))
            self.audio_thread.start()

    def stop_recording(self):
        """Stop the recording thread and release audio/file resources.

        Safe to call when nothing is recording (e.g. from closeEvent).
        """
        if self.is_recording:
            self.is_recording = False
            self.record_button.setText('Record')
            # Join first so the worker is no longer writing to the stream/file
            # when we tear them down.
            if self.audio_thread:
                self.audio_thread.join()
            if self.stream:
                self.stream.stop_stream()
                self.stream.close()
            if self.p:
                self.p.terminate()
            if self.wf:
                self.wf.close()
            # Reset handles so a later stop/close can't touch dead objects.
            self.audio_thread = None
            self.stream = None
            self.p = None
            self.wf = None

    def record_audio(self, filename):
        """Capture 16 kHz mono 16-bit audio to <filename>.wav until stopped.

        Runs on a worker thread; loops while self.is_recording is True.
        """
        chunk_size = 1024
        sampling_rate = 16000
        num_channels = 1

        self.p = pyaudio.PyAudio()
        # BUG FIX: previously recorded to a hard-coded placeholder path
        # instead of the filename the user entered in the dialog.
        file_path = f"{filename}.wav"

        self.wf = wave.open(file_path, 'wb')
        self.wf.setnchannels(num_channels)
        self.wf.setsampwidth(self.p.get_sample_size(pyaudio.paInt16))
        self.wf.setframerate(sampling_rate)

        self.stream = self.p.open(
            format=pyaudio.paInt16,
            channels=num_channels,
            rate=sampling_rate,
            input=True,
            frames_per_buffer=chunk_size,
            input_device_index=0
        )

        print(f"Recording to {file_path}. Press 'Stop Recording' to stop...")

        while self.is_recording:
            data = self.stream.read(chunk_size)
            self.wf.writeframes(data)

        print(f"Audio saved to {file_path}")

    def transcribe(self):
        """Run summarize.py (transcription + summarization) as a child process."""
        # sys.executable guarantees the same interpreter that runs the UI,
        # regardless of what "python" resolves to on PATH.
        subprocess.Popen([sys.executable, 'summarize.py'])

    def clean(self):
        """Delete generated .wav and .tns files from the working directory."""
        print("Cleaning files...")
        try:
            for file in os.listdir('.'):
                if file.endswith(('.wav', '.tns')):
                    os.remove(file)
                    print(f"Deleted: {file}")
            print("Cleaning complete.")
        except OSError as e:
            # listdir/remove only raise OSError; don't mask unrelated bugs.
            print(f"An error occurred while cleaning files: {e}")

    def closeEvent(self, event):
        """Qt close hook: finalize any in-progress recording before exit."""
        self.stop_recording()
        event.accept()
+
if __name__ == '__main__':
    # Script entry point: build the Qt app, show the recorder window, and
    # hand control to the event loop until the window closes.
    qt_app = QApplication(sys.argv)
    window = RecordingApp()
    window.show()
    sys.exit(qt_app.exec_())
\ No newline at end of file
diff --git a/meetings.sh b/meetings.sh
new file mode 100644
index 0000000..f2c2c3e
--- /dev/null
+++ b/meetings.sh
@@ -0,0 +1,2 @@
#!/bin/sh
# Launch the meeting recorder UI, forwarding any command-line arguments.
# exec replaces the shell so signals reach the Python process directly.
exec python3 meetings.py "$@"
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..3f0b211
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,10 @@
+# This file is used by pip to install required python packages
+# Usage: pip install -r requirements.txt
+
+# UI
+pyqt5
+pyaudio
+
+# Transcriber
+requests
+python-dotenv
\ No newline at end of file
diff --git a/sample.env b/sample.env
new file mode 100644
index 0000000..55a6870
--- /dev/null
+++ b/sample.env
@@ -0,0 +1,9 @@
+WHISPERCPP_URL="http://localhost:8088/inference"
+LLAMACPP_URL="http://localhost:8080/completion"
+SYSTEM_MESSAGE="You are a friendly chatbot that summarizes call transcripts"
+SUMMARY_PROMPT="Call Transcript: {chunk}\n\nInstruction: Summarize the above call transcript but DO NOT MENTION THE TRANSCRIPT"
+SENTIMENT_PROMPT="Call Transcript: {chunk}\n\nInstruction: Summarize the sentiment for topics in the above call transcript but DO NOT MENTION THE TRANSCRIPT"
+PROMPT_FORMAT="<|im_start|>system\n{system}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant\n"
+STOP_TOKEN="<|im_end|>"
+CHUNK_SIZE=12288
+TEMPERATURE=0.1
\ No newline at end of file
diff --git a/summarize.py b/summarize.py
new file mode 100644
index 0000000..562b95c
--- /dev/null
+++ b/summarize.py
@@ -0,0 +1,115 @@
+import os
+import requests
+import datetime
+from dotenv import load_dotenv
+
+# Load environment variables from .env file
+load_dotenv()
+
# Load settings from the environment, falling back to the defaults documented
# in sample.env so a missing variable doesn't crash at import time (the
# original int(os.getenv(...)) raised TypeError when CHUNK_SIZE was unset).
WHISPERCPP_URL = os.getenv("WHISPERCPP_URL", "http://localhost:8088/inference")
LLAMACPP_URL = os.getenv("LLAMACPP_URL", "http://localhost:8080/completion")
SYSTEM_MESSAGE = os.getenv("SYSTEM_MESSAGE", "You are a friendly chatbot that summarizes call transcripts")
# {chunk} / {system} / {prompt} placeholders are filled via str.format later.
SUMMARY_PROMPT = os.getenv("SUMMARY_PROMPT", "Call Transcript: {chunk}\n\nInstruction: Summarize the above call transcript but DO NOT MENTION THE TRANSCRIPT")
SENTIMENT_PROMPT = os.getenv("SENTIMENT_PROMPT", "Call Transcript: {chunk}\n\nInstruction: Summarize the sentiment for topics in the above call transcript but DO NOT MENTION THE TRANSCRIPT")
PROMPT_FORMAT = os.getenv("PROMPT_FORMAT", "<|im_start|>system\n{system}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant\n")
STOP_TOKEN = os.getenv("STOP_TOKEN", "<|im_end|>")
CHUNK_SIZE = int(os.getenv("CHUNK_SIZE", "12288"))        # max characters per transcript chunk
TEMPERATURE = float(os.getenv("TEMPERATURE", "0.1"))      # LLM sampling temperature
+
def whisper_api(file):
    """Send one open audio file to the whisper.cpp server and return the transcript text."""
    # The endpoint accepts multipart uploads of several files; we always send one.
    request_fields = {
        "temperature": "0.0",
        "response_format": "json",
    }
    resp = requests.post(WHISPERCPP_URL, data=request_fields, files={"file": file})
    return resp.json()["text"]
+
def llama_api(prompt):
    """Run *prompt* through the llama.cpp completion endpoint and return the generated text."""
    # Wrap the user prompt in the chat template before sending.
    request_body = {
        "prompt": PROMPT_FORMAT.format(system=SYSTEM_MESSAGE, prompt=prompt),
        "n_predict": -1,
        "temperature": TEMPERATURE,
        "stop": [STOP_TOKEN],
        "tokens_cached": 0,
    }
    reply = requests.post(LLAMACPP_URL, headers={"Content-Type": "application/json"}, json=request_body)
    return reply.json()['content']
+
# Transcribe every WAV file in the working directory through the whisper API,
# writing each transcript next to its source audio as a .tns file.
wav_files = [f for f in os.listdir(".") if f.endswith(".wav")]
for wav_file in wav_files:
    print("Transcribing: " + wav_file)
    with open(wav_file, "rb") as file:
        # whisper_api streams the open file handle to the server.
        output_text = whisper_api(file)

    # Same base name as the audio, .tns extension.
    output_file = os.path.splitext(wav_file)[0] + ".tns"
    with open(output_file, "w") as output:
        output.write(output_text)
+        
+# Chunk the full transcript into multiple parts to fit in the context window
+# and allow for better reasoning capability
def chunk_transcript(string, chunk_size):
    """Split *string* into chunks of at least *chunk_size* characters, breaking
    only at line boundaries so each chunk fits the model's context window.

    BUG FIX: the previous version split on "\n" and concatenated the lines
    back without re-inserting the newline, gluing the last word of each line
    to the first word of the next and corrupting the text sent to the LLM.
    """
    chunks = []
    current_chunk = ""
    for line in string.split("\n"):
        if current_chunk:
            current_chunk += "\n"  # restore the separator dropped by split()
        current_chunk += line
        if len(current_chunk) >= chunk_size:
            chunks.append(current_chunk)
            current_chunk = ""
    if current_chunk:  # flush the final partial chunk
        chunks.append(current_chunk)
    return chunks
+
# Today's date stamp (yyyymmdd) names the markdown file all summaries go into.
today = datetime.datetime.now().strftime('%Y%m%d')
summary_filename = "summary-" + today + ".md"

# Every transcript (.tns) in the working directory gets summarized.
transcript_files = [f for f in os.listdir(".") if f.endswith(".tns")]

for transcript in transcript_files:
    print("Summarizing: " + transcript)

    with open(transcript, "r") as file:
        transcript_data = file.read()

    # Chunk the transcript so each piece fits in the model's context window.
    chunked_data = chunk_transcript(transcript_data, CHUNK_SIZE)

    for i, chunk in enumerate(chunked_data):
        # Summary and sentiment come from two separate LLM calls per chunk.
        summary = llama_api(SUMMARY_PROMPT.format(chunk=chunk))
        sentiment = llama_api(SENTIMENT_PROMPT.format(chunk=chunk))

        # Append the notes for this chunk to the day's markdown file.
        with open(summary_filename, "a") as md_file:
            md_file.write(f"# Summary - {transcript} - Part {i + 1}\n\n{summary}\n\n{sentiment}\n\n---\n")

print("Summarizing complete")
\ No newline at end of file