diff --git a/migrations/README b/migrations/README new file mode 100644 index 0000000..0e04844 --- /dev/null +++ b/migrations/README @@ -0,0 +1 @@ +Single-database configuration for Flask. diff --git a/migrations/alembic.ini b/migrations/alembic.ini new file mode 100644 index 0000000..ec9d45c --- /dev/null +++ b/migrations/alembic.ini @@ -0,0 +1,50 @@ +# A generic, single database configuration. + +[alembic] +# template used to generate migration files +# file_template = %%(rev)s_%%(slug)s + +# set to 'true' to run the environment during +# the 'revision' command, regardless of autogenerate +# revision_environment = false + + +# Logging configuration +[loggers] +keys = root,sqlalchemy,alembic,flask_migrate + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = WARN +handlers = console +qualname = + +[logger_sqlalchemy] +level = WARN +handlers = +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = +qualname = alembic + +[logger_flask_migrate] +level = INFO +handlers = +qualname = flask_migrate + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(levelname)-5.5s [%(name)s] %(message)s +datefmt = %H:%M:%S diff --git a/migrations/env.py b/migrations/env.py new file mode 100644 index 0000000..4c97092 --- /dev/null +++ b/migrations/env.py @@ -0,0 +1,113 @@ +import logging +from logging.config import fileConfig + +from flask import current_app + +from alembic import context + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +config = context.config + +# Interpret the config file for Python logging. +# This line sets up loggers basically. 
+fileConfig(config.config_file_name) +logger = logging.getLogger('alembic.env') + + +def get_engine(): + try: + # this works with Flask-SQLAlchemy<3 and Alchemical + return current_app.extensions['migrate'].db.get_engine() + except (TypeError, AttributeError): + # this works with Flask-SQLAlchemy>=3 + return current_app.extensions['migrate'].db.engine + + +def get_engine_url(): + try: + return get_engine().url.render_as_string(hide_password=False).replace( + '%', '%%') + except AttributeError: + return str(get_engine().url).replace('%', '%%') + + +# add your model's MetaData object here +# for 'autogenerate' support +# from myapp import mymodel +# target_metadata = mymodel.Base.metadata +config.set_main_option('sqlalchemy.url', get_engine_url()) +target_db = current_app.extensions['migrate'].db + +# other values from the config, defined by the needs of env.py, +# can be acquired: +# my_important_option = config.get_main_option("my_important_option") +# ... etc. + + +def get_metadata(): + if hasattr(target_db, 'metadatas'): + return target_db.metadatas[None] + return target_db.metadata + + +def run_migrations_offline(): + """Run migrations in 'offline' mode. + + This configures the context with just a URL + and not an Engine, though an Engine is acceptable + here as well. By skipping the Engine creation + we don't even need a DBAPI to be available. + + Calls to context.execute() here emit the given string to the + script output. + + """ + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, target_metadata=get_metadata(), literal_binds=True + ) + + with context.begin_transaction(): + context.run_migrations() + + +def run_migrations_online(): + """Run migrations in 'online' mode. + + In this scenario we need to create an Engine + and associate a connection with the context. 
+ + """ + + # this callback is used to prevent an auto-migration from being generated + # when there are no changes to the schema + # reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html + def process_revision_directives(context, revision, directives): + if getattr(config.cmd_opts, 'autogenerate', False): + script = directives[0] + if script.upgrade_ops.is_empty(): + directives[:] = [] + logger.info('No changes in schema detected.') + + conf_args = current_app.extensions['migrate'].configure_args + if conf_args.get("process_revision_directives") is None: + conf_args["process_revision_directives"] = process_revision_directives + + connectable = get_engine() + + with connectable.connect() as connection: + context.configure( + connection=connection, + target_metadata=get_metadata(), + **conf_args + ) + + with context.begin_transaction(): + context.run_migrations() + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/migrations/script.py.mako b/migrations/script.py.mako new file mode 100644 index 0000000..2c01563 --- /dev/null +++ b/migrations/script.py.mako @@ -0,0 +1,24 @@ +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision | comma,n} +Create Date: ${create_date} + +""" +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +# revision identifiers, used by Alembic. 
+revision = ${repr(up_revision)} +down_revision = ${repr(down_revision)} +branch_labels = ${repr(branch_labels)} +depends_on = ${repr(depends_on)} + + +def upgrade(): + ${upgrades if upgrades else "pass"} + + +def downgrade(): + ${downgrades if downgrades else "pass"} diff --git a/migrations/versions/565419907b71_add_event_model.py b/migrations/versions/565419907b71_add_event_model.py new file mode 100644 index 0000000..a9439dc --- /dev/null +++ b/migrations/versions/565419907b71_add_event_model.py @@ -0,0 +1,32 @@ +"""Add Event model + +Revision ID: 565419907b71 +Revises: +Create Date: 2025-07-28 14:29:12.567526 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = '565419907b71' +down_revision = None +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.drop_table('note') + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table('note', + sa.Column('id', sa.INTEGER(), nullable=False), + sa.Column('content', sa.TEXT(), nullable=False), + sa.PrimaryKeyConstraint('id') + ) + # ### end Alembic commands ### diff --git a/run.py b/run.py new file mode 100644 index 0000000..2964eca --- /dev/null +++ b/run.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python3 +""" +EchoNote Application Entry Point + +This script runs the EchoNote Flask application. 
+Usage: python run.py +""" + +import sys +import os + +# Add the src directory to Python path so we can import our package +sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src')) + +from echonote.app import app +from echonote.database import init_db + +if __name__ == '__main__': + # Initialize the database + init_db() + + # Run the Flask app + app.run(debug=True, host='0.0.0.0', port=5000) diff --git a/src/echonote/__init__.py b/src/echonote/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/echonote/app.py b/src/echonote/app.py new file mode 100644 index 0000000..81d9f15 --- /dev/null +++ b/src/echonote/app.py @@ -0,0 +1,121 @@ +import os +from google.cloud import speech +from google.auth.exceptions import DefaultCredentialsError +from werkzeug.utils import secure_filename +from flask import Flask, render_template, request, redirect, url_for, jsonify +from flask_sqlalchemy import SQLAlchemy +from database import create_task, init_db, get_all_tasks, update_task, delete_task +from genai_parser import TaskParser +task_parser = TaskParser() + +app = Flask(__name__) +app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False +app.config['UPLOAD_FOLDER'] = os.path.join(os.getcwd(), 'uploads') + + +# Dummy speech client class for testing +class DummySpeechClient: + """adding so tests can import app.py without crashing""" + + def recognize(self, config, audio): + # stub - tests will override this + raise NotImplementedError("This should be monkey patched in tests") + + +# Initializing google speech client +try: + speech_client = speech.SpeechClient() +except DefaultCredentialsError: + # fallback to a stub so that tests can monkey patch + speech_client = DummySpeechClient() + +# Define nav links to be used across routes +def get_nav_links(): + return [ + {'href': '/', 'text': 'Home', 'endpoint': 'index'}, + {'href': '/draw', 'text': 'Draw', 'endpoint': 'draw'} + ] + +#updated to use Task model not Note model +@app.route('/', methods=['GET']) 
+def index(): + tasks = get_all_tasks() + return render_template('index.html', tasks=tasks, nav_links=get_nav_links()) + +@app.route('/draw', methods=['GET']) +def draw(): + return render_template('draw.html', nav_links=get_nav_links()) + +# Audio upload route +@app.route('/api/upload', methods=['POST']) +def upload_audio(): + f = request.files.get('audio') + if not f: + return {"error": "no file"}, 400 + # sanitizing filename (used werkzeug helper) + filename = secure_filename(f.filename) + upload_folder = app.config['UPLOAD_FOLDER'] + # make sure the folder exists + os.makedirs(upload_folder, exist_ok=True) + # do we want to save audio to disk? if not we dont need next two lines + save_path = os.path.join(upload_folder, filename) + f.save(save_path) + return {"filename": filename}, 200 + +# Audio transcribe route +@app.route('/api/transcribe', methods=['POST']) +def transcribe_audio(): + if 'audio' not in request.files: + return jsonify(error='no file'), 400 + audio_bytes = request.files['audio'].read() + audio = speech.RecognitionAudio(content=audio_bytes) + config = speech.RecognitionConfig( + language_code="en-US", + ) + resp = speech_client.recognize(config=config, audio=audio) + transcript = " ".join(r.alternatives[0].transcript for r in resp.results) + + return jsonify(transcript=transcript), 200 + +# List tasks route +@app.route('/api/tasks', methods=['GET']) +def list_tasks(): + tasks = get_all_tasks() + return jsonify([{ + "id": t.id, + "name": t.name, + "completed": t.completed + } for t in tasks]) + +# process tasks route +@app.route('/api/save_task', methods=['POST']) +def save_task(): + data = request.get_json() + print("Received JSON:", data) + + if not data or "transcript" not in data: + print("No transcript provided") + return jsonify(error='Transcript is required'), 400 + + transcript = data["transcript"] + parsed_tasks = task_parser.parse_transcript(transcript) + + if not isinstance(parsed_tasks, list): + return jsonify(error="Failed to parse 
tasks"), 500 + + count = 0 + for task_data in parsed_tasks: + task_text = task_data.get("text") + due_date = task_data.get("due") + print(f"Trying to save task: {task_text} (Due: {due_date})") + if task_text: + create_task(task_text, due_date) + count += 1 + + print(f"Saved {count} tasks to database") + return jsonify(message=f'{count} tasks saved'), 200 + +if __name__ == '__main__': + with app.app_context(): + init_db()#initialize the tasks database + app.run(debug=True) \ No newline at end of file diff --git a/src/echonote/database.py b/src/echonote/database.py new file mode 100644 index 0000000..4233e38 --- /dev/null +++ b/src/echonote/database.py @@ -0,0 +1,77 @@ +from sqlalchemy import create_engine, Column, Integer, String, Boolean, DateTime, Text +from sqlalchemy.orm import declarative_base +from sqlalchemy.orm import sessionmaker +from datetime import datetime, timezone + + +# create a SQLite database +DATABASE_URL = "sqlite:///./echo_note.db" +engine = create_engine(DATABASE_URL, connect_args={"check_same_thread": False}) +SessionLocal = sessionmaker(bind=engine) +Base = declarative_base() + + +# define the Task model +class Task(Base): + __tablename__ = 'tasks' + id = Column(Integer, primary_key=True) + name = Column(String(255), nullable=False) + completed = Column(Boolean, default=False) + created_at = Column(DateTime, default=lambda: datetime.now(timezone.utc)) + due_date = Column(String(100), nullable=True) + + def __repr__(self): + return f"<Task id={self.id} name={self.name!r} completed={self.completed}>" + + +# initialize the database + + +def init_db(): + Base.metadata.create_all(bind=engine) + +# CRUD operations + + +def create_task(name, due_date=None): + db = SessionLocal() + task = Task(name=name, due_date=due_date) + db.add(task) + db.commit() + db.close() + + + +def get_all_tasks(): + db = SessionLocal() + tasks = db.query(Task).all() + db.close() + return tasks + + +def update_task(task_id, name=None, completed=None): + db = SessionLocal() + task = db.get(Task, task_id) + if not task: + db.close() + 
return None # or raise an error + if name is not None: + task.name = name + if completed is not None: + task.completed = completed + db.commit() + db.refresh(task) + db.close() + return task + + +def delete_task(task_id): + db = SessionLocal() + task = db.get(Task, task_id) + if not task: + db.close() + return False + db.delete(task) + db.commit() + db.close() + return True diff --git a/src/echonote/genai_parser.py b/src/echonote/genai_parser.py new file mode 100644 index 0000000..345386a --- /dev/null +++ b/src/echonote/genai_parser.py @@ -0,0 +1,44 @@ +import os +from urllib import response +from dotenv import load_dotenv +import google.generativeai as genai +import json +import re + +'''re = regular expressions ; cleans up text patterns --> when run w/o, output is messy. example below +"tasks": "```json\n[\n {\n \"text\": \"Finish the report\",\n + \"due\": \"Monday\"\n },\n {\n \"text\": \"Email report to the team\",\n + \"due\": \"Monday\"\n }\n]\n```" +''' + +class TaskParser: + def __init__(self): + load_dotenv() + genai.configure(api_key=os.getenv("GENAI_KEY")) + self.model = genai.GenerativeModel("gemini-1.5-pro") + + def parse_transcript(self, transcript: str): + with open("prompt_template.txt", "r") as file: + base_prompt = file.read() + + full_prompt = f"{base_prompt}\n\n{transcript}" + + try: + response = self.model.generate_content(full_prompt) + print("RAW GEMINI OUTPUT:") + print(response.text) + + original = response.text.strip() + clean_it = re.sub(r"```json|```", "", original).strip() + tasks = json.loads(clean_it) + print("Parsed tasks:", tasks) + return tasks + except Exception as e: + print("Error parsing transcript:", e) + print("Raw Gemini output:", response.text if 'response' in locals() else "None") + return [] + +'''class that reads a transcript and sends it to Gemini using a custom prompt. +returns a list of tasks based on what the user said. 
This lets us take +unstructured voice input and turn it into structured to-do items for the project. +''' \ No newline at end of file diff --git a/templates/index.html b/templates/index.html index dc93d9a..08713b4 100644 --- a/templates/index.html +++ b/templates/index.html @@ -181,6 +181,66 @@

Tasks

document.getElementById('cancel-btn').addEventListener('click', function() { document.getElementById('confirmation-popup').style.display = 'none'; }); + + // ✅ Handle undo button (mark task as incomplete) + document.querySelectorAll('.btn-undo').forEach(button => { + button.addEventListener('click', async function() { + const taskId = this.getAttribute('data-task-id'); + + try { + const response = await fetch(`/api/tasks/${taskId}`, { + method: 'PUT', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + completed: false + }) + }); + + if (response.ok) { + // Refresh the page to show updated task + window.location.reload(); + } else { + const errorData = await response.json(); + throw new Error(errorData.error || 'Failed to mark task as incomplete'); + } + } catch (error) { + console.error('Error marking task as incomplete:', error); + alert('Error marking task as incomplete: ' + error.message); + } + }); + }); + + // ✅ Handle done button (mark task as complete) + document.querySelectorAll('.btn-done').forEach(button => { + button.addEventListener('click', async function() { + const taskId = this.getAttribute('data-task-id'); + + try { + const response = await fetch(`/api/tasks/${taskId}`, { + method: 'PUT', + headers: { + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + completed: true + }) + }); + + if (response.ok) { + // Refresh the page to show updated task + window.location.reload(); + } else { + const errorData = await response.json(); + throw new Error(errorData.error || 'Failed to mark task as completed'); + } + } catch (error) { + console.error('Error marking task as completed:', error); + alert('Error marking task as completed: ' + error.message); + } + }); + }); });