voice notes:

voice and journal slash commands



Inline insertText option for voice notes that inserts things at the cursor and shows the embed in a prompt



test longer (>25mbs) files



keep page online



name

Voice notes

setting

OPENAI API KEY

setting

OPENAI MODEL

setting

DESTINATION NOTE TAG

icon

mic_none

instructions


This is a plugin that uses the Whisper AI integration to transcribe audio into text with high fidelity. Great for taking notes while on mobile, etc.

This plugin requires your own OpenAI API key. This means that you will be charged by OpenAI based on your usage. Get your API key and use the plugin settings page to enter the value before you start using the plugin.

linkInstructions

The first time you use the plugin, invoke it with Quick Open (Ctrl-O or Cmd-O) by typing in "Voice notes".

Then, you can either:

Invoke it again with Quick Open, in which case recording will start automatically

Or open the plugin from the left-hand sidebar, where you can click the button to start recording.

When recording voice notes on mobile, the plugin must be in view and, regardless of platform, you cannot navigate away from the plugin page (e.g. to Notes mode, calendar, etc.)


Notes:

There are limits to the file size accepted by Whisper, so note that longer recordings might be rejected.

Development notes: please read about Transient Activation to learn about various issues this plugin might encounter, especially on mobile, and especially in contexts where the window/embed is not in focus. One such example is that writing to the clipboard is a gated API, which means that it cannot happen if the user wasn't recently active on the page.

linkChangelog

Changelog:

September 24th, 2025:

Keep screen on (for longer) on mobile when recording

Might not work when power-saving modes are enabled

September 19th, 2025:

September 1st, 2025:

Better support for creating task properties

Change OpenAI model from the plugin settings page

Change the destination tag for resulting notes from the plugin settings page

UI that lets you choose model, OpenAI key and destination note tag on first run.

August 31st, 2025:

Misc fixes

July 30th, 2025:

(Plugin will now work on Android following Amplenote mobile client fixes)

Plugin now works fullscreen and pins itself in the sidebar

Voice notes are now sent to OpenAI for summarization

Resulting tasks will have Amplenote properties applied to them, if any are deduced

Recording starts automatically when starting plugin with Quick Open Ctrl-O or Cmd-O

November 7th, 2024:

Fix an issue where longer recordings would not be processed (due to user not interacting with the page for a longer time)

No longer add transcription to the clipboard




/***
* Source Code: undefined
* Author:
* Build: production
* Character Count: 58371 (0.058 M)
* Target Folder: plugin
***/
(() => {
// inline-html:embed/index.html
var embed_default = `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Voice Notes</title>
<style>
@import url('https://fonts.googleapis.com/css2?family=Roboto:wght@500&display=swap');
 
#recordButton {
width: 300px;
height: 300px;
font-size: 24px;
font-family: 'Roboto', sans-serif;
display: block;
margin: 20px;
border: none;
cursor: pointer;
background-color: #ffffff;
color: #333;
position: relative;
overflow: hidden;
text-align: center;
padding: 0;
outline: none;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
border-radius: 15px;
transition: background-color 0.3s ease;
}
 
#recordButton:hover {
background-color: #f9f9f9;
}
 
#recordButton.disabled {
background-color: #e0e0e0;
cursor: not-allowed;
}
 
#recordButton span {
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
z-index: 2;
}
 
#timer {
position: absolute;
top: 30%;
left: 50%;
transform: translate(-50%, -50%);
font-size: 16px;
color: gray;
z-index: 3;
display: none;
}
 
#visualizer {
position: absolute;
top: 0;
left: 0;
z-index: 1;
border-radius: 15px;
overflow: hidden;
}
 
body {
margin: 20px;
font-family: 'Roboto', sans-serif;
background-color: transparent;
}
</style>
</head>
 
<body>
<button id="recordButton">
<div id="timer">00:00</div>
<span>Start Recording</span>
</button>
 
 
<script>(() => {
// common-utils/plugin-communication.js
// Abstract interface for embed <-> Amplenote plugin communication.
// Concrete implementations: DevPluginCommunicationService (mocked, for local
// development) and ProdPluginCommunicationService (bridges to the host app).
var PluginCommunicationService = class {
// Insert text into the current note; must be overridden.
async insertText(text) {
throw new Error("insertText must be implemented");
}
// Show an alert to the user; must be overridden.
async showAlert(message) {
throw new Error("showAlert must be implemented");
}
// Invoke an arbitrary plugin-side method; must be overridden.
async callPlugin(method, ...args) {
throw new Error("callPlugin must be implemented");
}
}, DevPluginCommunicationService = class extends PluginCommunicationService {
// config.logPrefix customizes the console prefix used by every mock call.
constructor(config = {}) {
super(), this.config = {
logPrefix: config.logPrefix || "[DEV PLUGIN]",
...config
}, this.insertedTexts = [], this.mockFlags = {
wasJustInvoked: !1
// Mock the plugin's flag state
}, console.log("\\u{1F527} [DEV] DevPluginCommunicationService initialized");
}
// Records the text locally (with timestamp) instead of writing to a note,
// and simulates a short network delay.
async insertText(text) {
return console.log(this.config.logPrefix, "Inserting text:", text), this.insertedTexts.push({
text,
timestamp: (/* @__PURE__ */ new Date()).toISOString()
}), await this.simulateDelay(200), console.log(this.config.logPrefix, "Text inserted successfully"), !0;
}
// Uses a native alert() so the message is still visible in dev mode.
async showAlert(message) {
return console.log(this.config.logPrefix, "Showing alert:", message), alert(\`[DEV MODE] \${message}\`), !0;
}
async callPlugin(method, ...args) {
if (console.log(this.config.logPrefix, "Calling plugin method:", method, "with args:", args), method === "wasJustInvoked") {
let result = this.mockFlags.wasJustInvoked;
return console.log(this.config.logPrefix, "Mock: wasJustInvoked returning", result), this.mockFlags.wasJustInvoked = !1, console.log(this.config.logPrefix, "Mock: Cleared wasJustInvoked flag"), result;
} else if (method === "getCurrentNoteUUID") {
let mockNoteUUID = "mock-note-uuid-12345";
return console.log(this.config.logPrefix, "Mock: getCurrentNoteUUID returning", mockNoteUUID), mockNoteUUID;
} else if (method === "getNoteTasks") {
let noteUUID = args[0];
console.log(this.config.logPrefix, "Mock: getNoteTasks for note", noteUUID);
let mockTasks = [
{
uuid: "task-uuid-1",
content: "Call the plumber today to fix the kitchen sink leak before it floods",
important: !1,
urgent: !1
},
{
uuid: "task-uuid-2",
content: "Sort through and donate outgrown kids' clothes by Saturday afternoon",
important: !1,
urgent: !1
},
{
uuid: "task-uuid-3",
content: "Make reservations for Grandma's 80th birthday dinner on Sunday at 6 PM",
important: !1,
urgent: !1
}
];
return console.log(this.config.logPrefix, "Mock: returning", mockTasks.length, "tasks"), mockTasks;
} else if (method === "updateTask") {
let taskUUID = args[0], properties = args[1];
return console.log(this.config.logPrefix, "Mock: updateTask", taskUUID, "with properties:", properties), console.log(this.config.logPrefix, "Mock: task updated successfully"), !0;
} else {
if (method === "getApiKey")
return console.log(this.config.logPrefix, "Mock: getApiKey - falling back to environment variables"), null;
if (method === "getModel")
return console.log(this.config.logPrefix, "Mock: getModel - using default development model"), "gpt-4.1-mini";
}
return console.log(this.config.logPrefix, "Unknown method:", method), !1;
}
// Resolves after ms milliseconds; used to mimic network latency in dev mode.
async simulateDelay(ms) {
return new Promise((resolve) => setTimeout(resolve, ms));
}
// Returns a defensive copy of everything "inserted" so far.
getInsertedTexts() {
return [...this.insertedTexts];
}
clearInsertedTexts() {
this.insertedTexts = [];
}
// Snapshot of the mock state, surfaced via window.pluginDebug.
getStats() {
return {
totalInsertions: this.insertedTexts.length,
lastInsertion: this.insertedTexts.length > 0 ? this.insertedTexts[this.insertedTexts.length - 1] : null,
mockFlags: this.mockFlags
};
}
// Dev-only method to simulate appOption being called
simulateAppOptionCall() {
console.log(this.config.logPrefix, "Simulating appOption call - setting wasJustInvoked = true"), this.mockFlags.wasJustInvoked = !0;
}
}, ProdPluginCommunicationService = class extends PluginCommunicationService {
// callAmplenotePlugin is the bridge function injected by the Amplenote host.
constructor(callAmplenotePlugin) {
if (super(), this.callAmplenotePlugin = callAmplenotePlugin, !callAmplenotePlugin)
throw new Error("ProdPluginCommunicationService requires callAmplenotePlugin function");
}
// Forwards the insert to the host; logs and rethrows so callers can react.
async insertText(text) {
try {
return await this.callAmplenotePlugin("insertText", text);
} catch (error) {
throw console.error("Failed to insert text via plugin:", error), error;
}
}
async showAlert(message) {
try {
return await this.callAmplenotePlugin("showAlert", message);
} catch (error) {
throw console.error("Failed to show alert via plugin:", error), error;
}
}
// Generic pass-through for any other plugin-side method.
async callPlugin(method, ...args) {
try {
return await this.callAmplenotePlugin(method, ...args);
} catch (error) {
throw console.error(\`Failed to call plugin method \${method}:\`, error), error;
}
}
}, PluginCommunicationServiceFactory = class {
// Builds the right service for the detected environment.
static create(environment, options = {}) {
switch (environment) {
case "development":
return new DevPluginCommunicationService(options.devConfig);
case "production":
if (!options.callAmplenotePlugin)
throw new Error("Production environment requires callAmplenotePlugin option");
return new ProdPluginCommunicationService(options.callAmplenotePlugin);
default:
throw new Error(\`Unknown environment: \${environment}\`);
}
}
};
// Decides dev vs prod from the page URL: localhost/file URLs and typical
// dev-server ports (3xxx/5xxx/8xxx) mean development; anything else is production.
function detectEnvironment() {
  const { hostname, protocol, port } = window.location;
  const onLocalHost = hostname === "localhost" || hostname === "127.0.0.1" || hostname === "::1";
  const onFileUrl = protocol === "file:";
  const onDevPort = Boolean(port) && ["3", "8", "5"].some((digit) => port.startsWith(digit));
  if (onLocalHost || onDevPort || onFileUrl) {
    console.log("\\u{1F680} Running in DEVELOPMENT mode");
    return "development";
  }
  console.log("\\u{1F310} Running in PRODUCTION mode");
  return "production";
}
// Wires up the communication service for the current environment. In production
// we require the host bridge (window.callAmplenotePlugin); when it is missing we
// degrade to the mocked development service. Dev mode also exposes debug helpers.
function setupPluginCommunication(config = {}) {
  const environment = detectEnvironment();
  const options = { devConfig: config };
  if (environment === "production") {
    if (typeof window.callAmplenotePlugin != "function") {
      // Fallback path intentionally skips the window.pluginDebug wiring below.
      console.warn("window.callAmplenotePlugin not available, falling back to development mode");
      return PluginCommunicationServiceFactory.create("development", options);
    }
    options.callAmplenotePlugin = window.callAmplenotePlugin;
  }
  const service = PluginCommunicationServiceFactory.create(environment, options);
  if (environment === "development") {
    window.pluginDebug = {
      service,
      getStats: () => service.getStats(),
      getInsertedTexts: () => service.getInsertedTexts(),
      clearInsertedTexts: () => service.clearInsertedTexts(),
      simulateAppOptionCall: () => service.simulateAppOptionCall()
    };
    console.log("\\u{1F527} Development utilities available at window.pluginDebug");
    console.log("\\u{1F527} Use window.pluginDebug.simulateAppOptionCall() to test auto-start");
  }
  return service;
}
typeof window != "undefined" && (window.setupPluginCommunication = setupPluginCommunication);
})();
<\/script>
<script>(() => {
// node-modules-polyfills:node:process
// Throws for Node process APIs that cannot be emulated in the browser.
function unimplemented(name) {
throw new Error("Node.js process " + name + " is not supported by JSPM core outside of Node.js");
}
// Queue state backing the process.nextTick polyfill below.
var queue = [], draining = !1, currentQueue, queueIndex = -1;
// Safety net: if a queued callback threw, re-queue the remainder and resume.
function cleanUpNextTick() {
!draining || !currentQueue || (draining = !1, currentQueue.length ? queue = currentQueue.concat(queue) : queueIndex = -1, queue.length && drainQueue());
}
// Runs all queued Items, including ones enqueued while draining.
function drainQueue() {
if (!draining) {
var timeout = setTimeout(cleanUpNextTick, 0);
draining = !0;
for (var len = queue.length; len; ) {
for (currentQueue = queue, queue = []; ++queueIndex < len; )
currentQueue && currentQueue[queueIndex].run();
queueIndex = -1, len = queue.length;
}
currentQueue = null, draining = !1, clearTimeout(timeout);
}
}
// process.nextTick polyfill: schedules fun(...extraArgs) via setTimeout.
function nextTick(fun) {
var args = new Array(arguments.length - 1);
if (arguments.length > 1)
for (var i = 1; i < arguments.length; i++)
args[i - 1] = arguments[i];
queue.push(new Item(fun, args)), queue.length === 1 && !draining && setTimeout(drainQueue, 0);
}
// A queued callback plus its captured arguments.
function Item(fun, array) {
this.fun = fun, this.array = array;
}
Item.prototype.run = function() {
this.fun.apply(null, this.array);
};
// Static stand-ins for Node's process metadata when running in the browser.
var title = "browser", arch = "x64", platform = "browser", env = {
PATH: "/usr/bin",
LANG: typeof navigator != "undefined" ? navigator.language + ".UTF-8" : void 0,
PWD: "/",
HOME: "/home",
TMP: "/tmp"
}, argv = ["/usr/bin/node"], execArgv = [], version = "v16.8.0", versions = {}, emitWarning = function(message, type) {
console.warn((type ? type + ": " : "") + message);
}, binding = function(name) {
unimplemented("binding");
}, umask = function(mask) {
return 0;
}, cwd = function() {
return "/";
}, chdir = function(dir) {
}, release = {
name: "node",
sourceUrl: "",
headersUrl: "",
libUrl: ""
};
function noop() {
}
var browser = !0, _rawDebug = noop, moduleLoadList = [];
function _linkedBinding(name) {
unimplemented("_linkedBinding");
}
var domain = {}, _exiting = !1, config = {};
function dlopen(name) {
unimplemented("dlopen");
}
function _getActiveRequests() {
return [];
}
function _getActiveHandles() {
return [];
}
// No-op process controls; cpuUsage/resourceUsage/memoryUsage report nothing.
var reallyExit = noop, _kill = noop, cpuUsage = function() {
return {};
}, resourceUsage = cpuUsage, memoryUsage = cpuUsage, kill = noop, exit = noop, openStdin = noop, allowedNodeEnvironmentFlags = {};
// Minimal assert used by process consumers.
function assert(condition, message) {
if (!condition) throw new Error(message || "assertion error");
}
var features = {
inspector: !1,
debug: !1,
uv: !1,
ipv6: !1,
tls_alpn: !1,
tls_sni: !1,
tls_ocsp: !1,
tls: !1,
cached_builtins: !0
}, _fatalExceptions = noop, setUncaughtExceptionCaptureCallback = noop;
function hasUncaughtExceptionCaptureCallback() {
return !1;
}
// Misc process internals; _performance wraps the browser Performance API.
var _tickCallback = noop, _debugProcess = noop, _debugEnd = noop, _startProfilerIdleNotifier = noop, _stopProfilerIdleNotifier = noop, stdout = void 0, stderr = void 0, stdin = void 0, abort = noop, pid = 2, ppid = 1, execPath = "/bin/usr/node", debugPort = 9229, argv0 = "node", _preload_modules = [], setSourceMapsEnabled = noop, _performance = {
now: typeof performance != "undefined" ? performance.now.bind(performance) : void 0,
timing: typeof performance != "undefined" ? performance.timing : void 0
};
// Fallback clock when performance.now is unavailable (nowOffset is hoisted).
_performance.now === void 0 && (nowOffset = Date.now(), _performance.timing && _performance.timing.navigationStart && (nowOffset = _performance.timing.navigationStart), _performance.now = () => Date.now() - nowOffset);
var nowOffset;
// Seconds since page load, mirroring process.uptime().
function uptime() {
return _performance.now() / 1e3;
}
var nanoPerSec = 1e9;
// process.hrtime polyfill: returns [seconds, nanoseconds], optionally relative
// to a previously captured timestamp.
function hrtime(previousTimestamp) {
var baseNow = Math.floor((Date.now() - _performance.now()) * 1e-3), clocktime = _performance.now() * 1e-3, seconds = Math.floor(clocktime) + baseNow, nanoseconds = Math.floor(clocktime % 1 * 1e9);
return previousTimestamp && (seconds = seconds - previousTimestamp[0], nanoseconds = nanoseconds - previousTimestamp[1], nanoseconds < 0 && (seconds--, nanoseconds += nanoPerSec)), [seconds, nanoseconds];
}
hrtime.bigint = function(time) {
var diff = hrtime(time);
return typeof BigInt == "undefined" ? diff[0] * nanoPerSec + diff[1] : BigInt(diff[0] * nanoPerSec) + BigInt(diff[1]);
};
// Inert EventEmitter surface: listener registration is a no-op that returns
// the process object so call chaining still works.
var _maxListeners = 10, _events = {}, _eventsCount = 0;
function on() {
return process;
}
var addListener = on, once = on, off = on, removeListener = on, removeAllListeners = on, emit = noop, prependListener = on, prependOnceListener = on;
function listeners(name) {
return [];
}
// Assembled process polyfill object exposed to bundled Node-style code.
var process = {
version,
versions,
arch,
platform,
browser,
release,
_rawDebug,
moduleLoadList,
binding,
_linkedBinding,
_events,
_eventsCount,
_maxListeners,
on,
addListener,
once,
off,
removeListener,
removeAllListeners,
emit,
prependListener,
prependOnceListener,
listeners,
domain,
_exiting,
config,
dlopen,
uptime,
_getActiveRequests,
_getActiveHandles,
reallyExit,
_kill,
cpuUsage,
resourceUsage,
memoryUsage,
kill,
exit,
openStdin,
allowedNodeEnvironmentFlags,
assert,
features,
_fatalExceptions,
setUncaughtExceptionCaptureCallback,
hasUncaughtExceptionCaptureCallback,
emitWarning,
nextTick,
_tickCallback,
_debugProcess,
_debugEnd,
_startProfilerIdleNotifier,
_stopProfilerIdleNotifier,
stdout,
stdin,
stderr,
abort,
umask,
chdir,
cwd,
env,
title,
argv,
execArgv,
pid,
ppid,
execPath,
debugPort,
hrtime,
argv0,
_preload_modules,
setSourceMapsEnabled
};
 
// plugin/embed/whisper-api-service.js
// Wraps the plugin communication layer with OpenAI key/model resolution and
// note/task helpers used by the recording embed.
var WhisperAPIService = class {
constructor(pluginCommunication) {
this.pluginCommunication = pluginCommunication, this.apiKey = null, this.loadOpenAIKey(), console.log("\\u{1F3A4} [WHISPER] WhisperAPIService initialized"), console.log("\\u{1F511} [WHISPER] OpenAI API key source:", this.isUsingMockKey() ? "mock" : "environment");
}
// Resolves a dev-time API key in priority order: localStorage, process.env,
// window.WHISPER_ENV, then a mock placeholder key.
loadOpenAIKey() {
if (typeof localStorage != "undefined") {
let storedApiKey = localStorage.getItem("DEV_OPENAI_API_KEY");
if (storedApiKey) {
this.apiKey = storedApiKey, console.log("\\u{1F511} [WHISPER] Using OpenAI API key from localStorage");
return;
}
}
if (typeof process != "undefined" && process.env && process.env.OPENAI_API_KEY) {
this.apiKey = process.env.OPENAI_API_KEY, console.log("\\u{1F511} [WHISPER] Using OpenAI API key from process.env");
return;
}
if (typeof window != "undefined" && window.WHISPER_ENV && window.WHISPER_ENV.OPENAI_API_KEY) {
this.apiKey = window.WHISPER_ENV.OPENAI_API_KEY, console.log("\\u{1F511} [WHISPER] Using OpenAI API key from window.WHISPER_ENV");
return;
}
this.apiKey = "sk-mock-openai-key-for-development-testing", console.log("\\u26A0\\uFE0F [WHISPER] No OpenAI API key found, using mock key"), console.log('\\u{1F4A1} [WHISPER] Set your API key with: window.whisperDebug.setApiKey("your-key")'), console.log('\\u{1F4A1} [WHISPER] Or set window.WHISPER_ENV = {OPENAI_API_KEY: "your-key"} in browser console');
}
// True while the placeholder development key is in effect.
isUsingMockKey() {
return this.apiKey === "sk-mock-openai-key-for-development-testing";
}
// Preferred key source is the plugin settings (via callPlugin); falls back to
// whatever loadOpenAIKey resolved when the plugin bridge is unavailable.
async getApiKey() {
var _a;
if (console.log("[WHISPER] Getting OpenAI API key..."), console.log("[WHISPER] Plugin communication available:", !!this.pluginCommunication), console.log("[WHISPER] Plugin communication callPlugin method:", typeof ((_a = this.pluginCommunication) == null ? void 0 : _a.callPlugin)), this.pluginCommunication && typeof this.pluginCommunication.callPlugin == "function")
try {
console.log("[WHISPER] Attempting to get API key from plugin communication...");
let pluginApiKey = await this.pluginCommunication.callPlugin("getApiKey");
if (console.log("[WHISPER] Plugin communication returned API key:", pluginApiKey ? \`\${pluginApiKey.substring(0, 10)}...\` : "null/empty"), pluginApiKey && pluginApiKey.trim() !== "")
return console.log("\\u{1F511} [WHISPER] Using OpenAI API key from plugin settings"), pluginApiKey;
throw console.error("\\u{1F6A8} [WHISPER] No OpenAI API key found in plugin settings"), new Error("No OpenAI API key configured. Please set your API key in the plugin settings.");
} catch (error) {
// NOTE(review): this catch also swallows the "no key configured" error thrown
// just above and silently falls back to the dev/mock key — confirm intended.
console.log("\\u{1F50D} [WHISPER] Plugin communication failed, falling back to environment variables:", error);
}
else
console.log("[WHISPER] Plugin communication not available, using fallback");
return this.isUsingMockKey() && console.log("\\u{1F527} [WHISPER] Using development mock key"), console.log("[WHISPER] Returning API key:", this.apiKey ? \`\${this.apiKey.substring(0, 10)}...\` : "null"), this.apiKey;
}
// Model comes from plugin settings when available, otherwise a fixed default.
async getModel() {
if (console.log("[WHISPER] Getting OpenAI model..."), this.pluginCommunication && typeof this.pluginCommunication.callPlugin == "function")
try {
let pluginModel = await this.pluginCommunication.callPlugin("getModel");
if (pluginModel && pluginModel.trim() !== "")
return console.log("\\u{1F916} [WHISPER] Using OpenAI model from plugin settings:", pluginModel), pluginModel;
} catch (error) {
console.log("\\u{1F50D} [WHISPER] Plugin communication failed for model, falling back to default:", error);
}
return "gpt-4.1-mini";
}
// Persists a dev key to localStorage so page reloads keep it.
setApiKey(apiKey) {
this.apiKey = apiKey, typeof localStorage != "undefined" && (localStorage.setItem("DEV_OPENAI_API_KEY", apiKey), console.log("\\u{1F511} [WHISPER] OpenAI API key saved to localStorage")), console.log("[WHISPER] OpenAI API key updated to:", apiKey.substring(0, 10) + "...");
}
clearApiKey() {
this.apiKey = "sk-mock-openai-key-for-development-testing", typeof localStorage != "undefined" && (localStorage.removeItem("DEV_OPENAI_API_KEY"), console.log("\\u{1F511} [WHISPER] OpenAI API key cleared from localStorage"));
}
// Redacted view of the key for logs/debug output.
getCurrentApiKey() {
return this.isUsingMockKey() ? "[MOCK KEY]" : this.apiKey.substring(0, 10) + "...";
}
// Delegate basic plugin communication methods
async insertText(text) {
return await this.pluginCommunication.insertText(text);
}
async showAlert(message) {
return await this.pluginCommunication.showAlert(message);
}
// One-shot check used to auto-start recording after a Quick Open launch.
async wasJustInvoked() {
return console.log("[WHISPER] Checking if plugin was just invoked from appOption..."), await this.pluginCommunication.callPlugin("wasJustInvoked");
}
async getCurrentNoteUUID() {
return console.log("[WHISPER] Getting current note UUID..."), await this.pluginCommunication.callPlugin("getCurrentNoteUUID");
}
async getNoteTasks(noteUUID) {
return console.log("[WHISPER] Getting tasks from note:", noteUUID), await this.pluginCommunication.callPlugin("getNoteTasks", noteUUID);
}
async updateTask(taskUUID, properties) {
return console.log("[WHISPER] Updating task:", taskUUID, "with properties:", properties), await this.pluginCommunication.callPlugin("updateTask", taskUUID, properties);
}
// Debug utilities
// NOTE(review): pluginCommunication.getStats() is defined only on the dev
// service; presumably this is reached only via dev-mode debug helpers — confirm.
getStats() {
return {
openaiApiKey: this.getCurrentApiKey(),
isUsingMockKey: this.isUsingMockKey(),
...this.pluginCommunication.getStats()
};
}
};
// Builds the WhisperAPIService on top of the communication layer and, in dev
// mode (detected via window.pluginDebug), exposes window.whisperDebug helpers.
function setupWhisperAPI() {
let pluginCommunication = window.setupPluginCommunication({
logPrefix: "[WHISPER DEV]"
}), whisperAPI = new WhisperAPIService(pluginCommunication);
return typeof window.pluginDebug != "undefined" && (window.whisperDebug = {
service: whisperAPI,
setApiKey: (key) => whisperAPI.setApiKey(key),
clearApiKey: () => whisperAPI.clearApiKey(),
getCurrentApiKey: () => whisperAPI.getCurrentApiKey(),
getStats: () => whisperAPI.getStats(),
showInstructions: () => {
console.log("\\u{1F527} [WHISPER] How to set your OpenAI API key:"), console.log("1. Get key from: https://platform.openai.com/api-keys"), console.log('2. Run: window.whisperDebug.setApiKey("sk-your-key-here")'), console.log("3. Refresh the page");
},
reloadApiKey: () => (whisperAPI.loadOpenAIKey(), whisperAPI.getCurrentApiKey()),
// Delegate plugin communication debug methods
getInsertedTexts: () => pluginCommunication.getInsertedTexts(),
clearInsertedTexts: () => pluginCommunication.clearInsertedTexts(),
// Testing utilities
simulateAppOptionCall: () => pluginCommunication.simulateAppOptionCall ? pluginCommunication.simulateAppOptionCall() : console.log("Not available in production mode")
}, console.log("\\u{1F3A4} Whisper debug utilities available at window.whisperDebug"), console.log('\\u{1F511} Use window.whisperDebug.setApiKey("your-key") to set OpenAI API key'), console.log("\\u{1F3AC} Use window.whisperDebug.simulateAppOptionCall() to test auto-start")), whisperAPI;
}
typeof window != "undefined" && (window.setupWhisperAPI = setupWhisperAPI);
})();
<\/script>
<script>(() => {
// plugin/embed/index.js
// Embed entry point: wire up the Whisper service and expose it globally.
var whisperAPI = window.setupWhisperAPI();
window.whisperAPI = whisperAPI;
var ChatGPTProcessor = {
/**
* Processes transcript with ChatGPT using a two-step approach
* @param {string} transcript - The transcribed text
* @param {string} apiKey - OpenAI API key
* @param {string} model - OpenAI model to use
* @returns {Promise<Object>} Parsed response with summary and action items
*/
async processTranscript(transcript, apiKey, model = "gpt-4.1-mini") {
console.log("Processing transcript with ChatGPT (two-step approach)..."), console.log("Step 1: Getting basic summary and task list...");
let basicAnalysis = await this._getBasicAnalysis(transcript, apiKey, model);
return console.log("Step 2: Getting detailed task analysis..."), await this._getDetailedAnalysis(transcript, basicAnalysis, apiKey, model);
},
/**
* First ChatGPT request: Basic summary and task extraction
* Also updates the record button label as a progress indicator.
* @param {string} transcript - The transcribed text
* @param {string} apiKey - OpenAI API key
* @returns {Promise<Object>} Basic analysis with summary and simple task list
*/
async _getBasicAnalysis(transcript, apiKey, model) {
UIManager.updateButtonText("Creating summary...");
let prompt = this._buildBasicAnalysisPrompt(transcript), rawResponse = await this._sendChatGPTRequest(prompt, apiKey, model);
return this._parseResponse(rawResponse);
},
/**
* Second ChatGPT request: Detailed task analysis
* @param {string} transcript - The original transcribed text
* @param {Object} basicAnalysis - Results from first analysis
* @param {string} apiKey - OpenAI API key
* @returns {Promise<Object>} Detailed analysis with priorities, dates, and dependencies
*/
async _getDetailedAnalysis(transcript, basicAnalysis, apiKey, model) {
UIManager.updateButtonText("Analyzing tasks...");
let prompt = this._buildDetailedAnalysisPrompt(transcript, basicAnalysis), rawResponse = await this._sendChatGPTRequest(prompt, apiKey, model);
return this._parseResponse(rawResponse);
},
/**
* Builds the prompt for basic analysis (Step 1)
* @param {string} transcript - The transcribed text
* @returns {string} The formatted prompt for basic analysis
*/
_buildBasicAnalysisPrompt(transcript) {
return \`Please analyze the following voice note transcript and provide:
 
1. **Summary**: A concise summary in 3-5 bullet points capturing the main topics and key information
2. **Action Items**: A simple list of tasks mentioned in the transcript. Look for tasks implied in the transcript even if the user doesn't explicitly say "I need to do this" or "this is a task". Sometimes a sentence might contain two or more different tasks. Try to extract tasks with medium granularity, that is don't make tasks too small and atomic, but also not too broad. When you extract a task, try to look for clues as to wether a task is in fact two hidden tasks, eg. if something needs to happen before something else (a task blocking another one) and make sure to include both tasks in your list.
 
Please format your response as JSON:
 
{
"summary": ["bullet point 1", "bullet point 2", "bullet point 3"],
"actionItems": ["task 1", "task 2", "task 3"]
}
 
Transcript:
\${transcript}\`;
},
/**
* Builds the prompt for detailed analysis (Step 2)
* Embeds the current ISO timestamp so relative dates ("tomorrow") can be
* resolved by the model, plus the Step-1 summary and task list as context.
* @param {string} transcript - The original transcribed text
* @param {Object} basicAnalysis - Results from first analysis
* @returns {string} The formatted prompt for detailed analysis
*/
_buildDetailedAnalysisPrompt(transcript, basicAnalysis) {
return \`The current date and time is: \${(/* @__PURE__ */ new Date()).toISOString()}
 
You previously analyzed a voice note and created this summary and basic task list:
 
SUMMARY:
\${basicAnalysis.summary.map((point) => \`- \${point}\`).join(\`
\`)}
 
BASIC TASKS:
\${basicAnalysis.actionItems.map((task, index) => \`\${index + 1}. \${task}\`).join(\`
\`)}
 
Now, please re-examine the ORIGINAL voice note transcript below and enhance the task analysis by:
 
1. Looking for clues about task importance and urgency in the original text. Some tasks might be marked as both important and urgent implicitly or explicitly in the transcript, so pay attention to that.
2. Identifying any explicit or implicit start dates and deadlines and task durations
3. Finding tasks that need to happen BEFORE other tasks (dependencies/blocking relationships)
 
If necessary, add more tasks to the list to make sure we show task blocking relationships.
 
Rules for analysis:
- ONLY apply start dates, deadlines, durations when they are explicitly mentioned or strongly implied in the transcript
- All dates should be relative to the current date and time mentioned above
- Look for words like "before", "after", "first", "then", "urgent", "important", "ASAP", "today", "tomorrow", etc.
- Dependencies should be based on logical task ordering mentioned in the transcript, so examples where a task needs to happen before another task in order for the second task to be possible to start.
 
 
Please format your response exactly like this:
 
{
"summary": ["bullet point 1", "bullet point 2", "bullet point 3"],
"actionItems": [
{
"id": "1",
"task": "Task description",
"priority": "important" | "urgent" | "neither" | "both",
"deadline": "2025-01-01T00:00:00.000Z", // optional - only if mentioned/implied
"start": "2025-01-01T00:00:00.000Z", // optional - only if mentioned/implied
"duration": 60, // in minutes, optional - only if mentioned/implied
"blocking": ["2", "3"] // optional - IDs of tasks this blocks
}
]
}
 
ORIGINAL TRANSCRIPT:
\${transcript}\`;
},
/**
* Sends request to ChatGPT API
* @param {string} prompt - The prompt to send
* @param {string} apiKey - OpenAI API key
* @param {string} model - OpenAI model to use
* @returns {Promise<string>} Raw response from ChatGPT
*/
async _sendChatGPTRequest(prompt, apiKey, model = "gpt-4.1-mini") {
let chatResponse = await fetch("https://api.openai.com/v1/chat/completions", {
method: "POST",
headers: {
Authorization: \`Bearer \${apiKey}\`,
"Content-Type": "application/json"
},
body: JSON.stringify({
model,
messages: [
{
role: "user",
content: prompt
}
]
// temperature: 0.3
})
});
if (!chatResponse.ok) {
let errorData = await chatResponse.json();
throw new Error(errorData.error.message || "Error during ChatGPT processing.");
}
return (await chatResponse.json()).choices[0].message.content.trim();
},
/**
* Parses the raw response from ChatGPT
* Strips a surrounding Markdown code fence and JS-style comments before
* JSON.parse, since models often wrap JSON or annotate optional fields.
* @param {string} rawResponse - Raw response from ChatGPT
* @returns {Object} Parsed response with summary and action items
*/
_parseResponse(rawResponse) {
try {
let jsonString = rawResponse, codeBlockRegex = /^\`\`\`(?:json)?\\s*\\n?([\\s\\S]*?)\\n?\`\`\`$/, match = jsonString.match(codeBlockRegex);
return match && (jsonString = match[1].trim(), console.log("Extracted JSON from code block")), jsonString = jsonString.replace(/\\/\\/.*$/gm, "").replace(/\\/\\*[\\s\\S]*?\\*\\//g, ""), console.log("JSON to parse:", jsonString), JSON.parse(jsonString);
} catch (error) {
// Never propagate a parse failure; the pipeline continues with empty results.
return console.error("Failed to parse ChatGPT JSON response:", error), console.log("Raw response:", rawResponse), {
summary: ["Error parsing ChatGPT response"],
actionItems: []
};
}
}
}, AudioProcessor = {
/**
* Processes audio recording through the complete pipeline
* transcribe -> summarize with ChatGPT -> insert note -> apply task properties.
* @param {Blob} audioBlob - The recorded audio blob
*/
async processAudioRecording(audioBlob) {
let fileSizeMB = audioBlob.size / 1e6;
try {
let OPENAI_API_KEY = await whisperAPI.getApiKey(), OPENAI_MODEL = await whisperAPI.getModel(), isDevMode = this._isDevMode(), transcriptionText;
isDevMode ? transcriptionText = await this._getDevModeTranscript() : transcriptionText = await this._processProductionAudio(audioBlob, OPENAI_API_KEY), console.log("processing with ChatGPT (two-step approach)...");
let chatGPTData = await ChatGPTProcessor.processTranscript(transcriptionText, OPENAI_API_KEY, OPENAI_MODEL);
console.log("ChatGPT data:", chatGPTData);
// NOTE(review): insertText is assumed to resolve to the destination note UUID
// — confirm against the host plugin's insertText implementation.
let formattedText = this._formatContent(transcriptionText, chatGPTData), noteUUID = await whisperAPI.insertText(formattedText);
if (chatGPTData.actionItems && chatGPTData.actionItems.length > 0) {
UIManager.updateButtonText("Updating tasks...");
try {
await AmplenoteTaskManager.updateTaskProperties(noteUUID, chatGPTData.actionItems);
} catch (error) {
// Task property updates are best-effort; the note content is already saved.
console.error("Error updating task properties:", error);
}
}
await whisperAPI.showAlert(\`Voice note processed successfully! Audio file size: \${fileSizeMB}MB
 
Transcription, summary, and action items have been added to your Voice Notes.\`);
} catch (error) {
await whisperAPI.showAlert("Error: " + error.message + \`
 
Audio file size: \` + fileSizeMB + "MB");
} finally {
// Always let the screen sleep again, success or failure.
await WakeLockManager.release();
}
},
/**
* Checks if application is running in development mode
* @returns {boolean} True if in development mode
*/
_isDevMode() {
return window.location.hostname === "localhost" || window.location.hostname === "127.0.0.1" || window.location.protocol === "file:" || window.location.port && (window.location.port.startsWith("3") || window.location.port.startsWith("8") || window.location.port.startsWith("5"));
},
/**
* Gets mock transcript for development mode (after a simulated 1s delay)
* @returns {Promise<string>} Mock transcript text
*/
async _getDevModeTranscript() {
return UIManager.updateButtonText("Using dev transcript..."), await new Promise((resolve) => setTimeout(resolve, 1e3)), \`Alright, here's what's happening this week at home:
I have to call the plumber to fix the kitchen sink leak before it floods.
Sort through and donate outgrown kids' clothes by Saturday afternoon.
Grandma's 80th birthday dinner is on Sunday at 6pm. I should also send an invite to the whole family before that.
I can't start painting the living room until the new curtains arrive. They need to be picked up from the tailor first.
And it's pretty important to put some money in an investment account.\`;
},
/**
* Processes audio in production mode (real audio processing)
* @param {Blob} audioBlob - The recorded audio blob
* @param {string} apiKey - OpenAI API key
* @returns {Promise<string>} Transcribed text
*/
async _processProductionAudio(audioBlob, apiKey) {
UIManager.updateButtonText("Transcribing..."), console.log("Transcribing audio with Whisper...");
let transcriptionFormData = new FormData();
transcriptionFormData.append("file", audioBlob, "recording.webm"), transcriptionFormData.append("model", "whisper-1");
let transcriptionResponse = await fetch("https://api.openai.com/v1/audio/transcriptions", {
method: "POST",
headers: {
Authorization: \`Bearer \${apiKey}\`
},
body: transcriptionFormData
});
if (!transcriptionResponse.ok) {
console.log("Transcription failed...");
let errorData = await transcriptionResponse.json();
throw new Error(errorData.error.message || "Error during transcription.");
}
console.log("Transcription successful...");
let transcriptionData = await transcriptionResponse.json();
return console.log(transcriptionData.text), transcriptionData.text;
},
/**
* Formats content for insertion into note
* @param {string} transcriptionText - The transcribed text
* @param {Object} chatGPTData - Processed data from ChatGPT
* @returns {string} Formatted content
*/
_formatContent(transcriptionText, chatGPTData) {
let now = /* @__PURE__ */ new Date(), year = now.getFullYear(), month = String(now.getMonth() + 1).padStart(2, "0"), day = String(now.getDate()).padStart(2, "0"), hours = String(now.getHours()).padStart(2, "0"), minutes = String(now.getMinutes()).padStart(2, "0"), summaryText = chatGPTData.summary && chatGPTData.summary.length > 0 ? chatGPTData.summary.map((point) => \`- \${point}\`).join(\`
\`) : "- No summary available", actionItemsText = chatGPTData.actionItems && chatGPTData.actionItems.length > 0 ? chatGPTData.actionItems.map((item) => \`- [ ] \${item.task}\`).join(\`
\`) : "- [ ] No action items identified";
return \`### \${year}/\${month}/\${day} voice notes taken at [\${hours}:\${minutes}]
 
## Original Transcript
\${transcriptionText}
 
# Summary
\${summaryText}
 
# Action Items
\${actionItemsText}\`;
}
}, AmplenoteTaskManager = {
// Matches ChatGPT action items to the tasks Amplenote created from the
// inserted markdown, then applies priority/schedule/blocking metadata.
/**
 * Updates task properties in Amplenote based on ChatGPT analysis
 * @param {string} noteUUID - UUID of the note containing tasks
 * @param {Array} actionItems - Array of action items from ChatGPT
 */
async updateTaskProperties(noteUUID, actionItems) {
try {
let amplenoteTask = await whisperAPI.getNoteTasks(noteUUID), actionToTaskMap = this._mapActionItemsToTasks(actionItems, amplenoteTask);
// Properties first, then blocking links, since links append to task content.
await this._updateTaskProperties(actionItems, actionToTaskMap), await this._updateBlockingRelationships(actionItems, actionToTaskMap);
} catch (error) {
// Log for diagnostics, then rethrow so the caller can decide how to react.
throw console.error("Error in updateTaskProperties:", error), error;
}
},
/**
 * Maps action items to existing Amplenote tasks
 * @param {Array} actionItems - Array of action items from ChatGPT
 * @param {Array} amplenoteTask - Array of existing Amplenote tasks
 * @returns {Map} Mapping between action item IDs and Amplenote tasks
 */
_mapActionItemsToTasks(actionItems, amplenoteTask) {
let actionToTaskMap = /* @__PURE__ */ new Map();
for (let actionItem of actionItems) {
// Fuzzy match: case-insensitive substring containment in either direction;
// the first matching task wins. Unmatched items are silently skipped.
let matchingTask = amplenoteTask.find((task) => {
let taskContent = task.content || "", actionTask = actionItem.task || "";
return taskContent.toLowerCase().includes(actionTask.toLowerCase()) || actionTask.toLowerCase().includes(taskContent.toLowerCase());
});
matchingTask && actionToTaskMap.set(actionItem.id, matchingTask);
}
return actionToTaskMap;
},
/**
 * Updates properties for matched tasks
 * @param {Array} actionItems - Array of action items from ChatGPT
 * @param {Map} actionToTaskMap - Mapping between action items and tasks
 */
async _updateTaskProperties(actionItems, actionToTaskMap) {
for (let actionItem of actionItems) {
let matchingTask = actionToTaskMap.get(actionItem.id);
if (matchingTask) {
let updateProperties = this._convertActionItemToTaskProperties(actionItem);
// Skip the API call entirely when there is nothing to change.
Object.keys(updateProperties).length > 0 && await whisperAPI.updateTask(matchingTask.uuid, updateProperties);
}
}
},
/**
 * Converts action item properties to Amplenote task properties format
 * @param {Object} actionItem - Action item from ChatGPT
 * @returns {Object} Properties formatted for Amplenote
 */
_convertActionItemToTaskProperties(actionItem) {
let updateProperties = {};
// priority is one of "important" / "urgent" / "both" / "neither";
// "neither" and unknown values set no flags.
if (actionItem.priority)
switch (actionItem.priority) {
case "important":
updateProperties.important = !0;
break;
case "urgent":
updateProperties.urgent = !0;
break;
case "both":
updateProperties.important = !0, updateProperties.urgent = !0;
break;
case "neither":
break;
}
// deadline/startAt are Unix timestamps in seconds; duration appears to be
// minutes (multiplied by 60 below) and endAt is only set when both start
// and duration are present - TODO confirm against the ChatGPT prompt schema.
if (actionItem.deadline && (updateProperties.deadline = Math.floor(new Date(actionItem.deadline).getTime() / 1e3)), actionItem.start && (updateProperties.startAt = Math.floor(new Date(actionItem.start).getTime() / 1e3), actionItem.duration)) {
let durationSeconds = actionItem.duration * 60;
updateProperties.endAt = updateProperties.startAt + durationSeconds;
}
return updateProperties;
},
/**
 * Updates blocking relationships between tasks
 * @param {Array} actionItems - Array of action items from ChatGPT
 * @param {Map} actionToTaskMap - Mapping between action items and tasks
 */
async _updateBlockingRelationships(actionItems, actionToTaskMap) {
for (let actionItem of actionItems) {
let currentTask = actionToTaskMap.get(actionItem.id);
if (currentTask && actionItem.blocking && actionItem.blocking.length > 0) {
let contentToAdd = this._buildBlockingLinks(actionItem.blocking, actionToTaskMap);
contentToAdd && await this._updateTaskContentWithBlockingLinks(currentTask, contentToAdd);
}
}
},
/**
 * Builds blocking links for tasks
 * @param {Array} blockedIds - Array of blocked task IDs
 * @param {Map} actionToTaskMap - Mapping between action items and tasks
 * @returns {string} Content to add with blocking links
 */
_buildBlockingLinks(blockedIds, actionToTaskMap) {
let contentToAdd = "";
// Each link targets the blocked task's UUID with relation=blocking, which
// is how Amplenote encodes a task-blocking relationship in markdown.
for (let blockedId of blockedIds) {
let blockedTask = actionToTaskMap.get(blockedId);
if (blockedTask) {
let blockingLink = \`[\${blockedTask.uuid}](https://www.amplenote.com/notes/tasks/\${blockedTask.uuid}?relation=blocking)\`;
contentToAdd += \`
\${blockingLink}\`;
}
}
return contentToAdd;
},
/**
 * Updates task content with blocking links
 * @param {Object} task - The task to update
 * @param {string} contentToAdd - Content to add
 */
async _updateTaskContentWithBlockingLinks(task, contentToAdd) {
let currentContent = task.content || "";
// Coarse idempotence guard: skips the update if the task already contains
// ANY blocking link, not just the specific one being added.
if (!currentContent.includes("?relation=blocking")) {
let updatedContent = currentContent + contentToAdd;
await whisperAPI.updateTask(task.uuid, { content: updatedContent });
}
}
}, AudioRecorder = {
// Wraps MediaRecorder plus a Web Audio analyser used for the live waveform.
mediaRecorder: null,
audioChunks: [],
isRecording: !1,
audioContext: null,
analyser: null,
dataArray: null,
animationId: null,
// Preferred container; init() may replace these with a supported fallback.
options: { mimeType: "audio/webm" },
fileExtension: "webm",
/**
 * Initializes the audio recorder with supported MIME types
 */
init() {
// Fallback chain for browsers without webm support: mp4, then mpeg, then wav.
if (!MediaRecorder.isTypeSupported("audio/webm"))
if (MediaRecorder.isTypeSupported("audio/mp4"))
this.options = { mimeType: "audio/mp4" }, this.fileExtension = "mp4";
else if (MediaRecorder.isTypeSupported("audio/mpeg"))
this.options = { mimeType: "audio/mpeg" }, this.fileExtension = "mp3";
else if (MediaRecorder.isTypeSupported("audio/wav"))
this.options = { mimeType: "audio/wav" }, this.fileExtension = "wav";
else
throw new Error("No supported audio MIME types found.");
},
/**
 * Starts audio recording
 * @param {Function} onDataAvailable - Callback for when data is available
 * @param {Function} onStop - Callback for when recording stops
 * @returns {Promise<Object>} Audio context and canvas for visualization
 */
async startRecording(onDataAvailable, onStop) {
if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia)
throw new Error("Your browser does not support audio recording.");
let stream = await navigator.mediaDevices.getUserMedia({ audio: !0 });
// The analyser taps the same mic stream; dataArray is sized to fftSize
// because it receives time-domain (waveform) samples, not frequency bins.
this.mediaRecorder = new MediaRecorder(stream, this.options), this.mediaRecorder.start(), this.isRecording = !0, this.audioContext = new (window.AudioContext || window.webkitAudioContext)();
let source = this.audioContext.createMediaStreamSource(stream);
return this.analyser = this.audioContext.createAnalyser(), this.analyser.fftSize = 2048, this.analyser.smoothingTimeConstant = 0.85, source.connect(this.analyser), this.dataArray = new Uint8Array(this.analyser.fftSize), this.mediaRecorder.ondataavailable = (e) => {
this.audioChunks.push(e.data), onDataAvailable && onDataAvailable(e);
}, this.mediaRecorder.onstop = async () => {
// Stop every track so the browser releases the microphone indicator.
stream.getTracks().forEach((track) => track.stop());
let audioBlob = new Blob(this.audioChunks, { type: this.options.mimeType });
this.audioChunks = [], this.isRecording = !1, onStop && await onStop(audioBlob);
}, {
audioContext: this.audioContext,
analyser: this.analyser,
dataArray: this.dataArray
};
},
/**
 * Stops audio recording
 */
stopRecording() {
// Closing the AudioContext also tears down the analyser graph.
this.mediaRecorder && this.isRecording && (this.mediaRecorder.stop(), this.audioContext && this.audioContext.close());
},
/**
 * Creates and manages audio visualization canvas
 * @param {HTMLElement} container - Container element for the canvas
 * @returns {HTMLCanvasElement} The created canvas element
 */
createVisualizationCanvas(container) {
let canvas = document.createElement("canvas");
return canvas.id = "visualizer", canvas.width = container.clientWidth, canvas.height = container.clientHeight, container.appendChild(canvas), canvas;
},
/**
 * Draws audio visualization
 * @param {HTMLCanvasElement} canvas - Canvas element to draw on
 */
drawVisualization(canvas) {
// Redraws on every other animation frame (updateInterval % 2) to halve cost.
let canvasCtx = canvas.getContext("2d"), WIDTH = canvas.width, HEIGHT = canvas.height, updateInterval = 0, draw = () => {
if (this.animationId = requestAnimationFrame(draw), updateInterval++ % 2 === 0) {
this.analyser.getByteTimeDomainData(this.dataArray), canvasCtx.fillStyle = "#f7fffa", canvasCtx.fillRect(0, 0, WIDTH, HEIGHT), canvasCtx.lineWidth = 2, canvasCtx.strokeStyle = "#f57542", canvasCtx.shadowBlur = 10, canvasCtx.shadowColor = "#FF5733", canvasCtx.beginPath();
let sliceWidth = WIDTH * 1 / this.dataArray.length, x = 0;
for (let i = 0; i < this.dataArray.length; i++) {
// Byte samples center on 128 (silence), so y lands mid-canvas at rest.
let y = this.dataArray[i] / 128 * HEIGHT / 2;
i === 0 ? canvasCtx.moveTo(x, y) : canvasCtx.lineTo(x, y), x += sliceWidth;
}
canvasCtx.lineTo(WIDTH, HEIGHT / 2), canvasCtx.stroke(), canvasCtx.shadowBlur = 0;
}
};
draw();
},
/**
 * Stops visualization animation
 */
stopVisualization() {
this.animationId && (cancelAnimationFrame(this.animationId), this.animationId = null);
}
}, UIManager = {
// Owns the record button, its label span, and the MM:SS recording timer.
// The element lookups below run once, when the embed script first executes.
recordButton: document.getElementById("recordButton"),
buttonText: null,
timer: document.getElementById("timer"),
recordingInterval: null,
secondsElapsed: 0,
/**
 * Initializes UI components
 */
init() {
// The label lives in a span inside the button; resolved lazily here.
this.buttonText = this.recordButton.querySelector("span");
},
/**
 * Updates button text
 * @param {string} text - Text to display
 */
updateButtonText(text) {
this.buttonText.textContent = text;
},
/**
 * Sets button disabled state
 * @param {boolean} disabled - Whether button should be disabled
 */
setButtonDisabled(disabled) {
// Keeps the DOM disabled attribute and the CSS class in sync.
this.recordButton.disabled = disabled, disabled ? this.recordButton.classList.add("disabled") : this.recordButton.classList.remove("disabled");
},
/**
 * Shows/hides timer
 * @param {boolean} show - Whether to show timer
 */
showTimer(show) {
this.timer.style.display = show ? "block" : "none";
},
/**
 * Starts the recording timer
 */
startTimer() {
// Reset to 00:00 immediately, then tick once per second.
this.secondsElapsed = 0, this.updateTimerDisplay(), this.recordingInterval = setInterval(() => {
this.secondsElapsed++, this.updateTimerDisplay();
}, 1e3);
},
/**
 * Stops the recording timer
 */
stopTimer() {
this.recordingInterval && (clearInterval(this.recordingInterval), this.recordingInterval = null);
},
/**
 * Updates timer display
 */
updateTimerDisplay() {
// Zero-padded MM:SS rendering of the elapsed seconds counter.
let minutes = Math.floor(this.secondsElapsed / 60), seconds = this.secondsElapsed % 60;
this.timer.textContent = \`\${String(minutes).padStart(2, "0")}:\${String(seconds).padStart(2, "0")}\`;
},
/**
 * Adds canvas to record button
 * @param {HTMLCanvasElement} canvas - Canvas to add
 */
addCanvasToButton(canvas) {
this.recordButton.appendChild(canvas);
},
/**
 * Removes canvas from record button
 * @param {HTMLCanvasElement} canvas - Canvas to remove
 */
removeCanvasFromButton(canvas) {
// Guard with contains() so a double removal does not throw.
this.recordButton.contains(canvas) && this.recordButton.removeChild(canvas);
}
}, WakeLockManager = {
// Keeps the screen awake during recording via the Screen Wake Lock API.
// Degrades gracefully (returns false) when the API is unavailable.
wakeLock: null,
/**
 * Requests wake lock to keep screen on
 * @returns {Promise<boolean>} True if wake lock was acquired
 */
async acquire() {
try {
return "wakeLock" in navigator ? (this.wakeLock = await navigator.wakeLock.request("screen"), console.log("Wake lock acquired - screen will stay on"), this.wakeLock.addEventListener("release", () => {
console.log("Wake lock was released");
}), !0) : (console.warn("Wake Lock API not supported in this browser"), !1);
} catch (error) {
// Acquisition can fail (e.g. power-saving mode); treat it as non-fatal.
return console.error("Failed to acquire wake lock:", error), !1;
}
},
/**
 * Releases the wake lock to allow screen to turn off
 */
async release() {
try {
this.wakeLock && (await this.wakeLock.release(), this.wakeLock = null, console.log("Wake lock released - screen can turn off normally"));
} catch (error) {
console.error("Failed to release wake lock:", error);
}
},
/**
 * Checks if wake lock is currently active
 * @returns {boolean} True if wake lock is active
 */
isActive() {
return this.wakeLock && !this.wakeLock.released;
},
/**
 * Re-acquires wake lock if it was previously active but got released
 * (e.g., when user returns to tab)
 */
async reacquireIfNeeded() {
// The sentinel wakeLock is still set but marked released by the browser;
// called from the visibilitychange handler when the page becomes visible.
this.wakeLock && this.wakeLock.released && (console.log("Wake lock was released, attempting to re-acquire..."), await this.acquire());
}
}, VoiceNotesApp = {
// Top-level controller: wires the recorder, UI, wake lock, and auto-start.
canvas: null,
/**
 * Initializes the voice notes application
 */
async init() {
try {
AudioRecorder.init(), UIManager.init(), this._setupEventListeners(), this._setupPageVisibilityHandling(), await this._checkAutoStart();
} catch (error) {
console.error("Error initializing app:", error), await whisperAPI.showAlert("Error initializing voice notes app: " + error.message);
}
},
/**
 * Sets up event listeners for the application
 */
_setupEventListeners() {
// A single button toggles between start and stop based on recorder state.
UIManager.recordButton.addEventListener("click", async () => {
AudioRecorder.isRecording ? this._stopRecording() : await this._startRecording();
});
},
/**
 * Sets up page visibility handling to manage wake lock
 */
_setupPageVisibilityHandling() {
document.addEventListener("visibilitychange", async () => {
document.visibilityState === "visible" && await WakeLockManager.reacquireIfNeeded();
});
},
/**
 * Starts audio recording
 */
async _startRecording() {
try {
// Acquire the wake lock before recording so the screen stays on.
await WakeLockManager.acquire();
let { analyser, dataArray } = await AudioRecorder.startRecording(
null,
// onDataAvailable callback
async (audioBlob) => await this._onRecordingStop(audioBlob)
);
UIManager.updateButtonText("Stop Recording"), UIManager.showTimer(!0), UIManager.startTimer(), this.canvas = AudioRecorder.createVisualizationCanvas(UIManager.recordButton), AudioRecorder.drawVisualization(this.canvas);
} catch (error) {
// On failure, release the wake lock we just acquired before alerting.
console.error("Error starting recording:", error), await WakeLockManager.release(), await whisperAPI.showAlert("Error starting recording: " + error.message);
}
},
/**
 * Stops audio recording
 */
_stopRecording() {
AudioRecorder.stopRecording();
},
/**
 * Handles recording stop event
 * @param {Blob} audioBlob - The recorded audio blob
 */
async _onRecordingStop(audioBlob) {
// Tear down the visualization and lock the button while processing runs.
AudioRecorder.stopVisualization(), this.canvas && (UIManager.removeCanvasFromButton(this.canvas), this.canvas = null), UIManager.updateButtonText("Processing..."), UIManager.setButtonDisabled(!0), UIManager.stopTimer(), UIManager.showTimer(!1);
try {
await AudioProcessor.processAudioRecording(audioBlob);
} catch (error) {
console.error("Error processing recording:", error), await WakeLockManager.release();
} finally {
// Re-enable the button no matter how processing ended.
UIManager.updateButtonText("Start Recording"), UIManager.setButtonDisabled(!1);
}
},
/**
 * Checks if recording should auto-start
 */
async _checkAutoStart() {
console.log("Checking if we should auto-start recording...");
try {
// One-shot flag set by the host plugin's appOption; auto-start goes
// through recordButton.click() so both paths share one handler.
let wasJustInvoked = await whisperAPI.wasJustInvoked();
console.log("wasJustInvoked from appOption:", wasJustInvoked), wasJustInvoked ? (console.log("Auto-starting recording because plugin was just invoked from appOption..."), UIManager.recordButton.click()) : console.log("Not auto-starting recording - user just revisited the sidebar");
} catch (error) {
console.log("Error checking if just invoked:", error), console.log("Not auto-starting recording due to error");
}
}
};
// Entry point: boots the app once the embed script loads.
async function run() {
await VoiceNotesApp.init();
}
try {
// NOTE(review): this try/catch only covers a synchronous throw from run();
// an async rejection becomes an unhandled promise because the chain has no
// .catch, and the logged result here is always undefined.
run().then((result) => console.log(result));
} catch (err) {
console.log(err);
}
})();
<\/script>
</body>
</html>`;
 
// plugin/plugin.js
// Amplenote plugin surface. Bridges the embedded recorder page (embed_default)
// to the Amplenote app API via onEmbedCall messages.
var plugin = {
  // UUID of the current destination note (set by ensureDestinationNote).
  context: null,
  // Flipped to true once renderEmbed has been called.
  installed: false,
  // One-shot flag: the embed polls this via "wasJustInvoked" to auto-start recording.
  justInvokedFromAppOption: false,
  appOption: {
    /**
     * Quick Open / app-menu entry point: opens the plugin embed and navigates
     * to the plugin note so the recorder UI is in view.
     */
    "Record voice notes": async (app) => {
      try {
        console.log("appOption called, plugin.installed =", plugin.installed);
        plugin.justInvokedFromAppOption = true;
        console.log("Set justInvokedFromAppOption = true");
        await app.openEmbed(plugin.installed);
        await app.navigate("https://www.amplenote.com/notes/plugins/" + app.context.pluginUUID);
      } catch (error) {
        console.error("Error in appOption:", error);
        await app.alert("Error in appOption: " + error.message);
      }
    }
  },
  /**
   * Message dispatcher for the embedded recorder page.
   * args[0] selects the operation; remaining args are operation-specific.
   * @returns {Promise<*>} Operation result, or undefined for unknown ops.
   */
  async onEmbedCall(app, ...args) {
    console.log(args);
    const [op] = args;
    if (op === "getApiKey") {
      // Prompt once and persist; subsequent calls read from settings.
      let apiKey = app.settings["OPENAI API KEY"];
      if (!apiKey) {
        apiKey = await app.prompt("Please enter your OpenAI API key (you can create one at https://platform.openai.com/api-keys)", {
          inputs: [
            { label: "API Key", type: "text", placeholder: "Enter your OpenAI API key here" }
          ]
        });
        await app.setSetting("OPENAI API KEY", apiKey);
      }
      return apiKey;
    }
    if (op === "getModel") {
      let model = app.settings["OPENAI MODEL"];
      if (!model) {
        const results = await app.prompt("Which OpenAI model do you want to use for transcription and chat completion?", {
          inputs: [
            { label: "Model", type: "select", options: [
              { label: "gpt-5 (very slow and very smart)", value: "gpt-5" },
              { label: "gpt-5-mini (fast and smart)", value: "gpt-5-mini" },
              { label: "gpt-5-nano (very fast and less smart)", value: "gpt-5-nano" },
              // BUGFIX: the two values below previously pointed at the wrong
              // models ("gpt-4.1-mini" label -> "gpt-4o-mini" value, and
              // "gpt-4.1-nano" label -> "gpt-4.1-mini" value).
              { label: "gpt-4.1-mini (fast and smart, non-reasoning; recommended \u{1F44B})", value: "gpt-4.1-mini" },
              { label: "gpt-4.1-nano (very fast and less smart, non-reasoning)", value: "gpt-4.1-nano" },
              { label: "gpt-4.1 (slow and very smart, non-reasoning)", value: "gpt-4.1" },
              { label: "other (enter the model name below)", value: null }
            ] },
            { label: "Model name", type: "text", placeholder: "Enter another model name here (OPTIONAL)", value: model }
          ]
        });
        const [modelFromList, modelFromInput] = results;
        if (modelFromList) {
          model = modelFromList;
        } else if (modelFromInput) {
          // Free-text entry wins only when no list option was chosen.
          model = modelFromInput;
        } else {
          await app.alert("No model selected, using the default one. You can change this at any time on the plugin settings page.");
          model = "gpt-4.1-mini";
        }
        await app.setSetting("OPENAI MODEL", model);
      }
      return model;
    }
    if (op === "insertText") {
      const textToInsert = args[1];
      const noteHandle = await this.ensureDestinationNote(app);
      await app.insertNoteContent(noteHandle, textToInsert, { atEnd: true });
      return noteHandle.uuid;
    }
    if (op === "showAlert") {
      await app.alert(args[1]);
      return true;
    }
    if (op === "wasJustInvoked") {
      // One-shot read: report and immediately clear the flag.
      const wasJustInvoked = this.justInvokedFromAppOption;
      this.justInvokedFromAppOption = false;
      return wasJustInvoked;
    }
    if (op === "getCurrentNoteUUID")
      return this.context;
    if (op === "getNoteTasks")
      return await app.getNoteTasks(args[1]);
    if (op === "updateTask")
      return await app.updateTask(args[1], args[2]);
    // Unknown operation: fall through and return undefined (previous behavior).
  },
  /**
   * Finds or creates the note that receives the transcript, caching its UUID
   * in this.context.
   * BUGFIX: previously searched with the hard-coded tag "system/voice-notes"
   * while creating notes under the user-configured DESTINATION NOTE TAG, so a
   * custom tag caused a fresh note to be created on every call. The search now
   * uses the configured tag.
   * @returns {Promise<Object>} Handle of the destination note.
   */
  async ensureDestinationNote(app) {
    const now = new Date();
    const year = now.getFullYear();
    const month = String(now.getMonth() + 1).padStart(2, "0");
    const day = String(now.getDate()).padStart(2, "0");
    const hours = String(now.getHours()).padStart(2, "0");
    const minutes = String(now.getMinutes()).padStart(2, "0");
    const noteTitle = `${year}/${month}/${day} voice notes taken at [${hours}:${minutes}]`;
    const destinationNoteTag = await plugin.getDestinationNoteTag(app);
    let destinationNote = await app.findNote({ name: noteTitle, tags: [destinationNoteTag] });
    console.log(destinationNote);
    if (!destinationNote) {
      const destinationNoteUUID = await app.createNote(noteTitle, [destinationNoteTag]);
      destinationNote = await app.findNote({ uuid: destinationNoteUUID });
      const contents = await app.getNoteContent({ uuid: destinationNoteUUID });
      console.log(contents);
    }
    this.context = destinationNote.uuid;
    return destinationNote;
  },
  /**
   * Returns the tag for destination notes, prompting (and persisting) on first
   * use. Falls back to "daily-jots" when the prompt is dismissed.
   * (Removed a dead assignment that set the tag to "system/voice-notes" and
   * immediately overwrote it with the prompt result.)
   */
  async getDestinationNoteTag(app) {
    let destinationNoteTag = app.settings["DESTINATION NOTE TAG"];
    if (!destinationNoteTag) {
      destinationNoteTag = await app.prompt("What tag do you want to use for the destination transcript note?", {
        inputs: [
          { label: "Tag", type: "tags", placeholder: "daily-jots" }
        ]
      });
      if (!destinationNoteTag) destinationNoteTag = "daily-jots";
      await app.setSetting("DESTINATION NOTE TAG", destinationNoteTag);
    }
    return destinationNoteTag;
  },
  /**
   * Supplies the embed HTML and records that the embed has been shown.
   */
  renderEmbed(app) {
    console.log("renderEmbed called");
    this.installed = true;
    return embed_default;
  }
}, plugin_default = plugin;
return plugin;
})()
//# sourceMappingURL=plugin.js.map