-// background.js - Handles requests from the frontend, runs the model, then sends back a response
-// TODO - make persistent (i.e., do not close after inactivity)
-
-if (typeof ServiceWorkerGlobalScope !== 'undefined' && self instanceof ServiceWorkerGlobalScope) {
-    // Load the library
-    const { pipeline, env } = require('@xenova/transformers');
-
-    // Set environment variables to only use local models.
-    env.useBrowserCache = false;
-    env.remoteModels = false;
-    env.localModelPath = chrome.runtime.getURL('models/')
-    env.backends.onnx.wasm.wasmPaths = chrome.runtime.getURL('wasm/')
-    env.backends.onnx.wasm.numThreads = 1;
-
-    // TODO: Replace this with your own task and model
-    const task = 'text-classification';
-    const model = 'distilbert-base-uncased-finetuned-sst-2-english';
-
-    // Load model, storing the promise that is returned from the pipeline function.
-    // Doing it this way will load the model in the background as soon as the worker is created.
-    // To actually use the model, you must call `await modelPromise` to get the actual classifier.
-    const modelPromise = pipeline(task, model, {
-        progress_callback: (data) => {
-            // If you would like to add a progress bar for model loading,
-            // you can send `data` back to the UI.
+// background.js - Handles requests from the UI, runs the model, then sends back a response
+
+import { pipeline, env } from '@xenova/transformers';
+import { CustomCache } from "./cache.js";
+
+// Define caching parameters
+env.useBrowserCache = false;
+env.useCustomCache = true;
+env.customCache = new CustomCache('transformers-cache');
+
+// Skip initial check for local models, since we are not loading any local models.
+env.allowLocalModels = false;
+
+// Due to a bug in onnxruntime-web, we must disable multithreading for now.
+// See https://github.com/microsoft/onnxruntime/issues/14445 for more information.
+env.backends.onnx.wasm.numThreads = 1;
+
+
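The `CustomCache` imported from `./cache.js` is not part of this diff. transformers.js only calls `match(request)` and `put(request, response)` on `env.customCache` (the same shape as the Web Cache API), so a minimal sketch, assuming the class simply wraps Cache Storage, might look like this; the real `cache.js` may differ:

```js
// cache.js (hypothetical sketch, not the file from this repo)
export class CustomCache {
    constructor(cacheName) {
        this.cacheName = cacheName;
    }

    // Return a previously cached response for this request/URL, if any.
    async match(request) {
        const cache = await caches.open(this.cacheName);
        return await cache.match(request);
    }

    // Store the response so future loads can skip the network.
    async put(request, response) {
        const cache = await caches.open(this.cacheName);
        await cache.put(request, response);
    }
}
```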
+class PipelineSingleton {
+    static task = 'text-classification';
+    static model = 'Xenova/distilbert-base-uncased-finetuned-sst-2-english';
+    static instance = null;
+
+    static async getInstance(progress_callback = null) {
+        if (this.instance === null) {
+            this.instance = pipeline(this.task, this.model, { progress_callback });
         }
+
+        return this.instance;
+    }
+}
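Note that `getInstance` caches the promise returned by `pipeline()` rather than the awaited result, so concurrent callers share a single in-flight load instead of each triggering its own download. An illustration (hypothetical usage, not part of the diff):

```js
// Both calls return the same promise, so the model is downloaded
// and initialized at most once, even if the calls race.
const [a, b] = await Promise.all([
    PipelineSingleton.getInstance(),
    PipelineSingleton.getInstance(),
]);
console.log(a === b); // true
```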
+
+// Create generic classify function, which will be reused for the different types of events.
+const classify = async (text) => {
+    // Get the pipeline instance. This will load and build the model when run for the first time.
+    let model = await PipelineSingleton.getInstance((data) => {
+        // You can track the progress of the pipeline creation here.
+        // e.g., you can send `data` back to the UI to indicate a progress bar
+        // console.log('progress', data)
     });

+    // Actually run the model on the input text
+    let result = await model(text);
+    return result;
+};
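For reference, a text-classification pipeline resolves to an array of label/score objects, so callers of `classify` can expect output along these lines (the score value here is illustrative):

```js
const output = await classify('I love transformers!');
console.log(output);
// e.g., [ { label: 'POSITIVE', score: 0.9998 } ]
```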
+
+////////////////////// 1. Context Menus //////////////////////
+//
+// Add a listener to create the initial context menu items;
+// context menu items only need to be created at runtime.onInstalled.
+chrome.runtime.onInstalled.addListener(function () {
+    // Register a context menu item that will only show up for selected text.
+    chrome.contextMenus.create({
+        id: 'classify-selection',
+        title: 'Classify "%s"',
+        contexts: ['selection'],
+    });
+});

-    // Listen for messages from the UI, process it, and send the result back.
-    chrome.runtime.onMessage.addListener((message, sender, sendResponse) => {
+// Perform inference when the user clicks a context menu item
+chrome.contextMenus.onClicked.addListener(async (info, tab) => {
+    // Ignore context menu clicks that are not for classification (or when there is no selected text)
+    if (info.menuItemId !== 'classify-selection' || !info.selectionText) return;

-        // Run model prediction asynchronously
-        (async function () {
-            let model = await modelPromise;  // 1. Load model if not already loaded
-            let result = await model(message);  // 2. Run model prediction
-            sendResponse(result);  // 3. Send response back to UI
-        })();
+    // Perform classification on the selected text
+    let result = await classify(info.selectionText);

-        // return true to indicate we will send a response asynchronously
-        // see https://stackoverflow.com/a/46628145 for more information
-        return true;
+    // Do something with the result
+    chrome.scripting.executeScript({
+        target: { tabId: tab.id },   // Run in the tab that the user clicked in
+        args: [result],              // The arguments to pass to the function
+        function: (result) => {      // The function to run
+            // NOTE: This function is run in the context of the web page, meaning that `document` is available.
+            console.log('result', result)
+            console.log('document', document)
+        },
     });
-}
+});
+//////////////////////////////////////////////////////////////
+
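Section 1 assumes the extension's manifest.json (not shown in this diff) grants the necessary capabilities: "contextMenus" for the menu APIs, "scripting" plus a host permission such as "activeTab" for executeScript, and a module service worker so the import statements above are valid. A minimal Manifest V3 sketch; the name and version fields are placeholders:

```json
{
    "manifest_version": 3,
    "name": "transformers.js extension (sketch)",
    "version": "0.1",
    "background": { "service_worker": "background.js", "type": "module" },
    "permissions": ["contextMenus", "scripting", "activeTab"]
}
```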
+////////////////////// 2. Message Events /////////////////////
+//
+// Listen for messages from the UI, process them, and send the result back.
+chrome.runtime.onMessage.addListener((message, sender, sendResponse) => {
+    console.log('sender', sender)
+    if (message.action !== 'classify') return; // Ignore messages that are not meant for classification.
+
+    // Run model prediction asynchronously
+    (async function () {
+        // Perform classification
+        let result = await classify(message.text);
+
+        // Send response back to UI
+        sendResponse(result);
+    })();
+
+    // Return true to indicate we will send a response asynchronously;
+    // see https://stackoverflow.com/a/46628145 for more information.
+    return true;
+});
+//////////////////////////////////////////////////////////////
+
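On the sending side, the UI (for example a hypothetical popup.js, not shown in this diff) would post a message shaped like `{ action: 'classify', text }` and receive the classification in the callback:

```js
// popup.js (sketch) - ask the background service worker to classify some text.
const text = 'The weather today is wonderful.';
chrome.runtime.sendMessage({ action: 'classify', text }, (response) => {
    // `response` is whatever background.js passed to sendResponse().
    console.log('classification result', response);
});
```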