Size
14.2 KB
Version
1.0.1
Created
Apr 2, 2026
Updated
15 days ago
// ==UserScript==
// @name YouTube Transcript Analyzer & Screenshot Capture
// @description Analyzes a YouTube video's caption transcript into a "vibe prompt" and collects thumbnail screenshots at key moments
// @version 1.0.1
// @match https://*.youtube.com/*
// @icon https://www.youtube.com/s/desktop/5d82a8bc/img/favicon_32x32.png
// ==/UserScript==
8(function() {
9 'use strict';
10
// Build and inject the extension's UI (title, analyze button, progress
// note, results area) into the watch page.
function createUI() {
  // Guard against duplicate injection: YouTube is a single-page app, so
  // this can end up invoked more than once for the same document.
  if (document.getElementById('transcript-analyzer-container')) {
    return;
  }

  // Root container for everything the extension renders.
  const container = document.createElement('div');
  container.id = 'transcript-analyzer-container';
  container.style.cssText = `
    margin: 20px 0;
    padding: 15px;
    border-radius: 8px;
    background-color: #f1f1f1;
    border: 1px solid #ddd;
    font-family: Arial, sans-serif;
  `;

  const title = document.createElement('h2');
  title.textContent = 'Transcript Analyzer & Screenshot Capture';
  title.style.cssText = `
    margin-top: 0;
    color: #212121;
    font-size: 1.2em;
  `;

  // Button that kicks off the analysis pipeline.
  const analyzeButton = document.createElement('button');
  analyzeButton.id = 'analyze-transcript-btn';
  analyzeButton.textContent = 'Analyze Transcript & Capture Screenshots';
  analyzeButton.style.cssText = `
    background-color: #ff0000;
    color: white;
    border: none;
    padding: 10px 15px;
    border-radius: 4px;
    cursor: pointer;
    font-weight: bold;
    margin: 10px 0;
  `;

  // Inline hover effect — no stylesheet is injected, so :hover is not available.
  analyzeButton.addEventListener('mouseenter', () => {
    analyzeButton.style.backgroundColor = '#cc0000';
  });
  analyzeButton.addEventListener('mouseleave', () => {
    analyzeButton.style.backgroundColor = '#ff0000';
  });

  // Hidden until an analysis completes.
  const resultsContainer = document.createElement('div');
  resultsContainer.id = 'analysis-results';
  resultsContainer.style.cssText = `
    margin-top: 15px;
    display: none;
  `;

  // Shown while the transcript fetch/analysis is in flight.
  const loadingIndicator = document.createElement('div');
  loadingIndicator.id = 'loading-indicator';
  loadingIndicator.textContent = 'Analyzing transcript and capturing screenshots...';
  loadingIndicator.style.cssText = `
    display: none;
    color: #212121;
    font-style: italic;
  `;

  container.appendChild(title);
  container.appendChild(analyzeButton);
  container.appendChild(loadingIndicator);
  container.appendChild(resultsContainer);

  // Prefer mounting just above the video description; fall back to the
  // player's parent when the description node is not present.
  const descriptionSection = document.querySelector('#description');
  if (descriptionSection) {
    descriptionSection.parentNode.insertBefore(container, descriptionSection);
  } else {
    const player = document.querySelector('#movie_player');
    if (player && player.parentNode) {
      player.parentNode.appendChild(container);
    }
  }

  analyzeButton.addEventListener('click', analyzeTranscriptAndCaptureScreenshots);
}
97
// Click handler: fetches the transcript, derives a "vibe prompt" and a set
// of thumbnail "screenshots", then renders both. Disables the button and
// shows a progress note while work is in flight; always restores the UI
// state in `finally`.
async function analyzeTranscriptAndCaptureScreenshots() {
  const loadingIndicator = document.getElementById('loading-indicator');
  const resultsContainer = document.getElementById('analysis-results');
  const analyzeButton = document.getElementById('analyze-transcript-btn');

  // Enter the "working" state.
  loadingIndicator.style.display = 'block';
  resultsContainer.style.display = 'none';
  analyzeButton.disabled = true;

  try {
    const transcript = await getTranscript();
    const vibePrompt = processTranscript(transcript);
    const screenshots = await captureScreenshots(transcript);
    displayResults(vibePrompt, screenshots);
  } catch (error) {
    console.error('Error analyzing transcript:', error);
    // Render the failure via textContent — error.message can echo
    // page-derived text, so it must not be interpreted as HTML.
    resultsContainer.textContent = '';
    const errorNote = document.createElement('p');
    errorNote.style.color = 'red';
    errorNote.textContent = `Error: ${error.message}`;
    resultsContainer.appendChild(errorNote);
    resultsContainer.style.display = 'block';
  } finally {
    loadingIndicator.style.display = 'none';
    analyzeButton.disabled = false;
  }
}
130
// Fetch and parse the caption track for the currently playing video.
// Returns an array of { text, start, dur } segments (times in seconds).
// Throws a descriptive Error when the video has no captions or any
// network step fails.
async function getTranscript() {
  try {
    const videoId = new URLSearchParams(window.location.search).get('v');
    if (!videoId) {
      throw new Error('Could not extract video ID from URL');
    }

    // Re-fetch the watch page: its embedded ytInitialPlayerResponse
    // carries the caption track list.
    const response = await fetch(`https://www.youtube.com/watch?v=${videoId}`);
    if (!response.ok) {
      throw new Error(`Video page request failed (HTTP ${response.status})`);
    }
    const html = await response.text();

    const ytInitialPlayerResponseMatch = html.match(/ytInitialPlayerResponse\s*=\s*({.+?})\s*;/);
    if (!ytInitialPlayerResponseMatch) {
      throw new Error('Could not find ytInitialPlayerResponse in page');
    }

    const playerResponse = JSON.parse(ytInitialPlayerResponseMatch[1]);

    if (!playerResponse.captions || !playerResponse.captions.playerCaptionsTracklistRenderer) {
      throw new Error('No captions available for this video');
    }

    const captionTracks = playerResponse.captions.playerCaptionsTracklistRenderer.captionTracks;
    if (!captionTracks || captionTracks.length === 0) {
      throw new Error('No caption tracks found');
    }

    // Prefer English; otherwise take whatever track comes first.
    const captionTrack = captionTracks.find(track => track.languageCode === 'en') || captionTracks[0];

    const transcriptResponse = await fetch(`${captionTrack.baseUrl}&fmt=json3`);
    if (!transcriptResponse.ok) {
      throw new Error(`Caption request failed (HTTP ${transcriptResponse.status})`);
    }
    const transcriptData = await transcriptResponse.json();

    // json3 events without `segs` carry no text — skip them. `dDurationMs`
    // and `seg.utf8` are optional in this format, so default them instead
    // of producing NaN durations or "undefined" text.
    return transcriptData.events
      .filter(event => event.segs)
      .map(event => ({
        text: event.segs.map(seg => seg.utf8 || '').join(''),
        start: (event.tStartMs || 0) / 1000,
        dur: (event.dDurationMs || 0) / 1000
      }));
  } catch (error) {
    console.error('Error fetching transcript:', error);
    throw new Error(`Failed to fetch transcript: ${error.message}`);
  }
}
185
// Turn a transcript (array of { text, start, dur } segments) into a short
// natural-language "vibe prompt" summarising duration, keyword themes and
// tone. Returns a fallback string (never throws) if processing fails.
function processTranscript(transcript) {
  try {
    const fullText = transcript.map(segment => segment.text).join(' ');

    // Count words on the trimmed text so an empty transcript reports 0
    // words rather than 1 (''.split(/\s+/) yields ['']).
    const trimmedText = fullText.trim();
    const wordCount = trimmedText ? trimmedText.split(/\s+/).length : 0;

    // Total duration = end of the last segment (0 for an empty transcript).
    const duration = transcript.length > 0 ?
      transcript[transcript.length - 1].start + transcript[transcript.length - 1].dur : 0;

    // Simple keyword extraction (a real implementation might use a more
    // sophisticated NLP library): drop short/common words, count the rest.
    const commonWords = ['the', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with', 'by', 'is', 'are', 'was', 'were', 'be', 'been', 'have', 'has', 'had', 'do', 'does', 'did', 'will', 'would', 'could', 'should', 'may', 'might', 'must', 'can'];
    const words = fullText.toLowerCase().replace(/[^\w\s]/g, '').split(/\s+/);
    const keywords = words
      .filter(word => word.length > 4 && !commonWords.includes(word))
      .reduce((acc, word) => {
        acc[word] = (acc[word] || 0) + 1;
        return acc;
      }, {});

    // Top 10 keywords by frequency.
    const topKeywords = Object.entries(keywords)
      .sort((a, b) => b[1] - a[1])
      .slice(0, 10)
      .map(([word]) => word);

    // Crude tone detection: compare positive vs. negative hits among the
    // top keywords only.
    const positiveWords = ['good', 'great', 'excellent', 'amazing', 'fantastic', 'wonderful', 'awesome', 'brilliant', 'outstanding'];
    const negativeWords = ['bad', 'terrible', 'awful', 'horrible', 'worst', 'disappointing', 'poor', 'negative'];

    let tone = 'neutral';
    let positiveCount = 0;
    let negativeCount = 0;

    topKeywords.forEach(keyword => {
      if (positiveWords.includes(keyword)) positiveCount++;
      if (negativeWords.includes(keyword)) negativeCount++;
    });

    if (positiveCount > negativeCount) tone = 'positive';
    else if (negativeCount > positiveCount) tone = 'negative';

    const minutes = Math.floor(duration / 60);
    const vibePrompt = `Create content with a ${tone} tone that explores topics related to: ${topKeywords.join(', ')}. This is based on a ${minutes}-minute video transcript with ${wordCount} words. The content should capture the main themes and key discussion points.`;

    return vibePrompt;
  } catch (error) {
    console.error('Error processing transcript:', error);
    return 'Error generating vibe prompt from transcript';
  }
}
239
// Pick representative moments from the transcript and build a thumbnail
// "screenshot" entry for each. Returns at most 6 entries of
// { time, timeString, url, description }.
async function captureScreenshots(transcript) {
  try {
    const videoId = new URLSearchParams(window.location.search).get('v');
    if (!videoId) {
      throw new Error('Could not extract video ID from URL');
    }

    // Sample one time point roughly every 30 seconds of transcript.
    const timePoints = [];
    const interval = 30; // seconds
    let lastTime = -interval;

    transcript.forEach(segment => {
      if (segment.start - lastTime >= interval) {
        timePoints.push({
          time: segment.start,
          text: segment.text.substring(0, 100) + (segment.text.length > 100 ? '...' : '')
        });
        lastTime = segment.start;
      }
    });

    // Also surface up to 3 text-heavy segments as likely key moments,
    // skipping any within 5s of an already-sampled point.
    transcript
      .filter(segment => segment.text.length > 150)
      .slice(0, 3)
      .forEach(segment => {
        if (!timePoints.some(tp => Math.abs(tp.time - segment.start) < 5)) {
          timePoints.push({
            time: segment.start,
            text: segment.text.substring(0, 100) + (segment.text.length > 100 ? '...' : '')
          });
        }
      });

    // Chronological order for display.
    timePoints.sort((a, b) => a.time - b.time);

    const screenshots = timePoints.map((point, index) => {
      // HH:MM:SS label for display.
      const hours = Math.floor(point.time / 3600);
      const minutes = Math.floor((point.time % 3600) / 60);
      const seconds = Math.floor(point.time % 60);
      const timeString = `${hours.toString().padStart(2, '0')}:${minutes.toString().padStart(2, '0')}:${seconds.toString().padStart(2, '0')}`;

      // img.youtube.com only serves four numbered stills per video
      // (0.jpg-3.jpg); there is no public per-timestamp frame endpoint.
      // Cycle through 1-3 so every entry resolves to a real image — the
      // previous `Math.floor(time / 30)` index 404'd for any point past
      // the first couple of minutes. NOTE(review): thumbnails are
      // therefore approximate, not true frames at `time`.
      const thumbIndex = (index % 3) + 1;
      const imageUrl = `https://img.youtube.com/vi/${videoId}/${thumbIndex}.jpg`;

      return {
        time: point.time,
        timeString: timeString,
        url: imageUrl,
        description: point.text
      };
    });

    return screenshots.slice(0, 6); // at most 6 screenshots
  } catch (error) {
    console.error('Error capturing screenshots:', error);
    throw new Error(`Failed to capture screenshots: ${error.message}`);
  }
}
308
// Render the vibe prompt and the screenshot cards into the results panel,
// then reveal it.
function displayResults(vibePrompt, screenshots) {
  const resultsContainer = document.getElementById('analysis-results');

  // Transcript-derived text ends up inside innerHTML below; escape it so
  // caption content cannot inject markup into the page.
  const escapeHtml = value => String(value)
    .replace(/&/g, '&amp;')
    .replace(/</g, '&lt;')
    .replace(/>/g, '&gt;')
    .replace(/"/g, '&quot;');

  let screenshotsHTML = '';
  screenshots.forEach(screenshot => {
    screenshotsHTML += `
      <div style="margin: 10px 0;">
        <img src="${escapeHtml(screenshot.url)}" alt="Screenshot at ${escapeHtml(screenshot.time)}s" style="max-width: 320px; height: auto;">
        <p style="font-size: 0.9em; color: #666;">Time: ${escapeHtml(screenshot.time)}s</p>
      </div>
    `;
  });

  resultsContainer.innerHTML = `
    <h3>Vibe Prompt:</h3>
    <p style="background-color: #e9e9e9; padding: 10px; border-radius: 4px;">${escapeHtml(vibePrompt)}</p>
    <h3>Captured Screenshots:</h3>
    <div>${screenshotsHTML}</div>
  `;

  resultsContainer.style.display = 'block';
}
332
// Bootstrap: inject the UI once the DOM is ready, and re-inject after
// YouTube's in-app (SPA) navigations, which swap the watch page without a
// full document reload — DOMContentLoaded alone would fire only once.
function init() {
  const mountUI = () => {
    // Skip if a previous run already injected the panel into this page.
    if (!document.getElementById('transcript-analyzer-container')) {
      createUI();
    }
  };

  if (document.readyState === 'loading') {
    document.addEventListener('DOMContentLoaded', mountUI);
  } else {
    mountUI();
  }

  // Custom event YouTube dispatches when client-side navigation completes.
  // NOTE(review): event name observed on current youtube.com — verify it
  // still fires on future page versions.
  document.addEventListener('yt-navigate-finish', mountUI);
}

// Start the extension
init();
345
346})();