// Demo feed data: fake LinkedIn-style posts. Rendered into the page by
// renderFeed() and used as teleprompter cue text by updateCueFromTopic().
// `page` is the poster name, `content` the post body, `stats` a vanity metric.
const topics = [
{
page: "OneClick-Solutions.ch",
content: "Free Access: Test-drive the future of AI safety training. This simulator helps teams build better AI judgment in realistic moments.",
stats: "1 repost"
},
{
page: "RestorFX Switzerland",
content: "Revolutionizing surface restoration with AI-driven analysis in Switzerland. #RestorFX #AutomotiveInnovation",
stats: "1,927 views"
},
{
page: "Clean Solutions",
content: "Scaling industrial cleaning solutions through predictive maintenance data. #CleanSolutions #OperationalExcellence",
stats: "999 impressions"
}
];
// --- DOM references (elements must exist in the page before this script runs) ---
const feed = document.getElementById('feed');
const video = document.getElementById('videoElement');
const previewVideo = document.getElementById('previewVideo');
const canvas = document.getElementById('presenterCanvas');
// willReadFrequently hints the browser to keep the canvas backing store on CPU,
// speeding up the repeated putImageData/drawImage cycle in runSegmentation().
const ctx = canvas.getContext('2d', { willReadFrequently: true });
const statusBox = document.getElementById('statusBox');
const cueTitle = document.getElementById('cueTitle');
const cueText = document.getElementById('cueText');
const enableCameraBtn = document.getElementById('enableCameraBtn');
const startSegBtn = document.getElementById('startSegBtn');
const recordBtn = document.getElementById('recordBtn');
// --- Mutable app state ---
let cameraStream = null;      // MediaStream from getUserMedia (video + audio)
let segmenter = null;         // lazily-created body-segmentation model
let isSegmenting = false;     // true while the cutout render loop is active
let recorder = null;          // active MediaRecorder, or null
let recordedChunks = [];      // Blob chunks accumulated during recording
let animationFrameId = null;  // rAF handle for the segmentation loop
// Audio state
let audioCtx = null;          // shared AudioContext (created on first camera start)
let micSource = null;         // MediaStreamAudioSourceNode for the mic
let streamDest = null;        // MediaStreamAudioDestinationNode feeding the recorder
let audioLevel = 0;           // last measured average mic level (0-255 scale)
/** Mirror the given message into the on-screen status box. */
function setStatus(text) {
  const label = `Status: ${text}`;
  statusBox.textContent = label;
}
/** Update the teleprompter cue heading and body in one step. */
function setCue(title, text) {
  [cueTitle.textContent, cueText.textContent] = [title, text];
}
/**
 * Show the selected topic in the teleprompter cue area.
 * Exposed on window so inline onclick handlers in the feed HTML can call it.
 * @param {number} index - position in the `topics` array.
 */
window.updateCueFromTopic = function(index) {
  const topic = topics[index];
  // Fix: renderFeed() triples the feed, so a caller may pass an index past
  // topics.length; bail out instead of throwing on `topic.page`.
  if (!topic) return;
  setCue("Look here while speaking", `${topic.page}: ${topic.content}`);
};
/** Render the demo topics into #feed, tripled to fake an endless timeline. */
function renderFeed() {
  const cards = topics.map((topic) => `
${topic.page[0]}
${topic.page}
Company • Training
${topic.content}
${topic.stats}
`).join('');
  feed.innerHTML = cards.repeat(3);
  // Re-run icon substitution if the Lucide library is loaded on the page.
  if (window.lucide) lucide.createIcons();
}
/**
 * Acquire camera + microphone, attach the stream to both video elements,
 * and start the VU-meter analysis. Safe to call repeatedly: any previous
 * stream is stopped first. Errors are reported via the status box.
 */
async function startCamera() {
  setStatus('Requesting camera & mic...');
  try {
    // Tear down any previous capture session before asking again.
    if (cameraStream) {
      for (const track of cameraStream.getTracks()) track.stop();
      cameraStream = null;
    }
    // Requesting both at once is more reliable for synchronized streams
    const constraints = {
      video: { facingMode: 'user', width: { ideal: 1280 }, height: { ideal: 720 } },
      audio: {
        echoCancellation: true,
        noiseSuppression: true,
        autoGainControl: true
      }
    };
    cameraStream = await navigator.mediaDevices.getUserMedia(constraints);
    video.srcObject = cameraStream;
    previewVideo.srcObject = cameraStream;
    await Promise.all([video.play(), previewVideo.play()]);
    // Wire up the VU meter so the user can confirm the mic is actually live.
    setupAudioAnalysis(cameraStream);
    setStatus('Camera & Audio active (Check VU meter)');
  } catch (err) {
    console.error('Hardware Error:', err);
    setStatus(`Hardware error: ${err.message}`);
  }
}
// Visual feedback for Audio - if this moves, audio WILL be in the recording
function setupAudioAnalysis(stream) {
const audioTracks = stream.getAudioTracks();
if (audioTracks.length === 0) return;
if (!audioCtx) audioCtx = new (window.AudioContext || window.webkitAudioContext)();
micSource = audioCtx.createMediaStreamSource(stream);
const analyzer = audioCtx.createAnalyser();
analyzer.fftSize = 256;
micSource.connect(analyzer);
const bufferLength = analyzer.frequencyBinCount;
const dataArray = new Uint8Array(bufferLength);
const checkLevel = () => {
analyzer.getByteFrequencyData(dataArray);
let sum = 0;
for (let i = 0; i < bufferLength; i++) sum += dataArray[i];
audioLevel = sum / bufferLength;
// Update a small visual indicator in the status box
const meter = "I".repeat(Math.floor(audioLevel / 5));
if (audioLevel > 1) {
statusBox.setAttribute('data-audio', 'active');
// Logic to show a tiny green bar could go here
}
if (cameraStream) requestAnimationFrame(checkLevel);
};
checkLevel();
}
/**
 * Toggle the AI person-cutout. First call loads the segmentation model
 * (starting the camera if needed) and kicks off the render loop; a second
 * call stops the loop and clears the canvas.
 */
async function startSegmentation() {
  // Toggle off: stop the loop and wipe the presenter canvas.
  if (isSegmenting) {
    isSegmenting = false;
    if (animationFrameId) cancelAnimationFrame(animationFrameId);
    startSegBtn.textContent = 'Enable Cutout';
    ctx.clearRect(0, 0, canvas.width, canvas.height);
    return;
  }
  // The model needs live video; bail if the camera can't be started.
  if (!cameraStream) {
    await startCamera();
    if (!cameraStream) return;
  }
  setStatus('Loading AI Processor...');
  try {
    await tf.ready();
    // Lazily create the segmenter once and reuse it on later toggles.
    segmenter = segmenter || await bodySegmentation.createSegmenter(
      bodySegmentation.SupportedModels.MediaPipeSelfieSegmentation,
      { runtime: 'tfjs', modelType: 'general' }
    );
    isSegmenting = true;
    startSegBtn.textContent = 'Disable Cutout';
    setStatus('Cutout Active');
    runSegmentation();
  } catch (err) {
    setStatus(`AI Error: ${err.message}`);
  }
}
/**
 * Per-frame render loop: segment the person out of the webcam frame and
 * composite the cutout (with a drop shadow) onto the presenter canvas,
 * anchored bottom-left at 70% of canvas height. Runs until isSegmenting
 * is cleared by startSegmentation().
 */
async function runSegmentation() {
  // Offscreen canvas used to mask the raw frame before compositing.
  const tempCanvas = document.createElement('canvas');
  const tempCtx = tempCanvas.getContext('2d');
  let loggedError = false;
  const loop = async () => {
    if (!isSegmenting) return;
    // readyState >= 2 (HAVE_CURRENT_DATA) means a decodable frame exists.
    if (video.readyState >= 2) {
      // Fix: also compare height — the old check only watched width, so a
      // height-only change left the mask canvas the wrong size.
      if (tempCanvas.width !== video.videoWidth || tempCanvas.height !== video.videoHeight) {
        tempCanvas.width = video.videoWidth;
        tempCanvas.height = video.videoHeight;
      }
      try {
        const segmentation = await segmenter.segmentPeople(video);
        const mask = await bodySegmentation.toBinaryMask(segmentation);
        ctx.clearRect(0, 0, canvas.width, canvas.height);
        // Paint the mask, then keep only the video pixels inside it.
        tempCtx.globalCompositeOperation = 'source-over';
        tempCtx.putImageData(mask, 0, 0);
        tempCtx.globalCompositeOperation = 'source-in';
        tempCtx.drawImage(video, 0, 0);
        // Scale the cutout preserving aspect ratio; 40px margins.
        const aspect = video.videoWidth / video.videoHeight;
        const h = canvas.height * 0.7;
        const w = h * aspect;
        ctx.shadowBlur = 15;
        ctx.shadowColor = "rgba(0,0,0,0.3)";
        ctx.drawImage(tempCanvas, 40, canvas.height - h - 40, w, h);
      } catch (e) {
        // Fix: was a silent `catch (e) {}`. Stay best-effort so one bad
        // frame doesn't kill the loop, but surface the first failure once.
        if (!loggedError) {
          loggedError = true;
          console.warn('Segmentation frame failed:', e);
        }
      }
    }
    animationFrameId = requestAnimationFrame(loop);
  };
  loop();
}
/**
 * Record the presenter canvas plus mic audio to a downloadable .webm file.
 * Starts the camera and cutout if they aren't running yet. The stop path
 * (and download trigger) lives in recorder.onstop; the click handler on
 * recordBtn calls recorder.stop().
 */
async function startRecording() {
  try {
    if (!cameraStream) await startCamera();
    // Browsers suspend AudioContext until a user gesture; resume explicitly.
    if (audioCtx) await audioCtx.resume();
    setStatus('Preparing High-Fidelity Capture...');
    if (!isSegmenting) {
      await startSegmentation();
      // Give the segmenter a moment to paint its first frames.
      await new Promise(r => setTimeout(r, 600));
    }
    recordedChunks = [];
    // 1. Capture Video from Canvas
    const canvasStream = canvas.captureStream(30);
    // 2. Capture Audio through a Destination Node for maximum compatibility.
    // Fix: guard micSource — it is only set once setupAudioAnalysis has run
    // on a stream that actually had an audio track; connecting null threw.
    if (!streamDest && audioCtx && micSource) {
      streamDest = audioCtx.createMediaStreamDestination();
      micSource.connect(streamDest);
    }
    const finalTracks = [...canvasStream.getVideoTracks()];
    // Use the Processed Audio Track if available, else fallback to raw stream
    if (streamDest && streamDest.stream.getAudioTracks().length > 0) {
      finalTracks.push(streamDest.stream.getAudioTracks()[0]);
    } else if (cameraStream.getAudioTracks().length > 0) {
      finalTracks.push(cameraStream.getAudioTracks()[0]);
    }
    const combinedStream = new MediaStream(finalTracks);
    // Fix: feature-detect the codec instead of hard-coding it — the old code
    // threw NotSupportedError on browsers without the vp8/opus combo. Fall
    // back to the browser's default container/codec when unsupported.
    const options = { audioBitsPerSecond: 128000 };
    const preferredType = 'video/webm;codecs=vp8,opus';
    if (MediaRecorder.isTypeSupported?.(preferredType)) {
      options.mimeType = preferredType;
    }
    recorder = new MediaRecorder(combinedStream, options);
    recorder.ondataavailable = (e) => {
      if (e.data && e.data.size > 0) recordedChunks.push(e.data);
    };
    recorder.onstop = () => {
      if (recordedChunks.length === 0) {
        setStatus('Recording failed: No data captured.');
        return;
      }
      const blob = new Blob(recordedChunks, { type: 'video/webm' });
      const url = URL.createObjectURL(blob);
      const a = document.createElement('a');
      a.href = url;
      a.download = `LinkedIn-Training-${Date.now()}.webm`;
      a.click();
      // Fix: release the blob URL once the download is handed off; the old
      // code leaked one object URL (the whole recording) per session.
      setTimeout(() => URL.revokeObjectURL(url), 1000);
      setStatus('Recording Saved (Check for Audio)');
      recorder = null;
    };
    recorder.start(200); // Small time slices make for more robust files
    recordBtn.textContent = 'Stop Recording';
    recordBtn.classList.replace('red', 'slate');
    setStatus('REC ● AUDIO CAPTURE ACTIVE');
  } catch (err) {
    setStatus('Record Error: ' + err.message);
  }
}
// --- UI wiring & bootstrap ---
enableCameraBtn.addEventListener('click', startCamera);
startSegBtn.addEventListener('click', startSegmentation);
// Record button toggles between starting and stopping a capture session.
recordBtn.addEventListener('click', () => {
  const isRecording = recorder && recorder.state === 'recording';
  if (!isRecording) {
    startRecording();
    return;
  }
  recorder.stop();
  recordBtn.textContent = 'Start Recording';
  recordBtn.classList.replace('slate', 'red');
});
// Keep the presenter canvas sized to the viewport (initially and on resize).
const sizeCanvasToViewport = () => {
  canvas.width = window.innerWidth;
  canvas.height = window.innerHeight;
};
window.addEventListener('resize', sizeCanvasToViewport);
sizeCanvasToViewport();
renderFeed();