add live audio
This commit is contained in:
parent
c79dc6b95f
commit
8c89d5abc2
8 changed files with 721 additions and 0 deletions
@@ -523,6 +523,39 @@
    </div>
    <!-- ABOUT END -->
    <div id="timeline"><div id="timeline_head"></div><div>
    <!-- AUDIO BEGIN -->
    <div class="audioWrapper">
      <header>
        <h1>Voice-change-O-matic</h1>
      </header>

      <canvas class="visualizer" width="640" height="100"></canvas>

      <form class="controls">
        <div>
          <label for="voice">Voice setting</label>
          <select id="voice" name="voice">
            <option value="distortion">Distortion</option>
            <option value="convolver">Reverb</option>
            <option value="biquad">Bass Boost</option>
            <option value="delay">Echo Delay</option>
            <option value="off" selected>Off</option>
          </select>
        </div>
        <div>
          <label for="visual">Visualizer setting</label>
          <select id="visual" name="visual">
            <option value="sinewave">Sinewave</option>
            <option value="frequencybars" selected>Frequency bars</option>
            <option value="off">Off</option>
          </select>
        </div>
        <div>
          <a class="mute">Mute</a>
        </div>
      </form>
    </div>
    <!-- AUDIO END -->
    <!-- EXPORT BEGIN -->
    <script id="ffmpeg.min.js" type="application/javascript" src="/web/ffmpeg_modules/ffmpeg.min.js"></script>
    <!-- EXPORT END -->
8  bin/web/assets/sound.svg  Normal file
@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 26.0.2, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
	 viewBox="0 0 16 16" style="enable-background:new 0 0 16 16;" xml:space="preserve">
<path d="M15.6,11c0-0.1,0-0.1,0-0.2V0.3L5.8,2.7v9.2c-0.7-0.3-1.5-0.5-2.5-0.3c-1.8,0.3-3.1,1.4-2.9,2.6c0.2,1.2,1.8,1.9,3.5,1.6
	c1.5-0.2,2.7-1.1,2.9-2.1h0.1V3.6l7.6-1.9v8c-0.6-0.3-1.5-0.4-2.4-0.3C10.3,9.7,9,10.9,9.2,12c0.2,1.2,1.8,1.9,3.5,1.6
	C14.5,13.4,15.8,12.2,15.6,11z"/>
</svg>
After: Size: 628 B
@@ -926,3 +926,18 @@ h4{


/* ABOUT END */
.audioWrapper {
  position: absolute;
  left: 0px;
  bottom: 0px;
  z-index: 42000;
  background-color: rgba(255,125,125,0.5);
  opacity: 0;
  pointer-events: none;
}
.audioWrapper canvas.visualizer {
  border-top: 1px solid black;
  border-bottom: 1px solid black;
  margin-bottom: -3px;
  box-shadow: 0 -2px 4px rgba(0, 0, 0, 0.7), 0 3px 4px rgba(0, 0, 0, 0.7);
}
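Note: the wrapper ships fully transparent (opacity: 0) and with pointer-events: none, so the Voice-change-O-matic controls are never visible or clickable in the app. The visualizer canvas effectively acts as an offscreen buffer: audio.js keeps drawing into it and mirrors its pixels into the per-property fft images via canvas.toDataURL() (see drawAlt() below). To eyeball it while debugging, something like this would work (hypothetical, not part of this commit):

    document.querySelector('.audioWrapper').style.opacity = 1;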
637  bin/web/js/audio.js  Normal file
@@ -0,0 +1,637 @@
import {
  mapValue,
  mix,
} from './utils.js';

window.mapValue = mapValue;
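// mapValue() comes from the project's own utils.js, which this commit does not
// touch. Judging from the call sites below, it linearly remaps a value from one
// range to another, with an optional clamp -- roughly this sketch:
//
//   const mapValue = (v, inMin, inMax, outMin, outMax, clamp = false) => {
//     let out = outMin + ((v - inMin) / (inMax - inMin)) * (outMax - outMin);
//     if (clamp) {
//       out = Math.min(Math.max(out, Math.min(outMin, outMax)), Math.max(outMin, outMax));
//     }
//     return out;
//   };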
const Audio = function(tp, record) {
  const audioDom = document.querySelector('.audioWrapper');
  const heading = audioDom.querySelector("h1");
  heading.textContent = "CLICK HERE TO START";
  //document.body.addEventListener("click", init);
  let started = false;

  // Audio-to-property mappings, keyed by layer id, then by property title.
  const mapping = {};

  const addAudioOptions = (layer, propTitle) => {
    const panelPropTitle = tp.getPanelPropTitle(propTitle);
    if (panelPropTitle === null) {
      console.log('Audio::addAudioOptions::error', `cannot find panelPropTitle "${propTitle}"`);
      return;
    }
    const container = tp.getPanelPropContainer(panelPropTitle);
    const mappingOptions = mapping[layer.id()][propTitle];
    const panel = tp.getPanel();
    const audioOptions = document.createElement('div');
    audioOptions.classList.add('audioOptions');
    audioOptions.classList.add('audioOptionsTypeDefault');
    audioOptions.classList.add(`audioOptions${propTitle}`);
    audioOptions.style.position = 'relative';
    audioOptions.style.width = '100%';
    audioOptions.style.background = 'rgba(0,255,255,0.2)';
    audioOptions.style.order = window.getComputedStyle(container).order;

    // fftSize is 256 * 8 (see visualize()), so the usable bin range is half that.
    mappingOptions.freq_min = 0;
    mappingOptions.freq_max = 256 * 8 / 2;

    const updateMappingOptions = () => {
      mappingOptions.min_out = parseFloat(panel.querySelector(`#audio_min${propTitle}`).value);
      mappingOptions.max_out = parseFloat(panel.querySelector(`#audio_max${propTitle}`).value);
      mappingOptions.sync =
        panel.querySelector(`input[name="audio_sync${propTitle}"]:checked`).value;
      const s = panel.querySelector(`#audio_smoothing${propTitle}`).value;
      mappingOptions.smoothing = parseFloat(s);
    };

    const min_max_Dom = document.createElement('div');
    min_max_Dom.classList.add('audio_min_max');
    const min_inputDom_label = document.createElement('label');
    // htmlFor (not .for) is the DOM property backing a label's "for" attribute
    min_inputDom_label.htmlFor = `audio_min${propTitle}`;
    min_inputDom_label.innerHTML = 'audio_min';
    const min_inputDom = document.createElement('input');
    min_inputDom.type = 'number';
    min_inputDom.name = `audio_min${propTitle}`;
    min_inputDom.id = `audio_min${propTitle}`;
    min_inputDom.value = '0';
    const max_inputDom_label = document.createElement('label');
    max_inputDom_label.htmlFor = `audio_max${propTitle}`;
    max_inputDom_label.innerHTML = 'audio_max';
    const max_inputDom = document.createElement('input');
    max_inputDom.type = 'number';
    max_inputDom.name = `audio_max${propTitle}`;
    max_inputDom.id = `audio_max${propTitle}`;
    max_inputDom.value = '255';
    const smoothing_inputDom_label = document.createElement('label');
    smoothing_inputDom_label.htmlFor = `audio_smoothing${propTitle}`;
    smoothing_inputDom_label.innerHTML = 'audio_smoothing';
    const smoothing_inputDom = document.createElement('input');
    smoothing_inputDom.type = 'number';
    smoothing_inputDom.name = `audio_smoothing${propTitle}`;
    smoothing_inputDom.id = `audio_smoothing${propTitle}`;
    smoothing_inputDom.value = config.audio.defaultSmoothing; // global config, see config.js below
    smoothing_inputDom.min = 0;
    smoothing_inputDom.max = 1;
    smoothing_inputDom.step = 0.01;
    min_max_Dom.append(smoothing_inputDom_label);
    min_max_Dom.append(smoothing_inputDom);
    min_max_Dom.append(min_inputDom_label);
    min_max_Dom.append(min_inputDom);
    min_max_Dom.append(max_inputDom_label);
    min_max_Dom.append(max_inputDom);
    audioOptions.append(min_max_Dom);

    const sync_Dom = document.createElement('div');
    const sync_titleDom = document.createElement('p');
    sync_titleDom.innerHTML = 'sync with:';
    sync_Dom.append(sync_titleDom);

    const sync_options = ['volume', 'pitch', 'frequency'];
    sync_options.forEach((o, oi) => {
      const sync_inputDom_label = document.createElement('label');
      sync_inputDom_label.htmlFor = `audio_sync${propTitle}${o}`;
      sync_inputDom_label.innerHTML = o;
      const sync_inputDom = document.createElement('input');
      sync_inputDom.type = 'radio';
      sync_inputDom.name = `audio_sync${propTitle}`;
      sync_inputDom.id = `audio_sync${propTitle}${o}`;
      sync_inputDom.value = o;
      // default select first option
      if (oi === 0) {
        sync_inputDom.checked = true;
      }
      sync_Dom.append(sync_inputDom_label);
      sync_Dom.append(sync_inputDom);
      sync_inputDom.addEventListener('change', updateMappingOptions);
    });
    audioOptions.append(sync_Dom);

    // Snapshot of the visualizer canvas (updated every frame in drawAlt()),
    // with an overlay div intended for selecting a frequency range on top of it.
    const fft_Dom = document.createElement('div');
    const fft_imgDom = document.createElement('img');
    const fft_selectDom = document.createElement('div');
    fft_Dom.style.position = 'relative';
    fft_Dom.style.top = '0px';
    fft_Dom.style.left = '0px';
    fft_imgDom.classList.add('audio_fft');
    fft_imgDom.style.width = '100%';
    fft_imgDom.style.userDrag = 'none';
    fft_imgDom.style.userSelect = 'none';
    fft_imgDom.style.pointerEvents = 'none';
    fft_selectDom.style.position = 'absolute';
    fft_selectDom.style.top = '0px';
    fft_selectDom.style.left = '0px';
    fft_selectDom.style.width = '100%';
    fft_selectDom.style.height = '100%';
    fft_selectDom.style.pointerEvents = 'none';
    fft_selectDom.style.backgroundColor = 'rgba(0,255,0,0.2)';
    fft_selectDom.style.border = '1px solid rgba(0,255,0,1.0)';
    fft_Dom.append(fft_imgDom);
    fft_Dom.append(fft_selectDom);
    audioOptions.append(fft_Dom);
    min_inputDom.addEventListener('change', updateMappingOptions);
    max_inputDom.addEventListener('change', updateMappingOptions);
    smoothing_inputDom.addEventListener('change', updateMappingOptions);
    // Groundwork for dragging out a frequency range on the fft image;
    // these values are captured here but not consumed anywhere else yet.
    let setFrequency = false;
    let freq_down = 0;
    let freq_up = 0;
    fft_Dom.addEventListener('mousedown', (e) => {
      setFrequency = true;
      const bb = fft_Dom.getBoundingClientRect();
      freq_down = mapValue(e.clientX, bb.x, bb.x + bb.width, 0, 256 * 8 / 2, true);
    });
    fft_Dom.addEventListener('mouseup', (e) => {
      setFrequency = false;
      const bb = fft_Dom.getBoundingClientRect();
      freq_up = mapValue(e.clientX, bb.x, bb.x + bb.width, 0, 256 * 8 / 2, true);
    });

    //removeAudioOptions();
    container.after(audioOptions);

    updateMappingOptions();
    mappingOptions.value = mappingOptions.min_out;
  };
  const removeAudioOptions = (propTitle = '') => {
    const panel = tp.getPanel();
    if (propTitle === '') {
      const otherAudioOptions = panel.querySelectorAll('.audioOptions');
      if (otherAudioOptions !== null) {
        for (let i = 0; i < otherAudioOptions.length; i++) {
          otherAudioOptions[i].remove();
        }
      }
    } else {
      const audioOptions = panel.querySelector(`.audioOptions${propTitle}`);
      if (audioOptions !== null) {
        audioOptions.remove();
      }
    }
  };
  const addAudioButton = (layer, propTitle, isActive) => {
    const panel = tp.getPanel();
    const panelPropTitle = tp.getPanelPropTitle(propTitle);
    if (panelPropTitle !== null) {
      const container = tp.getPanelPropContainer(panelPropTitle);

      if (container === null) {
        console.log("Audio::addAudioButton",
          `impossible! cannot find panelPropContainer for ${propTitle}`);
      } else if (container.querySelector('.audioButton') !== null) {
        console.log("Audio::addAudioButton",
          `already added an audio button for ${propTitle}`);
      } else {
        const button = document.createElement('div');
        button.classList.add('audioButton');
        button.classList.add(`audioButton${propTitle}`);
        button.innerHTML = `<img src="/web/assets/sound.svg" alt="audio" />`;
        container.append(button);
        // The button toggles the mapping: the first click registers the
        // property and shows its options, the second click removes both.
        button.addEventListener('click', () => {
          if (!started) {
            init();
          }
          if (!mapping.hasOwnProperty(layer.id())) {
            mapping[layer.id()] = {};
          }
          if (!mapping[layer.id()].hasOwnProperty(propTitle)) {
            mapping[layer.id()][propTitle] = {};
            button.classList.add('active');
            addAudioOptions(layer, propTitle);
          } else {
            delete mapping[layer.id()][propTitle];
            if (Object.keys(mapping[layer.id()]).length === 0) {
              delete mapping[layer.id()];
            }
            button.classList.remove('active');
            removeAudioOptions(propTitle);
          }
        });
        if (isActive) {
          button.classList.add('active');
          addAudioOptions(layer, propTitle);
        }
      }
    } else {
      console.log("Audio::addAudioButton",
        `cannot find panelPropTitle for ${propTitle}`);
    }
  };
  const injectPanel = (layer) => {
    const props = Object.keys(layer.theatreObject.value);
    props.forEach((propTitle) => {
      if (config.audio.ignoreProps.indexOf(propTitle) < 0) {
        let isActive = false;
        if (mapping.hasOwnProperty(layer.id())) {
          if (mapping[layer.id()].hasOwnProperty(propTitle)) {
            isActive = true;
          }
        }
        addAudioButton(layer, propTitle, isActive);
      }
    });
  };
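  // For orientation: after an audio button has been toggled on and
  // addAudioOptions() has run, `mapping` holds one options object per layer
  // and property. Roughly (layer id and property name here are made up):
  //
  //   mapping = {
  //     'layer-1': {
  //       fontSize: {
  //         min_out: 0, max_out: 255,    // from the audio_min / audio_max inputs
  //         sync: 'volume',              // 'volume' | 'pitch' | 'frequency'
  //         smoothing: 0.7,              // EMA factor from audio_smoothing
  //         freq_min: 0, freq_max: 1024, // bin range, 256 * 8 / 2
  //         value: 0,                    // last smoothed output
  //       },
  //     },
  //   };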
  function init() {
    started = true;
    heading.textContent = "Voice-change-O-matic";
    //document.body.removeEventListener("click", init);

    // Older browsers might not implement mediaDevices at all, so we set an empty object first
    if (navigator.mediaDevices === undefined) {
      navigator.mediaDevices = {};
    }

    // Some browsers partially implement mediaDevices. We can't assign an object
    // with getUserMedia as it would overwrite existing properties.
    // Add the getUserMedia property if it's missing.
    if (navigator.mediaDevices.getUserMedia === undefined) {
      navigator.mediaDevices.getUserMedia = function(constraints) {
        // First get ahold of the legacy getUserMedia, if present
        const getUserMedia =
          navigator.webkitGetUserMedia ||
          navigator.mozGetUserMedia ||
          navigator.msGetUserMedia;

        // Some browsers just don't implement it - return a rejected promise with an error
        // to keep a consistent interface
        if (!getUserMedia) {
          return Promise.reject(
            new Error("getUserMedia is not implemented in this browser")
          );
        }

        // Otherwise, wrap the call to the old navigator.getUserMedia with a Promise
        return new Promise(function(resolve, reject) {
          getUserMedia.call(navigator, constraints, resolve, reject);
        });
      };
    }
    // Set up forked web audio context, for multiple browsers
    // window. is needed otherwise Safari explodes
    const audioCtx = new (window.AudioContext || window.webkitAudioContext)();
    const voiceSelect = audioDom.querySelector("#voice");
    let source;
    let stream;

    // Grab the mute button to use below
    const mute = audioDom.querySelector(".mute");

    // Set up the different audio nodes we will use for the app
    const analyser = audioCtx.createAnalyser();
    analyser.minDecibels = -90;
    analyser.maxDecibels = -10;
    analyser.smoothingTimeConstant = 0.85;
    window.analyser = analyser;

    const distortion = audioCtx.createWaveShaper();
    const gainNode = audioCtx.createGain();
    const biquadFilter = audioCtx.createBiquadFilter();
    const convolver = audioCtx.createConvolver();

    const echoDelay = createEchoDelayEffect(audioCtx);

    // Distortion curve for the waveshaper, thanks to Kevin Ennis
    // http://stackoverflow.com/questions/22312841/waveshaper-node-in-webaudio-how-to-emulate-distortion
    function makeDistortionCurve(amount) {
      let k = typeof amount === "number" ? amount : 50,
        n_samples = 44100,
        curve = new Float32Array(n_samples),
        deg = Math.PI / 180,
        i = 0,
        x;
      for (; i < n_samples; ++i) {
        x = (i * 2) / n_samples - 1;
        curve[i] = ((3 + k) * x * 20 * deg) / (Math.PI + k * Math.abs(x));
      }
      return curve;
    }
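    // The curve above maps each input sample x in [-1, 1] through
    // ((3 + k) * x * 20 * PI/180) / (PI + k * |x|), so the gain collapses as
    // |x| grows. With k = 400 (the value voiceChange() passes below), x = 0.05
    // already yields ~0.30 while x = 1.0 only reaches ~0.35: quiet input is
    // boosted, loud input is flattened, which is what reads as distortion.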
    // Grab audio track via XHR for convolver node
    let soundSource;
    const ajaxRequest = new XMLHttpRequest();

    ajaxRequest.open(
      "GET",
      "https://mdn.github.io/voice-change-o-matic/audio/concert-crowd.ogg",
      true
    );

    ajaxRequest.responseType = "arraybuffer";

    ajaxRequest.onload = function() {
      const audioData = ajaxRequest.response;

      audioCtx.decodeAudioData(
        audioData,
        function(buffer) {
          soundSource = audioCtx.createBufferSource();
          convolver.buffer = buffer;
        },
        function(e) {
          console.log("Error with decoding audio data: " + e.err);
        }
      );
    };

    ajaxRequest.send();
    // Set up canvas context for visualizer
    const canvas = audioDom.querySelector(".visualizer");
    const canvasCtx = canvas.getContext("2d");

    const intendedWidth = audioDom.clientWidth; // unused: the width is tied to the fft bin count below
    canvas.setAttribute("width", 256 * 8 / 2);
    const visualSelect = audioDom.querySelector("#visual");
    let drawVisual;

    // Main block for doing the audio recording
    if (navigator.mediaDevices.getUserMedia) {
      console.log("getUserMedia supported.");
      const constraints = {
        audio: true
      };
      navigator.mediaDevices
        .getUserMedia(constraints)
        .then(function(stream) {
          source = audioCtx.createMediaStreamSource(stream);
          source.connect(distortion);
          distortion.connect(biquadFilter);
          biquadFilter.connect(gainNode);
          convolver.connect(gainNode);
          echoDelay.placeBetween(gainNode, analyser);
          analyser.connect(audioCtx.destination);

          visualize();
          voiceChange();
        })
        .catch(function(err) {
          console.log("The following gUM error occurred: " + err);
        });
    } else {
      console.log("getUserMedia not supported on your browser!");
    }
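    // Signal chain, reconstructed from the connect() calls above:
    //
    //   mic source -> distortion -> biquadFilter -> gainNode
    //              -> [echo delay dry/wet mix] -> analyser -> destination
    //
    // With the "Reverb" voice, voiceChange() reroutes biquadFilter through the
    // convolver into gainNode. The analyser sits last, so every effect is
    // reflected in the visualizer and thus in the property mappings below.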
    function visualize() {
      const WIDTH = canvas.width;
      const HEIGHT = canvas.height;

      const visualSetting = visualSelect.value;

      if (visualSetting === "sinewave") {
        analyser.fftSize = 2048;
        const bufferLength = analyser.fftSize;

        // We can use Float32Array instead of Uint8Array if we want higher precision
        // const dataArray = new Float32Array(bufferLength);
        const dataArray = new Uint8Array(bufferLength);

        canvasCtx.clearRect(0, 0, WIDTH, HEIGHT);

        const draw = function() {
          drawVisual = requestAnimationFrame(draw);

          analyser.getByteTimeDomainData(dataArray);

          canvasCtx.fillStyle = "rgb(200, 200, 200)";
          canvasCtx.fillRect(0, 0, WIDTH, HEIGHT);

          canvasCtx.lineWidth = 2;
          canvasCtx.strokeStyle = "rgb(0, 0, 0)";

          canvasCtx.beginPath();

          const sliceWidth = (WIDTH * 1.0) / bufferLength;
          let x = 0;

          for (let i = 0; i < bufferLength; i++) {
            let v = dataArray[i] / 128.0;
            let y = (v * HEIGHT) / 2;

            if (i === 0) {
              canvasCtx.moveTo(x, y);
            } else {
              canvasCtx.lineTo(x, y);
            }

            x += sliceWidth;
          }

          canvasCtx.lineTo(canvas.width, canvas.height / 2);
          canvasCtx.stroke();
        };

        draw();
      } else if (visualSetting == "frequencybars") {
        analyser.fftSize = 256 * 8;
        // only the lower half of the bins is drawn and mapped
        const bufferLengthAlt = analyser.frequencyBinCount / 2;

        // See comment above for Float32Array()
        const dataArrayAlt = new Uint8Array(bufferLengthAlt);

        canvasCtx.clearRect(0, 0, WIDTH, HEIGHT);

        let frameCount = 0;
        const drawAlt = function() {
          drawVisual = requestAnimationFrame(drawAlt);

          analyser.getByteFrequencyData(dataArrayAlt);

          canvasCtx.fillStyle = "rgb(0, 0, 0)";
          canvasCtx.fillRect(0, 0, WIDTH, HEIGHT);

          const barWidth = (WIDTH / bufferLengthAlt) * 2.5;
          let barHeight;
          let x = 0;

          // track the loudest bin: max_v approximates volume, max_i pitch
          let max_i = 0;
          let max_v = 0;
          for (let i = 0; i < bufferLengthAlt; i++) {
            barHeight = dataArrayAlt[i];

            if (barHeight > max_v) {
              max_v = barHeight;
              max_i = i;
            }
            canvasCtx.fillStyle = "rgb(" + (barHeight + 100) + ",50,50)";
            canvasCtx.fillRect(
              x,
              HEIGHT - barHeight / 2,
              barWidth,
              barHeight / 2
            );

            x += barWidth + 1;
          }
          const propsToSet = [];
          getLayers().forEach((layer) => {
            if (mapping.hasOwnProperty(layer.id())) {
              Object.keys(mapping[layer.id()]).forEach((propTitle) => {
                const m = mapping[layer.id()][propTitle];
                switch (m.sync) {
                  case 'volume': {
                    let a = mapValue(max_v, 0, 255, m.min_out, m.max_out, true);
                    m.value = m.value * m.smoothing + (1.0 - m.smoothing) * a;
                    propsToSet.push({
                      prop: layer.theatreObject.props[propTitle],
                      value: m.value,
                    });
                    break;
                  }
                  case 'pitch': {
                    let a = mapValue(max_i, 0, bufferLengthAlt - 1, m.min_out, m.max_out, true);
                    m.value = m.value * m.smoothing + (1.0 - m.smoothing) * a;
                    propsToSet.push({
                      prop: layer.theatreObject.props[propTitle],
                      value: m.value,
                    });
                    break;
                  }
                  // 'frequency' sync is selectable in the UI but not handled yet
                  default:
                    break;
                }
              });
            }
          });
          // apply the mapped values every other frame to keep the transaction load down
          if (propsToSet.length > 0 && frameCount % 2 === 0) {
            tp.studio.transaction(({
              set
            }) => {
              propsToSet.forEach((p) => {
                set(p.prop, p.value, true);
              });
            });
          }
          // mirror the visualizer into the per-property fft images in the panel
          const panel = tp.getPanel();
          const fft_images = panel.querySelectorAll('.audio_fft');
          if (fft_images !== null) {
            const src = canvas.toDataURL();
            if (window.printDebug === true) {
              console.log({canvas, src, fft_images, panel}, "DEBUG AUDIO");
            }
            fft_images.forEach((e) => {
              e.src = src;
            });
          }
          frameCount++;
        };
        drawAlt();
      } else if (visualSetting == "off") {
        canvasCtx.clearRect(0, 0, WIDTH, HEIGHT);
        canvasCtx.fillStyle = "red";
        canvasCtx.fillRect(0, 0, WIDTH, HEIGHT);
      }
    }
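    // Each mapped property chases its audio target through the exponential
    // moving average used in drawAlt() above:
    //
    //   next = prev * smoothing + (1 - smoothing) * target
    //
    // With the default smoothing of 0.7, a property at 0 given a constant
    // target of 100 steps through 30, 51, 65.7, ... per frame, closing about
    // two thirds of the gap within three frames; higher smoothing values
    // react more slowly but jitter less.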
    function voiceChange() {
      distortion.oversample = "4x";
      biquadFilter.gain.setTargetAtTime(0, audioCtx.currentTime, 0);

      const voiceSetting = voiceSelect.value;

      if (echoDelay.isApplied()) {
        echoDelay.discard();
      }

      // When convolver is selected it is connected back into the audio path
      if (voiceSetting == "convolver") {
        biquadFilter.disconnect(0);
        biquadFilter.connect(convolver);
      } else {
        biquadFilter.disconnect(0);
        biquadFilter.connect(gainNode);

        if (voiceSetting == "distortion") {
          distortion.curve = makeDistortionCurve(400);
        } else if (voiceSetting == "biquad") {
          biquadFilter.type = "lowshelf";
          biquadFilter.frequency.setTargetAtTime(1000, audioCtx.currentTime, 0);
          biquadFilter.gain.setTargetAtTime(25, audioCtx.currentTime, 0);
        } else if (voiceSetting == "delay") {
          echoDelay.apply();
        } else if (voiceSetting == "off") {
          console.log("Voice settings turned off");
        }
      }
    }
    function createEchoDelayEffect(audioContext) {
      const delay = audioContext.createDelay(1);
      const dryNode = audioContext.createGain();
      const wetNode = audioContext.createGain();
      const mixer = audioContext.createGain();
      const filter = audioContext.createBiquadFilter();

      delay.delayTime.value = 0.75;
      dryNode.gain.value = 1;
      wetNode.gain.value = 0;
      filter.frequency.value = 1100;
      filter.type = "highpass";

      return {
        apply: function() {
          wetNode.gain.setValueAtTime(0.75, audioContext.currentTime);
        },
        discard: function() {
          wetNode.gain.setValueAtTime(0, audioContext.currentTime);
        },
        isApplied: function() {
          return wetNode.gain.value > 0;
        },
        placeBetween: function(inputNode, outputNode) {
          // delay -> wetNode -> filter -> delay forms the echo feedback loop
          inputNode.connect(delay);
          delay.connect(wetNode);
          wetNode.connect(filter);
          filter.connect(delay);

          inputNode.connect(dryNode);
          dryNode.connect(mixer);
          wetNode.connect(mixer);
          mixer.connect(outputNode);
        },
      };
    }
    // Event listeners to change visualize and voice settings
    visualSelect.onchange = function() {
      window.cancelAnimationFrame(drawVisual);
      visualize();
    };

    voiceSelect.onchange = function() {
      voiceChange();
    };

    mute.onclick = voiceMute;

    let previousGain;

    function voiceMute() {
      if (mute.id === "") {
        previousGain = gainNode.gain.value;
        gainNode.gain.value = 0;
        mute.id = "activated";
        mute.innerHTML = "Unmute";
      } else {
        gainNode.gain.value = previousGain;
        mute.id = "";
        mute.innerHTML = "Mute";
      }
    }
  }

  this.init = init;
  this.injectPanel = injectPanel;
  this.mapping = mapping;
};

export {
  Audio
}
@@ -81,6 +81,10 @@ const config = {
    zoomBaseFactor: 0.001,
    zoomDynamicMax: 42,
  },
  audio: {
    ignoreProps: ['transformOrigin', 'fontFamily', 'text', 'mirror_x', 'mirror_y', 'mirror_xy', 'fontVariationAxes', 'color'],
    defaultSmoothing: 0.7,
  },
  midi: {
    touchTimeThreshold_s: 0.5,
    smoothingMix: 0.1,
@@ -792,6 +792,12 @@ const Layer = function(tp, layerID, fontsAndAxes, autoInit = true) {
  panel.addEventListener("mouseover", showBoundingBoxDivIfSelected);
  panel.addEventListener("mouseleave", hideBoundingBoxDiv);

  if (typeof audio === 'object' && audio.hasOwnProperty('injectPanel')) {
    audio.injectPanel(this);
  } else {
    console.log('Layer::findInjectPanel', `cannot inject audio panel for ${this.id()} for some reason.`);
  }

  injectedPanel = true;
  const detail = {titles: Object.keys(panelPropTitles), containers: Object.keys(panelPropContainers)};
  const e = new CustomEvent('injected', {detail});
@@ -26,6 +26,14 @@ import {
  ModuleFS
} from './moduleFS.js';

import {
  Audio
} from './audio.js';

import {
  Record
} from './record.js';

//import {
//  MidiController
//} from './midiController.js';
@@ -63,6 +71,10 @@ const exporter = new Exporter();
const interactor = new Interactor();
const moduleFS = new ModuleFS();
window.moduleFS = moduleFS;
const record = new Record(tp);
window.debug_record = record;
const audio = new Audio(tp, record); // possibly nicer if we pass tp instead of attaching to window
window.audio = audio;

window.panelFinderTimeout = false;
const sequenceEventBuffer = {};
@@ -569,6 +569,12 @@ const TheatrePlay = function(autoInit = false) {
        align-self: end;
        margin-top: 5px;
      }
      .audioButton{
        width: 20px;
      }
      .audioButton.active{
        background: green;
      }
    `;
    this.shadowRoot.appendChild(style);
  }