mute, letterDelay and other fixes

parent 748af243fa
commit 0ca73bca05

4 changed files with 158 additions and 223 deletions
@@ -32,7 +32,7 @@ const Audio = function(tp, record) {
   heading.textContent = "CLICK HERE TO START";
 
   // an array of possible sync options.
-  const audio_sync_options = ['volume', 'pitch', 'frequency'];
+  const audio_sync_options = ['volume', 'pitch', 'clarity'];
   // could also be an enum
   // like that
   //const AudioSyncOptions = Object.freeze({
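Note: the 'frequency' sync option is renamed to 'clarity'; further down, clarity is computed as the ratio of the loudest bin to the total energy in the mapped band (m.max_v / m.total_v).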
@@ -139,8 +139,8 @@ const Audio = function(tp, record) {
       b.min_out = mm[0];
       b.max_out = mm[1];
       const a = new AudioMappingOptions();
-      a.min_out = mm[0];
-      a.max_out = mm[1];
+      a.min_out = 1.0; // NOTE: dirty, dirty
+      a.max_out = 1.0; // hardcoded value, you
       return [{r}, {g}, {b}, {a}];
     } else {
       const o = new AudioMappingOptions();
@@ -219,10 +219,11 @@ const Audio = function(tp, record) {
 
   const createAudioOptions = (layer, propTitle, container) => {
     const mappingOptions = mapping[layer.id()][propTitle];
-    let hasLetterDelay = config
+    let hasLetterDelay = //false;
+      config
       .layer.letterDelayProps
-      .indexOf(propTitle.split('.')[0]) >= 0
-      && tp.isSequenced([...[layer.id()], ...propTitle.split('.')]);
+      .indexOf(propTitle.split('.')[0]) >= 0 && propTitle.indexOf('color') < 0;
+      //&& tp.isSequenced([...[layer.id()], ...propTitle.split('.')]);
     const panel = tp.getPanel();
     if (!areMutationsObserved) {
       mutationObserver.observe(panel, { childList: true, subtree: true });
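Note: hasLetterDelay now excludes color properties (propTitle.indexOf('color') < 0) and no longer requires the property to be sequenced; the tp.isSequenced check is kept commented out.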
@@ -273,6 +274,7 @@ const Audio = function(tp, record) {
       mappingOptions.letterDelay = typeof ld.value === 'number' ? ld.value : parseInt(ld.value);
     }
     mappingOptions.source = panel.querySelector(toCssClass(`audio_source${propTitle}`,'#')).value;
+    mappingOptions.muted = panel.querySelector(toCssClass(`audio_mute${propTitle}`,'#')).checked;
   };
 
   const source_Dom = document.createElement('select');
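Note: updateMappingOptions now also reads the new mute checkbox into mappingOptions.muted; the draw loop below uses this flag to decide which sources get an audible gain.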
@@ -286,12 +288,27 @@ const Audio = function(tp, record) {
     if (file[0] !== '.') {
       const source_file = document.createElement('option');
       source_file.value = file;
+      if (file.length > config.audio.maxFilenameLength) {
+        source_file.innerHTML = file.substr(0,6) + '..' + file.substr(file.length - 6, 6);
+      } else {
       source_file.innerHTML = file;
+      }
       source_Dom.append(source_file);
     }
   });
   audioOptions.append(source_Dom);
 
+  const muteDom = document.createElement('input');
+  const muteDom_label = document.createElement('label');
+  muteDom.id = toCssClass(`audio_mute${propTitle}`);
+  muteDom.name = toCssClass(`audio_mute${propTitle}`);
+  muteDom.type = 'checkbox';
+  muteDom.checked = true;
+  muteDom_label.for = toCssClass(`audio_mute${propTitle}`);
+  muteDom_label.innerHTML = 'muted';
+  audioOptions.append(muteDom);
+  audioOptions.append(muteDom_label);
+
   const min_max_Dom = document.createElement('div');
   min_max_Dom.classList.add('audio_min_max');
   const min_Cont = document.createElement('div');
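Note: filenames longer than config.audio.maxFilenameLength (24, added to the config below) are shortened to their first six and last six characters, e.g. 'my-very-long-recording.wav' renders as 'my-ver..ng.wav'.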
@@ -412,6 +429,7 @@ const Audio = function(tp, record) {
   fft_Dom.append(fft_selectDom);
   audioOptions.append(fft_Dom);
   source_Dom.addEventListener('change', updateMappingOptions);
+  muteDom.addEventListener('change', updateMappingOptions);
   min_inputDom.addEventListener('change', updateMappingOptions);
   max_inputDom.addEventListener('change', updateMappingOptions);
   smoothing_inputDom.addEventListener('change', updateMappingOptions);
@@ -609,10 +627,10 @@ const Audio = function(tp, record) {
     }
   });
 };
-const audioSourceCombo = {};
+const audioSourceCombos = {};
 const readAudioFiles = () => {
   FS.readdir(config.fs.idbfsAudioDir).forEach((file) => {
-    if (file.indexOf('.') !== 0 && !audioSourceCombo.hasOwnProperty(file)) {
+    if (file.indexOf('.') !== 0 && !audioSourceCombos.hasOwnProperty(file)) {
       const audioElement = document.createElement('audio');
       audioElement.classList.add('invisible');
       audioElement.classList.add('audio_file');
@@ -641,12 +659,12 @@ const Audio = function(tp, record) {
       audioElement.loop = true;
 
       const source = audioCtx.createMediaElementSource(audioElement);
-      source.connect(audioCtx.destination);
-      const analyser = audioCtx.createAnalyser();
-      analyser.minDecibels = -90;
-      analyser.maxDecibels = -10;
-      analyser.smoothingTimeConstant = 0.85;
-      analyser.fftSize = config.audio.fftBandsAnalysed;
+      const gain = audioCtx.createGain();
+      gain.gain.value = 0;
+      source.connect(gain);
+      gain.connect(audioCtx.destination);
+      //source.connect(audioCtx.destination);
+      const analyser = new AnalyserNode(audioCtx, config.audio.analyser);
       const bufferLength = analyser.frequencyBinCount / 2;
       const dataArray = new Uint8Array(bufferLength);
 
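Note: each file source now runs through a GainNode that starts at 0, so the looping audio element keeps feeding its analyser while staying silent; the gain is raised to 1 only for unmuted sources (see the draw loop below).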
@@ -654,7 +672,9 @@ const Audio = function(tp, record) {
 
       audioElement.play();
 
-      audioSourceCombo[file] = {
+      audioSourceCombos[file] = {
+        gain,
         source,
         dataArray,
         analyser,
         audioElement,
@@ -709,25 +729,22 @@ const Audio = function(tp, record) {
     // window. is needed otherwise Safari explodes
     audioCtx = new(window.AudioContext || window.webkitAudioContext)();
-    const voiceSelect = audioDom.querySelector("#voice");
-    let source;
-    let stream;
-
-    // Grab the mute button to use below
-    const mute = audioDom.querySelector(".mute");
-
-    // Set up the different audio nodes we will use for the app
-    const analyser = audioCtx.createAnalyser();
-    analyser.minDecibels = -90;
-    analyser.maxDecibels = -10;
-    analyser.smoothingTimeConstant = 0.85;
-    analyser.fftSize = config.audio.fftBandsAnalysed;
+    {
+      const analyser = new AnalyserNode(audioCtx, config.audio.analyser);
+      const bufferLength = analyser.frequencyBinCount / 2;
 
-      audioSourceCombo['microphone'] = {
+      audioSourceCombos['microphone'] = {
        // source: see below when we actually get the microphone
        analyser,
        dataArray: new Uint8Array(bufferLength),
        audioElement: null,
      };
+    }
 
     readAudioFiles();
 
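Note: the 'microphone' entry is registered without source and gain; both are attached later in the getUserMedia callback, once the stream actually exists.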
@@ -754,34 +771,6 @@ const Audio = function(tp, record) {
     return curve;
   }
 
-  // Grab audio track via XHR for convolver node
-  let soundSource;
-  const ajaxRequest = new XMLHttpRequest();
-
-  ajaxRequest.open(
-    "GET",
-    "https://mdn.github.io/voice-change-o-matic/audio/concert-crowd.ogg",
-    true
-  );
-
-  ajaxRequest.responseType = "arraybuffer";
-
-  ajaxRequest.onload = function() {
-    const audioData = ajaxRequest.response;
-
-    audioCtx.decodeAudioData(
-      audioData,
-      function(buffer) {
-        soundSource = audioCtx.createBufferSource();
-      },
-      function(e) {
-        console.log("Audio::audioCtx.decodeAudioData", "Error with decoding audio data" + e.err);
-      }
-    );
-  };
-
-  ajaxRequest.send();
-
   // Set up canvas context for visualizer
   const canvas = audioDom.querySelector(".visualizer");
   const canvasCtx = canvas.getContext("2d");
@@ -801,8 +790,14 @@ const Audio = function(tp, record) {
       navigator.mediaDevices
         .getUserMedia(constraints)
         .then(function(stream) {
-          source = audioCtx.createMediaStreamSource(stream);
-          source.connect(analyser);
+          const source = audioCtx.createMediaStreamSource(stream);
+          const gain = audioCtx.createGain();
+          gain.gain.value = 0;
+          source.connect(gain);
+          gain.connect(audioCtx.destination);
+          source.connect(audioSourceCombos['microphone'].analyser);
+          audioSourceCombos['microphone'].source = source;
+          audioSourceCombos['microphone'].gain = gain;
 
           visualize();
         })
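Note: like the file sources, the microphone is connected to the destination through a gain held at 0, presumably to keep the graph pulling samples without audible monitoring (and so without feedback); only its analyser output is consumed.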
@@ -819,11 +814,8 @@ const Audio = function(tp, record) {
     const w = config.audio.fftBandsUsed;
     const h = config.audio.fftHeight;
     const verticalFactor = h / 256.0;
-    const bufferLengthAlt = analyser.frequencyBinCount / 2;
 
-    // See comment above for Float32Array()
-    const dataArrayAlt = new Uint8Array(bufferLengthAlt);
 
     let canvasKeys = Object.keys(canvasCombos);
 
     for (let i = 0; i < canvasKeys.length; i++) {
@@ -853,7 +845,7 @@ const Audio = function(tp, record) {
           const sh = (m.max_in - m.min_in) * verticalFactor;
           canvasCombos[k][1].fillStyle = "rgb(80, 80, 80)"; // AUDIO COLOR
           canvasCombos[k][1].fillRect(sx, sy, sw, sh);
-        } else if (m.sync === 'pitch') {
+        } else if (m.sync === 'pitch' || m.sync === 'clarity') {
           const sx = m.min_freq;
           const sw = m.max_freq - m.min_freq;
           const sy = 0;
@@ -863,13 +855,18 @@ const Audio = function(tp, record) {
       }
     });
 
-    //analyser.getByteFrequencyData(dataArrayAlt);
     const usedSourceCombos = [];
     const analysedResults = {};
+    const unmuted = [];
     Object.keys(mapping).forEach((layerID) => {
       Object.keys(mapping[layerID]).forEach((propTitle) => {
         const m = mapping[layerID][propTitle];
         const source = m.source;
+        if (!m.muted) {
+          if (unmuted.indexOf(source) < 0) {
+            unmuted.push(source);
+          }
+        }
         if (usedSourceCombos.indexOf(source) < 0) {
           usedSourceCombos.push(source);
           analysedResults[source] = {
@@ -887,8 +884,8 @@ const Audio = function(tp, record) {
         analysedResults[source].mappings.push(m);
       });
     });
-    Object.keys(audioSourceCombo).forEach((k) => {
-      const asc = audioSourceCombo[k];
+    Object.keys(audioSourceCombos).forEach((k) => {
+      const asc = audioSourceCombos[k];
       if (asc.audioElement !== null) {
         if (usedSourceCombos.indexOf(k) >= 0) {
           if (positionRollover || asc.audioElement.paused) {
@@ -899,9 +896,14 @@ const Audio = function(tp, record) {
             asc.audioElement.pause();
           }
         }
+        if (unmuted.indexOf(k) < 0) {
+          asc.gain.gain.value = 0;
+        } else {
+          asc.gain.gain.value = 1;
+        }
       });
     usedSourceCombos.forEach((source) => {
-      const afs = audioSourceCombo[source];
+      const afs = audioSourceCombos[source];
       const r = analysedResults[source];
       afs.analyser.getByteFrequencyData(afs.dataArray);
       for (let f = 0; f < w; f++) {
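Note: muting no longer pauses anything; unmuted sources get gain 1 and muted ones gain 0, while the underlying elements keep playing, so analyser data and playback position stay live for every mapped source.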
@@ -914,16 +916,19 @@ const Audio = function(tp, record) {
         r.max_ri += v * f;
         let fillStyle = 'rgb(200,200,200)';
         for (let k_i = 0; k_i < canvasKeys.length; k_i++) {
           // NOTE: this is not the most efficient way to do it
           const k = canvasKeys[k_i];
           const x = f;
+          const layerID = canvasCombos[k][2];
+          if (mapping[layerID][k].source === source) {
             canvasCombos[k][1].fillStyle = fillStyle;
             canvasCombos[k][1].fillRect(
               x,
-              f,
               h - (v * verticalFactor),
               1,
               (v * verticalFactor)
             );
+          }
         }
         analysedResults[source].mappings.forEach((m) => {
           if (m.min_freq <= f && m.max_freq >= f) {
             m.total_v += v;
@@ -952,7 +957,7 @@ const Audio = function(tp, record) {
           canvasCombos[k][1].lineWidth = 1; // AUDIO COLOR
           canvasCombos[k][1].strokeStyle = "rgb(255,255,255)"; // AUDIO COLOR
           canvasCombos[k][1].strokeRect(sx, sy, sw, sh);
-        } else if (m.sync === 'pitch') {
+        } else if (m.sync === 'pitch' || m.sync === 'clarity') {
           const sx = m.min_freq;
           const sw = m.max_freq - m.min_freq;
           const sy = 0;
@@ -964,50 +969,60 @@ const Audio = function(tp, record) {
     }
 
     const propsToSet = [];
-    getLayers().forEach((layer) => {
-      if (mapping.hasOwnProperty(layer.id())) {
-        Object.keys(mapping[layer.id()]).forEach((propTitle) => {
-          const m = mapping[layer.id()][propTitle];
+    Object.keys(mapping).forEach((layerID) => {
+      Object.keys(mapping[layerID]).forEach((propTitle) => {
+        const m = mapping[layerID][propTitle];
         switch (m.sync) {
           case 'volume': {
             let a = mapValue(m.max_v, m.min_in, m.max_in, m.min_out, m.max_out, true);
             m.value = m.value * m.smoothing + (1.0 - m.smoothing) * a;
             propsToSet.push({
-              layer,
-              id: layer.id(),
+              id: layerID,
               title: propTitle,
               value: m.value,
             });
             break;
           }
           case 'pitch': {
-            const mi = config.audio.ignoreOutboundFrequencies ? m.max_i : max_i;
-            const ri = config.audio.ignoreOutboundFrequencies ? m.max_ri : max_ri;
+            const r = analysedResults[m.source];
+            const mi = config.audio.ignoreOutboundFrequencies ? m.max_i : r.max_i;
+            const ri = config.audio.ignoreOutboundFrequencies ? m.max_ri : r.max_ri;
             const fi = config.audio.pitchCombineFrequencies ? ri : mi;
             let a = mapValue(fi, m.min_freq, m.max_freq, m.min_out, m.max_out, true);
             if (!isNaN(a)) {
               m.value = m.value * m.smoothing + (1.0 - m.smoothing) * a;
               propsToSet.push({
-                layer,
-                id: layer.id(),
+                id: layerID,
                 title: propTitle,
                 value: m.value,
               });
             }
             break;
           }
+          case 'clarity': {
+            const clarity = m.max_v / m.total_v;
+            const a = mapValue(clarity, 0.01, 0.05, m.min_out, m.max_out, true);
+            if (!isNaN(a)) {
+              m.value = m.value * m.smoothing + (1.0 - m.smoothing) * a;
+              propsToSet.push({
+                id: layerID,
+                title: propTitle,
+                value: m.value,
+              });
+            }
+          }
           default:
             break;
         }
         if (m.letterDelay) {
           const pt = `letterDelays.${propTitle}`;
           propsToSet.push({
-            layer,
-            id: layer.id(),
+            id: layerID,
             title: pt,
            value: m.letterDelay,
          });
        }
      });
-      }
    });
    if (propsToSet.length > 0 && frameCount % 2 === 0) {
      // this is when to monitor live
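mapValue is not defined in this diff; judging from its call sites (input value, input range, output range, clamp flag), it is presumably a clamped linear remap, along these lines (names are illustrative):

    // Hypothetical sketch of mapValue as used above: linearly remap x from
    // [minIn, maxIn] to [minOut, maxOut], optionally clamping to the output range.
    const mapValue = (x, minIn, maxIn, minOut, maxOut, clamp) => {
      let t = (x - minIn) / (maxIn - minIn);
      if (clamp) {
        t = Math.min(1, Math.max(0, t));
      }
      return minOut + t * (maxOut - minOut);
    };

Each mapping then smooths the remapped value with an exponential moving average, m.value = m.value * m.smoothing + (1 - m.smoothing) * a. The new 'clarity' case has no break and falls through to default, which only breaks, so the fall-through is harmless.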
@@ -1036,6 +1051,7 @@ const Audio = function(tp, record) {
       propsToSet.forEach((p) => {
         const title = tp
           .getPanelPropTitle(p.title);
+        const layer = getLayer(p.id);
 
         if (title !== null) {
           const inputElement = title
@@ -1050,10 +1066,10 @@ const Audio = function(tp, record) {
           record.addValue(p.id, p.title, p.value, position);
           if (p.title.indexOf('color') === 0) {
             if (!config.audio.colorSeparateRGBA || p.title === 'color.a') {
-              record.liveUpdate(p.layer, position);
+              record.liveUpdate(layer, position);
             }
           } else {
-            record.liveUpdate(p.layer, position);
+            record.liveUpdate(layer, position);
           }
         });
       }
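Note: propsToSet entries no longer carry the layer object itself, only its id; the layer is looked up per entry via getLayer(p.id), the helper exported on window in this commit.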
@@ -1070,102 +1086,6 @@ const Audio = function(tp, record) {
       };
       drawAlt();
     }
 
-    const voiceChange = () => {
-      distortion.oversample = "4x";
-      biquadFilter.gain.setTargetAtTime(0, audioCtx.currentTime, 0);
-
-      const voiceSetting = voiceSelect.value;
-
-      if (echoDelay.isApplied()) {
-        echoDelay.discard();
-      }
-
-      // When convolver is selected it is connected back into the audio path
-      if (voiceSetting == "convolver") {
-        biquadFilter.disconnect(0);
-        biquadFilter.connect(convolver);
-      } else {
-        biquadFilter.disconnect(0);
-        biquadFilter.connect(gainNode);
-
-        if (voiceSetting == "distortion") {
-          distortion.curve = makeDistortionCurve(400);
-        } else if (voiceSetting == "biquad") {
-          biquadFilter.type = "lowshelf";
-          biquadFilter.frequency.setTargetAtTime(1000, audioCtx.currentTime, 0);
-          biquadFilter.gain.setTargetAtTime(25, audioCtx.currentTime, 0);
-        } else if (voiceSetting == "delay") {
-          echoDelay.apply();
-        } else if (voiceSetting == "off") {
-          console.log("Voice settings turned off");
-        }
-      }
-    }
-
-    function createEchoDelayEffect(audioContext) {
-      const delay = audioContext.createDelay(1);
-      const dryNode = audioContext.createGain();
-      const wetNode = audioContext.createGain();
-      const mixer = audioContext.createGain();
-      const filter = audioContext.createBiquadFilter();
-
-      delay.delayTime.value = 0.75;
-      dryNode.gain.value = 1;
-      wetNode.gain.value = 0;
-      filter.frequency.value = 1100;
-      filter.type = "highpass";
-
-      return {
-        apply: function() {
-          wetNode.gain.setValueAtTime(0.75, audioContext.currentTime);
-        },
-        discard: function() {
-          wetNode.gain.setValueAtTime(0, audioContext.currentTime);
-        },
-        isApplied: function() {
-          return wetNode.gain.value > 0;
-        },
-        placeBetween: function(inputNode, outputNode) {
-          inputNode.connect(delay);
-          delay.connect(wetNode);
-          wetNode.connect(filter);
-          filter.connect(delay);
-
-          inputNode.connect(dryNode);
-          dryNode.connect(mixer);
-          wetNode.connect(mixer);
-          mixer.connect(outputNode);
-        },
-      };
-    }
-
-    // Event listeners to change visualize and voice settings
-    visualSelect.onchange = function() {
-      window.cancelAnimationFrame(drawVisual);
-      visualize();
-    };
-
-    voiceSelect.onchange = function() {
-      voiceChange();
-    };
-
-    mute.onclick = voiceMute;
-
-    let previousGain;
-
-    function voiceMute() {
-      if (mute.id === "") {
-        previousGain = gainNode.gain.value;
-        gainNode.gain.value = 0;
-        mute.id = "activated";
-        mute.innerHTML = "Unmute";
-      } else {
-        gainNode.gain.value = previousGain;
-        mute.id = "";
-        mute.innerHTML = "Mute";
-      }
-    }
-  }
-}
 
 const deinit = () => {
@@ -1193,7 +1113,7 @@ const Audio = function(tp, record) {
 
   // debug
   this.canvasCombos = canvasCombos;
-  this.audioSourceCombo = audioSourceCombo;
+  this.audioSourceCombos = audioSourceCombos;
 };
 
 export {
@@ -95,7 +95,14 @@ const config = {
       'letterDelays': [0, 1000],
     },
     ignoreProps: ['transformOrigin', 'fontFamily', 'text', 'mirror_x', 'mirror_y', 'mirror_xy', 'height'],
+    maxFilenameLength: 24,
     defaultSmoothing: 0.7,
+    analyser: {
+      fftSize: 256 * 8,
+      minDecibels: -90,
+      maxDecibels: -10,
+      smoothingTimeConstant: 0.85,
+    },
     fftBandsAnalysed: 256 * 8,
     fftBandsUsed: 256 / 2,
     fftHeight: 256 / 4,
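Note: the new analyser block matches the AnalyserNode constructor options, so the Audio module passes it through unchanged. A minimal sketch of the resulting bin math (fftSize 2048 gives frequencyBinCount 1024; the module then halves that again):

    // From the Audio module above; config.audio.analyser feeds AnalyserNode directly.
    const audioCtx = new AudioContext();
    const analyser = new AnalyserNode(audioCtx, config.audio.analyser);
    const bufferLength = analyser.frequencyBinCount / 2; // 2048 / 2 / 2 = 512 bins
    const dataArray = new Uint8Array(bufferLength);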
@@ -416,12 +416,20 @@ const listAvailableFontsAndAxes = () => {
 window.listAvailableFontsAndAxes = listAvailableFontsAndAxes;
 window.getFontsAndAxes = getFontsAndAxes;
 
+window.getArtboard = () => {
+  return artboard;
+};
+
 window.getLayers = () => {
   return layers;
 };
 
+window.getLayer = (layerID) => {
+  if (layerID === 'artboard') {
+    return artboard;
+  } else {
+    return layers.find((layer) => layer.id() === layerID);
+  }
+};
+
 window.moveLayerUp = (layerID) => {
@@ -432,10 +440,6 @@ window.moveLayerDown = (layerID) => {
   layerOrder.moveDown(layerID);
 };
 
-window.getArtboard = () => {
-  return artboard;
-};
-
 const addLayer = (autoInit = true) => {
   const layerID = Module.addNewLayer();
   const layer = new Layer(tp, layerID, fontsAndAxes, autoInit);
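Note: window.getArtboard simply moved up to sit with the other layer helpers; the new window.getLayer resolves the special id 'artboard' to the artboard object, so callers can treat it like any other layer id.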
@@ -117,6 +117,7 @@ const LiveUpdater = function(tp, buffy) {
   };
   this.immediateUpdate = (layer, values) => {
     const cv = clone(values);
+    const ctv = clone(layer.theatreObject.value);
     if (cv.hasOwnProperty('color.r')) {
       cv['color'] = {
         r: cv['color.r'],
@@ -129,7 +130,10 @@ const LiveUpdater = function(tp, buffy) {
       delete cv['color.b'];
       delete cv['color.a'];
     }
-    const v = {...layer.theatreObject.value, ...cv};
+    flattenObject(cv, ['color']);
+    flattenObject(ctv, ['color']);
+    const v = {...ctv, ...cv};
+    deFlattenObject(v, ['color']);
     const p = layer.values2cppProps(v);
     if (p !== false) {
       const id = layer.id();
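Note: spreading nested values would replace the whole color object even when only one channel changed; flattening both the incoming values and the current theatreObject value to 'color.r'-style keys presumably lets the merge happen per channel before deFlattenObject restores the nested shape.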