voicoding.js
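
// Express server for the Voicoding web app: accepts recorded audio uploads and
// returns a transcription from Google Cloud Speech-to-Text (Korean, with an English fallback).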
const express = require('express');
const multer = require('multer');

const upload = multer();   // in-memory storage, so uploads are available as buffers on req.files
const app = express();
const port = 5500;

// Serve the Voicoding front-end and any other static assets
app.use(express.static('Voicoding_web'));
app.use(express.static('./'));
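
// Sends an in-memory audio buffer to Google Cloud Speech-to-Text and returns the
// recognized transcript(s). The audio is expected as LINEAR16 PCM recorded at 48 kHz.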
async function testGoogleSpeechToText(audioBuffer) {
    const speech = require('@google-cloud/speech');
    const client = new speech.SpeechClient();

    const audio = {
        content: audioBuffer.toString('base64'),
    };
    const encoding = 'LINEAR16';
    const sampleRateHertz = 48000;
    const languageCode = 'ko-KR';
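
    // Speech adaptation: phrase hints bias recognition toward Voicoding's spoken commands.
    // $OOV_CLASS_ALPHANUMERIC_SEQUENCE and $OOV_CLASS_ALPHA_SEQUENCE are built-in class
    // tokens that stand in for arbitrary identifiers and numbers inside the phrases.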
    const speechContexts = [{
        phrases: [
            '$OOV_CLASS_ALPHANUMERIC_SEQUENCE 는 $OOV_CLASS_ALPHANUMERIC_SEQUENCE',
            '$OOV_CLASS_ALPHANUMERIC_SEQUENCE 는 $OOV_CLASS_ALPHANUMERIC_SEQUENCE 보다 크다',
            '$OOV_CLASS_ALPHANUMERIC_SEQUENCE 는 $OOV_CLASS_ALPHANUMERIC_SEQUENCE 보다 작다',
            'for $OOV_CLASS_ALPHA_SEQUENCE in range $OOV_CLASS_ALPHANUMERIC_SEQUENCE',
            'if',
            '이프',
            'else if',
            '엘스 이프',
            'else',
            '엘스',
            'while',
            '와일',
            '함수',
            '파라미터',
            'parameter',
            '변수 선언',
            '함수 선언',
            '반복문 선언',
            '조건문 선언'
        ],
        boost: 20
    }];
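
    // Recognition request: Korean is the primary language, with US English as an
    // alternative so mixed Korean/English commands can still be transcribed.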
    const request = {
        audio: audio,
        config: {
            encoding: encoding,
            sampleRateHertz: sampleRateHertz,
            languageCode: languageCode,
            alternativeLanguageCodes: ['en-US'],
            speechContexts: speechContexts
        },
        interimResults: false, // interim results only apply to streaming recognition
    };

    const [response] = await client.recognize(request);
    // Keep the top alternative of each result; an array of transcript strings is returned.
    const transcription = response.results
        .map(result => result.alternatives[0].transcript);
    return transcription;
}
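
// POST /upload_sound: the browser uploads the recorded audio as multipart form data;
// the first uploaded file is transcribed and the result is sent back to the client.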
app.post('/upload_sound', upload.any(), async (req, res) => {
    console.log("Getting text transcription..");
    let transcription = await testGoogleSpeechToText(req.files[0].buffer);
    console.log("Text transcription: " + transcription);
    res.status(200).send(transcription);
});

app.listen(port, () => {
    console.log(`Express server listening on port: ${port}...`);
});