diff --git a/ChangeHistory.md b/ChangeHistory.md index 2bc57d5b..ff41f978 100644 --- a/ChangeHistory.md +++ b/ChangeHistory.md @@ -1,3 +1,90 @@ +## 0.7.x + +### New in version 0.7.8 +- NLU Node - Add Syntax to list of selectable features + +### New in version 0.7.7 +- STT Node - Set correct content-type when File-Type reports a mime type of audio/opus for ogg;codec=opus files. + +### New in version 0.7.6 +- Bump SDK Dependency to 3.18.2 +- STT Node To use iam-utils in place of removed iam-token-manager +- STT Node removed codec setting as service can now automatically detect the codec of the input audio and supports more than codec=opus for ogg formats. +- TSS Node fix to add visibility check on tts.voices and not stt.models +- Assistant V1 Workspace Manager Node updated to reflect that in update mode, updated fields +need a new_ prefix in their keys as part of the input json. +- NLC Node - migrate off deprecated methods +- NLC Node - Allow create of a classier to be based on a csv template node. + +### New in version 0.7.5 +- Bump SDK Dependency to 3.15.0 +- Added Portuguese (Brazilian) and Chinese (Simplified and Traditional) as output languages +for Visual Recognition node. +- Added list voices and delete customisation methods to TTS Corpus Builder node. +- STT Node Changes + - Allowing SDK to manage IAM Tokens. + - Streaming mode for STT using IAM key now working. + - Fix to stream mode for max alternatives and smart formatting options + - Keywords, Word Confidence and Customization Weight can now be specified + - Allow Start and End data packets to be specified as JSON objects, as well as +a stringified JSON objects. + - In line with SDK change use createLanguageModel() to create custom model +- Disable SSL Verification option for Assistant Node. +- Natural Language Understanding Node Changes + - Bump Natural Language Understanding to 2018-11-16 + - Add Limit Categories and limit_text_characters options +- Allow JSON input into Personality Insights node. 
+- Japanese word count was causing a Node-RED crash when run in the cloud. +- Hungarian supported by Language Translator. +- New Document Language Translator node. +- New Assistant V2 Node. +- Discovery Node changes + - Bump Discovery to 2018-12-03 + - Implement Query Notices method +- Bump dependency on file-type to 10.7.0 +- Bump dependency on temp to 0.9.0 + + +### New in version 0.7.4 +- Bump SDK Dependency to 3.11.0 +- Bump Assistant version to 2018-09-20 +- Bump Discovery version to 2018-08-01 +- Bump Natural Language Understanding to 2018-09-21 +- Bump Personality Insights to 2017-10-13 +- Discovery New Environment Size is now a string +- Add Language Text to DropDown for new supported languages in Translation Node. +- Natural Language Classifier updated for use of IAM key for authentication. +- Fix the Natural Language Understanding for bound IAM key service. +- German is a supported STT Language. +- Visual Recognition Key fix when migrating from unbound to bound service. + +### New in version 0.7.3 +- Modify Discovery Query Builder Node to use `listCollectionFields` to determine query list. + +### New in version 0.7.2 +- Allow version date for Assistant to be specified in `msg.params.version` +to allow optional usage of beta version. + +### New in version 0.7.1 +- Fix to how IAM Key for bound Visual Recognition is retrieved + +### New in version 0.7.0 +- In this release STT in Stream mode with IAM Keys does not work. +- Assistant, Discovery, Language Identify, Language Translator, +Natural Language Understanding, Personality Insights, +Speech to Text, Text to Speech, Tone Analyzer nodes updated +to allow for use of IAM key for authentication. +- Migrated STT node off deprecated methods. +- Fix to Tone Analyzer Node to preserve credentials on config reopen. +- Fix to Tone Analyzer to allow json objects and arrays as payload. +- Fix to STT where auto-connect was not being preserved when reopening configuration. 
+- Bump to 2018-03-05 version date for Discovery service. +- Move to V3 of Language Translator +- Migrated Discovery Nodes off deprecated methods. +- Remove Deprecated Retrieve and Rank Nodes + + + ## 0.6.x ### New in version 0.6.14 diff --git a/README.md b/README.md index 8786a28b..6c894a00 100644 --- a/README.md +++ b/README.md @@ -7,89 +7,27 @@ Node-RED Watson Nodes for IBM Cloud CLA assistant -### New in version 0.7.8 -- NLU Node - Add Syntax to list of selectable features - -### New in version 0.7.7 -- STT Node - Set correct content-type when File-Type reports a mime type of audio/opus for ogg;codec=opus files. - -### New in version 0.7.6 -- Bump SDK Dependency to 3.18.2 -- STT Node To use iam-utils in place of removed iam-token-manager -- STT Node removed codec setting as service can now automatically detect the codec of the input audio and supports more than codec=opus for ogg formats. -- TSS Node fix to add visibility check on tts.voices and not stt.models -- Assistant V1 Workspace Manager Node updated to reflect that in update mode, updated fields -need a new_ prefix in their keys as part of the input json. -- NLC Node - migrate off deprecated methods -- NLC Node - Allow create of a classier to be based on a csv template node. - -### New in version 0.7.5 -- Bump SDK Dependency to 3.15.0 -- Added Portuguese (Brazilian) and Chinese (Simplified and Traditional) as output languages -for Visual Recognition node. -- Added list voices and delete customisation methods to TTS Corpus Builder node. -- STT Node Changes - - Allowing SDK to manage IAM Tokens. - - Streaming mode for STT using IAM key now working. - - Fix to stream mode for max alternatives and smart formatting options - - Keywords, Word Confidence and Customization Weight can now be specified - - Allow Start and End data packets to be specified as JSON objects, as well as -a stringified JSON objects. 
- - In line with SDK change use createLanguageModel() to create custom model -- Disable SSL Verification option for Assistant Node. -- Natural Language Understanding Node Changes - - Bump Natural Language Understanding to 2018-11-16 - - Add Limit Categories and limit_text_characters options -- Allow JSON input into Personality Insights node. -- Japanese word count was causing a Node-RED crash when run in the cloud. -- Hungarian supported by Language Translator. -- New Document Language Translator node. -- New Assistant V2 Node. -- Discovery Node changes - - Bump Discovery to 2018-12-03 - - Implement Query Notices method -- Bump dependency on file-type to 10.7.0 -- Bump dependency on temp to 0.9.0 - - -### New in version 0.7.4 -- Bump SDK Dependency to 3.11.0 -- Bump Assistant version to 2018-09-20 -- Bump Discovery version to 2018-08-01 -- Bump Natural Language Understanding to 2018-09-21 -- Bump Personality Insights to 2017-10-13 -- Discovery New Environment Size is now a string -- Add Language Text to DropDrown for new supported languages in Translation Node. -- Natural Language Classifier updated for use of IAM key for authentication. -- Fix the Natural Language Understanding for bound IAM key service. -- German is a supported STT Language. -- Visual Recognition Key fix when migrating from unbound to bound service. - -### New in version 0.7.3 -- Modify Discovery Query Builder Node to use `listCollectionFields` to determine query list. - -### New in version 0.7.2 -- Allow version date for Assistant to be specified in `msg.params.version` -to allow optional usage of beta version. - -### New in version 0.7.1 -- Fix to how IAM Key for bound Visual Recognition is retrieved - -### New in version 0.7.0 -- In this release STT in Stream mode with IAM Keys does not work. 
-- Assistant, Discovery, Language Identify, Language Translator, -Natural Language Understanding, Personality Insights, -Speech to Text, Text to Speech, Tone Analyzer nodes updated -to allow for use of IAM key for authentication. -- Migrated STT node off deprecated methods. -- Fix to Tone Analyzer Node to preserve credentials on config reopen. -- Fix to Tone Analyzer to allow json objects and arrays as payload. -- Fix to STT where auto-connect was not being preserved when reopening configuration. -- Bump to 2018-03-05 version date for Discovery service. -- Move to V3 of Language Translator -- Migrated Discovery Nodes off deprecated methods. -- Remove Deprecated Retrieve and Rank Nodes +### New in version 0.8.0 +- In the 0.8.x releases the nodes are migrated to a node-red 1.0.x input +event callback function signature. +and migrated off watson-developer-cloud to ibm-watson as an npm dependency. +Migrated nodes will not be compatible with pre 1.0.0 versions of node-red. +During the migration there will be a dependency on both modules. +- Bump dependency on node to >=10.0.0 +- Bump dependency on cfenv, request, file-type +- Bump dependency on ibm-cloud-sdk-core to 0.3.7 (need to stay on 0.x, for STT Streaming to work) +- Node-RED & IBM-Watson & Use of promises on API invocation & IAM URL construct migration & Removal of default endpoint of + - Tone Analyzer node. + - Personality Insights node. + - Visual Recognition V3 node + - Text to Speech node + - Text to Speech Corpus Builder node +- New Visual Recognition V4 node. +- Drop faces detect option from Visual Recognition V3 node. +- Fix to URL parsing for bound services. +- STT token manager no longer in ibm-cloud-sdk-core +- Update language lists for STT, TTS, Language Translator and Document Translator Nodes ### Watson Nodes for Node-RED A collection of nodes to interact with the IBM Watson services in [IBM Cloud](http://bluemix.net). 
diff --git a/package.json b/package.json index 2a04c3ca..872b5f96 100644 --- a/package.json +++ b/package.json @@ -1,17 +1,18 @@ { "name": "node-red-node-watson", - "version": "0.7.8", + "version": "0.8.0", "description": "A collection of Node-RED nodes for IBM Watson services", "dependencies": { "async": "^1.5.2", - "cfenv": "~1.0.0", - "file-type": "^10.9.0", - "request": "~2.86.0", + "cfenv": "~1.2.2", + "file-type": "^12.4.2", + "request": "~2.88.0", "temp": "^0.9.0", "qs": "6.x", "image-type": "^2.0.2", "watson-developer-cloud": "^3.18.3", - "ibm-cloud-sdk-core": "^0.0.1", + "ibm-cloud-sdk-core": "^0.3.7", + "ibm-watson": "^5.2.1", "word-count": "^0.2.2", "is-docx": "^0.0.3", "stream-to-array": "^2.3.0", @@ -53,10 +54,11 @@ "watson-text-to-speech-v1": "services/text_to_speech/v1.js", "watson-text-to-speech-corpus-builder-v1": "services/text_to_speech/v1-corpus-builder.js", "watson-tone-analyzer-v3": "services/tone_analyzer/v3.js", - "watson-visual-recognition-v3": "services/visual_recognition/v3.js" + "watson-visual-recognition-v3": "services/visual_recognition/v3.js", + "watson-visual-recognition-collection-utils-v4": "services/visual_recognition/v4-collection-utils.js" } }, "engines": { - "node": ">=6.12.0" + "node": ">=10.0.0" } } diff --git a/services/language_translator/v3-doc.html b/services/language_translator/v3-doc.html index db2b354f..1b74bcca 100644 --- a/services/language_translator/v3-doc.html +++ b/services/language_translator/v3-doc.html @@ -191,7 +191,7 @@



- +

For more information about the Language Translator service, read the documentation. @@ -217,35 +217,44 @@ doctor.LANGUAGES = { 'ar' : 'Arabic', 'arz': 'Spoken Arabic', + 'bg' : 'Bulgarian', 'ca' : 'Catalan', 'cs' : 'Czech', 'da' : 'Danish', - 'en' : 'English' , + 'el' : 'Greek', + 'en' : 'English', 'es' : 'Spanish', + 'et' : 'Estonian', 'fr' : 'French', 'fi' : 'Finnish', + 'ga' : 'Galican', + 'he' : 'Hebrew', 'hi' : 'Hindi', + 'hr' : 'Croatian', 'hu' : 'Hungarian', + 'id' : 'Indonesian', 'it' : 'Italian', 'de' : 'German', 'ja' : 'Japanese', + 'lt' : 'Lithuanian', + 'ms' : 'Malay', 'pt' : 'Portuguese', 'ko' : 'Korean', 'nb' : 'Norwegian Bokmål', 'nl' : 'Dutch', 'pl' : 'Polish', + 'ro' : 'Romanian', 'ru' : 'Russian', + 'sk' : 'Slovak', + 'sl' : 'Slovenian', 'sv' : 'Swedish', + 'th' : 'Thai', 'tr' : 'Turkish', 'zh' : 'Chinese', 'zh-TW' : 'Taiwanese', 'zht' : 'Traditional Chinese' }; - - - - doctor.showSelectedFields = function(fields) { for (i = 0; i < fields.length; i++) { $(fields[i]).parent().show(); @@ -488,7 +497,7 @@ + '"' + lang + '"' + selectedText + '>' - + (tor.LANGUAGES[lang] ? tor.LANGUAGES[lang] : lang) + + (doctor.LANGUAGES[lang] ? 
doctor.LANGUAGES[lang] : lang) + ''); }); } diff --git a/services/language_translator/v3.html b/services/language_translator/v3.html index c22152b9..9ae9e443 100644 --- a/services/language_translator/v3.html +++ b/services/language_translator/v3.html @@ -228,25 +228,38 @@ tor.LANGUAGES = { 'ar' : 'Arabic', 'arz': 'Spoken Arabic', + 'bg' : 'Bulgarian', 'ca' : 'Catalan', 'cs' : 'Czech', 'da' : 'Danish', - 'en' : 'English' , + 'el' : 'Greek', + 'en' : 'English', 'es' : 'Spanish', + 'et' : 'Estonian', 'fr' : 'French', 'fi' : 'Finnish', + 'ga' : 'Galican', + 'he' : 'Hebrew', 'hi' : 'Hindi', + 'hr' : 'Croatian', 'hu' : 'Hungarian', + 'id' : 'Indonesian', 'it' : 'Italian', 'de' : 'German', 'ja' : 'Japanese', + 'lt' : 'Lithuanian', + 'ms' : 'Malay', 'pt' : 'Portuguese', 'ko' : 'Korean', 'nb' : 'Norwegian Bokmål', 'nl' : 'Dutch', 'pl' : 'Polish', + 'ro' : 'Romanian', 'ru' : 'Russian', + 'sk' : 'Slovak', + 'sl' : 'Slovenian', 'sv' : 'Swedish', + 'th' : 'Thai', 'tr' : 'Turkish', 'zh' : 'Chinese', 'zh-TW' : 'Taiwanese', diff --git a/services/personality_insights/v3.html b/services/personality_insights/v3.html index 6e78fcfd..c7258aa2 100644 --- a/services/personality_insights/v3.html +++ b/services/personality_insights/v3.html @@ -38,12 +38,7 @@

- - - -
-
- +
@@ -129,8 +124,7 @@ outputlang: {value: "en"}, rawscores: {value: false}, consumption: {value: false}, - 'default-endpoint' :{value: true}, - 'service-endpoint' :{value: 'https://gateway.watsonplatform.net/personality-insights/api'} + 'service-endpoint' :{value: ""} }, credentials: { username: {type:"text"}, @@ -149,15 +143,6 @@ return this.name ? "node_label_italic" : ""; }, oneditprepare: function() { - $('#node-input-default-endpoint').change(function () { - var checked = $('#node-input-default-endpoint').prop('checked') - if (checked) { - $('#node-input-service-endpoint').parent().hide(); - } else { - $('#node-input-service-endpoint').parent().show(); - } - }); - $.getJSON('watson-personality-insights-v3/vcap/') .done(function (service) { $('.credentials').toggle(!service); diff --git a/services/personality_insights/v3.js b/services/personality_insights/v3.js index 92200015..fafed441 100644 --- a/services/personality_insights/v3.js +++ b/services/personality_insights/v3.js @@ -15,9 +15,11 @@ **/ module.exports = function (RED) { - const SERVICE_IDENTIFIER = 'personality-insights'; + const SERVICE_IDENTIFIER = 'personality-insights', + PersonalityInsightsV3 = require('ibm-watson/personality-insights/v3'), + { IamAuthenticator } = require('ibm-watson/auth'); + var pkg = require('../../package.json'), - PersonalityInsightsV3 = require('watson-developer-cloud/personality-insights/v3'), payloadutils = require('../../utilities/payload-utils'), serviceutils = require('../../utilities/service-utils'), service = serviceutils.getServiceCreds(SERVICE_IDENTIFIER), @@ -30,7 +32,7 @@ module.exports = function (RED) { endpoint = '', sEndpoint = 'https://gateway.watsonplatform.net/personality-insights/api', - VALID_INPUT_LANGUAGES = ['ar','en','es','ja'], + VALID_INPUT_LANGUAGES = ['ar','en','es','ja', 'ko'], VALID_RESPONSE_LANGUAGES = ['ar','de','en','es','fr','it','ja','ko','pt-br','zh-cn','zh-tw']; if (service) { @@ -108,8 +110,8 @@ module.exports = function (RED) { params 
= { content: msg.payload, - consumption_preferences: config.consumption ? config.consumption : false, - raw_scores: config.rawscores ? config.rawscores : false, + consumptionPreferences: config.consumption ? config.consumption : false, + rawScores: config.rawscores ? config.rawscores : false, headers: { 'content-language': inputlang, 'accept-language': outputlang, @@ -118,9 +120,9 @@ module.exports = function (RED) { }; if ('string' === typeof msg.payload) { - params.content_type = 'text/plain'; + params.contentType = 'text/plain'; } else { - params.content_type = 'application/json'; + params.contentType = 'application/json'; } return Promise.resolve(params); @@ -128,7 +130,7 @@ module.exports = function (RED) { function setEndPoint(config) { endpoint = sEndpoint; - if ((!config['default-endpoint']) && config['service-endpoint']) { + if (config['service-endpoint']) { endpoint = config['service-endpoint']; } return Promise.resolve(); @@ -136,35 +138,42 @@ module.exports = function (RED) { function executeService(msg, params) { var p = new Promise(function resolver(resolve, reject) { - var personality_insights = null, - serviceSettings = { - version_date: '2017-10-13', + let personality_insights = null; + let authSettings = {}; + let serviceSettings = { + version: '2017-10-13', headers: { 'User-Agent': pkg.name + '-' + pkg.version } }; if (apikey) { - serviceSettings.iam_apikey = apikey; + authSettings.apikey = apikey; } else { - serviceSettings.username = username; - serviceSettings.password = password; + authSettings.username = username; + authSettings.password = password; } + serviceSettings.authenticator = new IamAuthenticator(authSettings); + if (endpoint) { serviceSettings.url = endpoint; } personality_insights = new PersonalityInsightsV3(serviceSettings); - personality_insights.profile(params, function(err, response){ - if (err) { - reject(err); - } else { - msg.insights = response; + personality_insights.profile(params) + .then((profile) => { + if (profile && 
profile.result) { + msg.insights = profile.result; + } else { + msg.insights = profile; + } resolve(); - } - }); + }) + .catch((err) => { + reject(err); + }) }); return p; @@ -177,7 +186,7 @@ module.exports = function (RED) { var node = this, message = ''; - this.on('input', function (msg) { + this.on('input', function(msg, send, done) { node.status({}); payloadCheck(msg) @@ -200,11 +209,13 @@ module.exports = function (RED) { }) .then(function(){ node.status({}); - node.send(msg); + send(msg); + done(); }) .catch(function(err){ payloadutils.reportError(node, msg, err); - node.send(msg); + send(msg); + done(err); }); }); } diff --git a/services/speech_to_text/v1.html b/services/speech_to_text/v1.html index 4b3c668d..833bed54 100644 --- a/services/speech_to_text/v1.html +++ b/services/speech_to_text/v1.html @@ -233,8 +233,15 @@ 'en-GB': 'UK English', 'de-DE': 'German', 'fr-FR': 'French', + 'nl-NL': 'Dutch', + 'it-IT': 'Italian', 'zh-CN': 'Mandarin', 'es-ES': 'Spanish', + 'es-CO': 'Spanish (Columbian)', + 'es-MX': 'Spanish (Mexican)', + 'es-CL': 'Spanish (Chilean)', + 'es-AR': 'Spanish (Argentinian)', + 'es-PE': 'Spanish (Peruvian)', 'ar-AR': 'Arablic', 'ko-KR': 'Korean', 'ja-JP': 'Japanese' diff --git a/services/speech_to_text/v1.js b/services/speech_to_text/v1.js index aa4ddc52..edd0fe2f 100644 --- a/services/speech_to_text/v1.js +++ b/services/speech_to_text/v1.js @@ -28,7 +28,10 @@ module.exports = function (RED) { iamutils = require('../../utilities/iam-utils'), sttutils = require('./stt-utils'), AuthV1 = require('watson-developer-cloud/authorization/v1'), - AuthIAMV1 = require('ibm-cloud-sdk-core/iam-token-manager/v1'), + //AuthIAMV1 = require('ibm-cloud-sdk-core/iam-token-manager/v1'), + AuthIAMV1 = require('ibm-cloud-sdk-core/auth/iam-token-manager-v1'), + //AuthIAMV1 = require('ibm-cloud-sdk-core/auth/token-managers/iam-token-manager'), + //{ IamTokenManager } = require('ibm-watson/auth'); muteMode = true, discardMode = false, autoConnect = true, username = 
'', password = '', sUsername = '', sPassword = '', apikey = '', sApikey = '', @@ -325,7 +328,9 @@ module.exports = function (RED) { // console.log('Creating token with endpoint ', endpoint); // tokenService = new AuthIAMV1.IamTokenManagerV1({iamApikey : apikey, iamUrl: endpoint}); + //tokenService = new AuthIAMV1({apikey : apikey}); tokenService = new AuthIAMV1.IamTokenManagerV1({iamApikey : apikey}); + //tokenService = new AuthIAMV1.IamTokenManager({apikey : apikey}); //tokenService = new iamutils(apikey); } else { @@ -442,13 +447,11 @@ module.exports = function (RED) { tokenPending = true; tokenService.getToken(function (err, res) { if (err) { - // console.log('Error getting token ', err); reject(err); } else { tokenPending = false; tokenTime = now; token = res; - // console.log('We have the token ', token); resolve(); } }); @@ -662,7 +665,6 @@ module.exports = function (RED) { .then(() => { switch (audioData.action) { case 'start': - // console.log('Its a start'); return processSTTSocketStart(true); case 'stop': delay = 2000; @@ -671,7 +673,6 @@ module.exports = function (RED) { // Add a Delay to allow the listening thread to kick in // Delays for Stop is longer, so that it doesn't get actioned // before the audio buffers. 
- // console.log('We have data'); setTimeout(() => { if (socketListening) { return sendAudioSTTSocket(audioData); diff --git a/services/text_to_speech/tts-utils.js b/services/text_to_speech/tts-utils.js index 91c76291..1c89f367 100644 --- a/services/text_to_speech/tts-utils.js +++ b/services/text_to_speech/tts-utils.js @@ -15,13 +15,15 @@ **/ const pkg = require('../../package.json'), - TextToSpeechV1 = require('watson-developer-cloud/text-to-speech/v1'); + TextToSpeechV1 = require('ibm-watson/text-to-speech/v1'), + { IamAuthenticator } = require('ibm-watson/auth'); class TTSUtils { constructor() { } static buildStdSettings (apikey, username, password, endpoint) { + let authSettings = {}; let serviceSettings = { headers: { 'User-Agent': pkg.name + '-' + pkg.version @@ -29,12 +31,14 @@ class TTSUtils { }; if (apikey) { - serviceSettings.iam_apikey = apikey; + authSettings.apikey = apikey; } else { - serviceSettings.username = username; - serviceSettings.password = password; + authSettings.username = username; + authSettings.password = password; } + serviceSettings.authenticator = new IamAuthenticator(authSettings); + if (endpoint) { serviceSettings.url = endpoint; } @@ -45,6 +49,7 @@ class TTSUtils { static initTTSService(req, sApikey, sUsername, sPassword, sEndpoint) { const endpoint = req.query.e ? req.query.e : sEndpoint; + let authSettings = {}; let serviceSettings = { url: endpoint, headers: { @@ -52,16 +57,17 @@ class TTSUtils { }}; if (sApikey || req.query.key) { - serviceSettings.iam_apikey = sApikey ? sApikey : req.query.key; + authSettings.apikey = sApikey ? sApikey : req.query.key; } else { - serviceSettings.username = sUsername ? sUsername : req.query.un; - serviceSettings.password = sPassword ? sPassword : req.query.pwd; + authSettings.username = sUsername ? sUsername : req.query.un; + authSettings.password = sPassword ? 
sPassword : req.query.pwd; } + serviceSettings.authenticator = new IamAuthenticator(authSettings); + return new TextToSpeechV1(serviceSettings); } - } module.exports = TTSUtils; diff --git a/services/text_to_speech/v1-corpus-builder.html b/services/text_to_speech/v1-corpus-builder.html index 1cab219a..bd6223f3 100644 --- a/services/text_to_speech/v1-corpus-builder.html +++ b/services/text_to_speech/v1-corpus-builder.html @@ -44,11 +44,6 @@
- - - -
-
@@ -203,6 +198,7 @@ 'fr-FR': 'French', 'it-IT': 'Italian', 'de-DE': 'German', + 'nl-NL': 'Dutch', 'zh-CN': 'Mandarin', 'es-ES': 'Spanish', 'es-LA': 'Latin American Spanish', @@ -232,7 +228,7 @@ var k = $('#node-input-apikey').val(); if ( (k && k.length) || (u && u.length && p) ) { - if (!ttsv1qbb.voices) { + if (!ttsv1qbb.voices) {; ttsv1qbb.getVoices(); } } @@ -381,14 +377,6 @@ } }); - $('#node-input-default-endpoint').change(function () { - var checked = $('#node-input-default-endpoint').prop('checked') - if (checked) { - $('#node-input-service-endpoint').parent().hide(); - } else { - $('#node-input-service-endpoint').parent().show(); - } - }); } // The dynamic nature of the selection fields in this node has caused problems. @@ -495,9 +483,8 @@ var e = $('#node-input-service-endpoint').val(); var creds = {un: u, pwd: p, key: k}; - if (! $('#node-input-default-endpoint').prop('checked')) { - creds.e = e; - } + creds.e = e; + $.getJSON('watson-text-to-speech-v1-query-builder/voices/',creds) .done(function (data) { if (data.error) { @@ -564,8 +551,7 @@ 'tts-voice-or-custom' : {value: ''}, 'tts-voice': {value: ""}, 'tts-voicehidden': {value: ""}, - 'default-endpoint' :{value: true}, - 'service-endpoint' :{value: 'https://stream.watsonplatform.net/text-to-speech/api'} + 'service-endpoint' :{value: ''} }, credentials: { username: {type:'text'} //, diff --git a/services/text_to_speech/v1-corpus-builder.js b/services/text_to_speech/v1-corpus-builder.js index c719c6b8..ae56c1e8 100644 --- a/services/text_to_speech/v1-corpus-builder.js +++ b/services/text_to_speech/v1-corpus-builder.js @@ -18,6 +18,7 @@ module.exports = function (RED) { const SERVICE_IDENTIFIER = 'text-to-speech'; var temp = require('temp'), fs = require('fs'), + fsp = require('fs').promises, fileType = require('file-type'), serviceutils = require('../../utilities/service-utils'), payloadutils = require('../../utilities/payload-utils'), @@ -47,160 +48,193 @@ module.exports = function (RED) { sEndpoint = 
service.url; } - function executeCreateCustomisation(node, tts, params, msg) { - tts.createCustomization(params, function (err, response) { - node.status({}); - if (err) { - payloadutils.reportError(node, msg, err); + function parseResponseFor(msg, response, field) { + if (response && response.result) { + if (response.result[field]) { + msg[field] = response.result[field]; } else { - msg['customization_id'] = response; + msg[field] = response.result; } - node.send(msg); + } else { + msg[field] = response; + } + } + + function executeCreateCustomisation(node, tts, params, msg) { + return new Promise(function resolver(resolve, reject) { + tts.createVoiceModel(params) + .then((response) => { + if (response && response.result) { + msg['customization_id'] = response.result; + } else { + msg['customization_id'] = response; + } + resolve(); + }) + .catch((err) => { + reject(err); + }); }); } function executeListCustomisations(node, tts, params, msg) { - tts.listVoiceModels(params, function (err, response) { - node.status({}); - if (err) { - payloadutils.reportError(node, msg, err); - } else { - msg['customizations'] = response.customizations ? - response.customizations: response; - } - node.send(msg); + return new Promise(function resolver(resolve, reject) { + tts.listVoiceModels(params) + .then((response) => { + parseResponseFor(msg, response, 'customizations'); + resolve(); + }) + .catch((err) => { + reject(err); + }); }); } function executeListVoices(node, tts, params, msg) { - tts.listVoices(params, function (err, response) { - node.status({}); - if (err) { - payloadutils.reportError(node, msg, err); - } else { - msg['voices'] = response.voices ? 
response.voices: response; - } - node.send(msg); + return new Promise(function resolver(resolve, reject) { + tts.listVoices(params) + .then((response) => { + parseResponseFor(msg, response, 'voices'); + resolve(); + }) + .catch((err) => { + reject(err); + }); }); } function executeGetCustomisation(node, tts, params, msg) { - tts.getVoiceModel(params, function (err, response) { - node.status({}); - if (err) { - payloadutils.reportError(node, msg, err); - } else { - msg['customization'] = response ; - } - node.send(msg); + return new Promise(function resolver(resolve, reject) { + tts.getVoiceModel(params) + .then((response) => { + if (response && response.result) { + msg['customization'] = response.result; + } else { + msg['customization'] = response; + } + resolve(); + }) + .catch((err) => { + reject(err); + }); }); } function executeDeleteCustomisation(node, tts, params, msg) { - tts.deleteVoiceModel(params, function (err, response) { - node.status({}); - if (err) { - payloadutils.reportError(node, msg, err); - } else { - msg['response'] = response ; - } - node.send(msg); + return new Promise(function resolver(resolve, reject) { + tts.deleteVoiceModel(params) + .then((response) => { + msg['response'] = response; + resolve(); + }) + .catch((err) => { + reject(err); + }) }); } function executeGetPronounce(node, tts, params, msg) { - tts.getPronunciation(params, function (err, response) { - node.status({}); - if (err) { - payloadutils.reportError(node, msg, err); - } else { - msg['pronunciation'] = response.pronunciation ? 
- response.pronunciation : response; - } - node.send(msg); + return new Promise(function resolver(resolve, reject) { + tts.getPronunciation(params) + .then((response) => { + parseResponseFor(msg, response, 'pronunciation'); + resolve(); + }) + .catch((err) => { + reject(err); + }); }); } function executeAddWords(node, tts, params, msg) { - tts.addWords(params, function (err, response) { - node.status({}); - if (err) { - payloadutils.reportError(node, msg, err); - } else { - msg['addwordsresponse'] = response ; - } - node.send(msg); + return new Promise(function resolver(resolve, reject) { + tts.addWords(params) + .then((response) => { + msg['addwordsresponse'] = response; + resolve(); + }) + .catch((err) => { + reject(err); + }) }); } function executeGetWords(node, tts, params, msg) { - tts.listWords(params, function (err, response) { - node.status({}); - if (err) { - payloadutils.reportError(node, msg, err); - } else { - msg['words'] = response.words ? response.words: response; - } - node.send(msg); + return new Promise(function resolver(resolve, reject) { + tts.listWords(params) + .then((response) => { + parseResponseFor(msg, response, 'words'); + resolve(); + }) + .catch((err) => { + reject(err); + }); }); } function executeDeleteWord(node, tts, params, msg) { - tts.deleteWord(params, function (err, response) { - node.status({}); - if (err) { - payloadutils.reportError(node, msg, err); - } else { - msg['deletewordsresponse'] = response; - } - node.send(msg); + return new Promise(function resolver(resolve, reject) { + tts.deleteWord(params) + .then((response) => { + msg['deletewordsresponse'] = response; + resolve(); + }) + .catch((err) => { + reject(err); + }) }); } function executeUnknownMethod(node, tts, params, msg) { - payloadutils.reportError(node, msg, 'Unknown Mode'); - msg.error = 'Unable to process as unknown mode has been specified'; - node.send(msg); + return new Promise(function resolver(resolve, reject) { + payloadutils.reportError(node, msg, 
'Unknown Mode'); + msg.error = 'Unable to process as unknown mode has been specified'; + node.send(msg); + resolve(); + }); } function executeMethod(node, method, params, msg) { - var tts = ttsutils.buildStdSettings(apikey, username, password, endpoint); + let tts = ttsutils.buildStdSettings(apikey, username, password, endpoint); + let p = null; node.status({fill:'blue', shape:'dot', text:'executing'}); switch (method) { case 'createCustomisation': - executeCreateCustomisation(node, tts, params, msg); + p = executeCreateCustomisation(node, tts, params, msg); break; case 'listCustomisations': - executeListCustomisations(node, tts, params, msg); + p = executeListCustomisations(node, tts, params, msg); break; case 'listVoices': - executeListVoices(node, tts, params, msg); + p = executeListVoices(node, tts, params, msg); break; case 'getCustomisation': - executeGetCustomisation(node, tts, params, msg); + p = executeGetCustomisation(node, tts, params, msg); break; case 'deleteCustomisation': - executeDeleteCustomisation(node, tts, params, msg); + p = executeDeleteCustomisation(node, tts, params, msg); break; case 'getPronounce': - executeGetPronounce(node, tts, params, msg); + p = executeGetPronounce(node, tts, params, msg); break; case 'addWords': - executeAddWords(node, tts, params, msg); + p = executeAddWords(node, tts, params, msg); break; case 'getWords': - executeGetWords(node, tts, params, msg); + p = executeGetWords(node, tts, params, msg); break; case 'deleteWord': - executeDeleteWord(node, tts, params, msg); + p = executeDeleteWord(node, tts, params, msg); break; default: - executeUnknownMethod(node, tts, params, msg); + p = executeUnknownMethod(node, tts, params, msg); break; } + + return p; } function setFileParams(method, params, msg) { @@ -213,51 +247,47 @@ module.exports = function (RED) { } function loadFile(node, method, params, msg) { - temp.open({ - suffix: '.txt' - }, function(err, info) { - if (err) { - node.status({ - fill: 'red', - shape: 'dot', - 
text: 'Error receiving the data buffer for training' - }); - throw err; - } - - // Syncing up the asynchronous nature of the stream - // so that the full file can be sent to the API. - fs.writeFile(info.path, msg.payload, function(err) { + return new Promise(function resolver(resolve, reject) { + temp.open({ + suffix: '.txt' + }, function(err, info) { if (err) { node.status({ fill: 'red', shape: 'dot', - text: 'Error processing data buffer for training' + text: 'Error receiving the data buffer for training' }); throw err; } - switch (method) { - case 'addWords': - try { - params.words = JSON.parse(fs.readFileSync(info.path, 'utf8')); - } catch (err) { - params.words = fs.createReadStream(info.path); - } - } - - executeMethod(node, method, params, msg); - temp.cleanup(); + // Syncing up the asynchronous nature of the stream + // so that the full file can be sent to the API. + fsp.writeFile(info.path, msg.payload) + .then(() => { + switch (method) { + case 'addWords': + try { + params.words = JSON.parse(fs.readFileSync(info.path, 'utf8')); + } catch (err) { + params.words = fs.createReadStream(info.path); + } + } + resolve(); + }) + .catch((err) => { + reject(err); + }) }); + }); } function checkForFile(method) { switch (method) { case 'addWords': - return true; + return Promise.resolve(true); } - return false; + return Promise.resolve(false); } function paramsForNewCustom(config) { @@ -286,13 +316,12 @@ module.exports = function (RED) { } if ('custom' === config['tts-voice-or-custom']) { if (config['tts-custom-id']) { - params['customization_id'] = config['tts-custom-id']; + params['customizationId'] = config['tts-custom-id']; } } else if ( config['tts-voice'] ) { params['voice'] = config['tts-voice']; } - console.log('Params will be :', params); return params; } @@ -318,7 +347,7 @@ module.exports = function (RED) { case 'addWords': case 'getWords': if (config['tts-custom-id']) { - params['customization_id'] = config['tts-custom-id']; + params['customizationId'] = 
config['tts-custom-id']; } break; } @@ -335,21 +364,24 @@ module.exports = function (RED) { res.json(service ? {bound_service: true} : null); }); - // API used by widget to fetch available voices RED.httpAdmin.get('/watson-text-to-speech-v1-query-builder/voices', function (req, res) { var tts = ttsutils.initTTSService(req, sApikey, sUsername, sPassword, sEndpoint); - tts.listVoices({}, function(err, voices){ - if (err) { + tts.listVoices({}) + .then((response) => { + let voices = response; + if (response.result) { + voices = response.result; + } + res.json(voices); + }) + .catch((err) => { if (!err.error) { err.error = 'Error ' + err.code + ' in fetching voices'; } res.json(err); - } else { - res.json(voices); - } - }); + }); }); // This is the Speech to Text V1 Query Builder Node @@ -357,7 +389,7 @@ module.exports = function (RED) { RED.nodes.createNode(this, config); var node = this; - this.on('input', function (msg) { + this.on('input', function(msg, send, done) { var method = config['tts-custom-mode'], message = '', params = {}; @@ -367,7 +399,7 @@ module.exports = function (RED) { apikey = sApikey || this.credentials.apikey || config.apikey; endpoint = sEndpoint; - if ((!config['default-endpoint']) && config['service-endpoint']) { + if (config['service-endpoint']) { endpoint = config['service-endpoint']; } @@ -384,16 +416,32 @@ module.exports = function (RED) { return; } - if (checkForFile(method)) { - if (msg.payload instanceof Buffer) { - loadFile(node, method, params, msg); - return; - } - params = setFileParams(method, params, msg); - } - - executeMethod(node, method, params, msg); - }); + checkForFile(method) + .then((lookForBuffer) => { + if (msg.payload instanceof Buffer) { + console.log('Processing as a Buffer'); + return loadFile(node, method, params, msg); + } else { + params = setFileParams(method, params, msg); + return Promise.resolve(); + } + }) + .then(() => { + return executeMethod(node, method, params, msg); + }) + .then(() => { + 
node.status({}); + send(msg); + temp.cleanup(); + done(); + }) + .catch((err) => { + node.status({}); + payloadutils.reportError(node, msg, err); + temp.cleanup(); + done(err); + }) + }); } RED.nodes.registerType('watson-text-to-speech-v1-query-builder', Node, { diff --git a/services/text_to_speech/v1.html b/services/text_to_speech/v1.html index 0d620f89..afb62c11 100644 --- a/services/text_to_speech/v1.html +++ b/services/text_to_speech/v1.html @@ -44,11 +44,6 @@
- - - -
-
@@ -130,6 +125,7 @@ 'fr-FR': 'French', 'it-IT': 'Italian', 'de-DE': 'German', + 'nl-NL': 'Dutch', 'zh-CN': 'Mandarin', 'es-ES': 'Spanish', 'es-LA': 'Latin American Spanish', @@ -246,9 +242,8 @@ var e = $('#node-input-service-endpoint').val(); var creds = {un: u, pwd: p, key: k}; - if (! $('#node-input-default-endpoint').prop('checked')) { - creds.e = e; - } + creds.e = e; + $.getJSON('watson-text-to-speech/customs', creds) .done(function (data) { $('label#node-label-message').parent().hide(); @@ -292,9 +287,7 @@ var e = $('#node-input-service-endpoint').val(); var creds = {un: u, pwd: p, key: k}; - if (! $('#node-input-default-endpoint').prop('checked')) { - creds.e = e; - } + creds.e = e; $.getJSON('watson-text-to-speech/voices/', creds) .done(function (data) { @@ -434,14 +427,6 @@ $('#node-input-langcustom').change(function (val) { tts.custom_selected = $('#node-input-langcustom').val(); }); - $('#node-input-default-endpoint').change(function () { - var checked = $('#node-input-default-endpoint').prop('checked') - if (checked) { - $('#node-input-service-endpoint').parent().hide(); - } else { - $('#node-input-service-endpoint').parent().show(); - } - }); } // Function to be used at the start, as don't want to expose any fields, unless the models are @@ -498,8 +483,7 @@ password: {value: ''}, apikey: {value: ''}, 'payload-response' :{value: false}, - 'default-endpoint' :{value: true}, - 'service-endpoint' :{value: 'https://stream.watsonplatform.net/text-to-speech/api'} + 'service-endpoint' :{value: ""} }, credentials: { username: {type:"text"}, diff --git a/services/text_to_speech/v1.js b/services/text_to_speech/v1.js index da5416ea..f2544bd5 100644 --- a/services/text_to_speech/v1.js +++ b/services/text_to_speech/v1.js @@ -16,8 +16,8 @@ module.exports = function(RED) { const SERVICE_IDENTIFIER = 'text-to-speech'; + var pkg = require('../../package.json'), - TextToSpeechV1 = require('watson-developer-cloud/text-to-speech/v1'), serviceutils = 
require('../../utilities/service-utils'), payloadutils = require('../../utilities/payload-utils'), ttsutils = require('./tts-utils'), @@ -54,28 +54,36 @@ module.exports = function(RED) { RED.httpAdmin.get('/watson-text-to-speech/voices', function (req, res) { var tts = ttsutils.initTTSService(req, sApikey, sUsername, sPassword, sEndpoint); - tts.listVoices({}, function(err, voices){ - if (err) { + tts.listVoices({}) + .then((response) => { + let voices = response; + if (response.result) { + voices = response.result; + } + res.json(voices); + }) + .catch((err) => { if (!err.error) { err.error = 'Error ' + err.code + ' in fetching voices'; } res.json(err); - } else { - res.json(voices); - } - }); + }); }); // API used by widget to fetch available customisations RED.httpAdmin.get('/watson-text-to-speech/customs', function (req, res) { var tts = ttsutils.initTTSService(req, sApikey, sUsername, sPassword, sEndpoint); - tts.listVoiceModels({}, function(err, customs){ - if (err) { - res.json(err); - } else { - res.json(customs); + tts.listVoiceModels({}) + .then((response) => { + let customs = response; + if (response.result) { + customs = response.result; } + res.json(customs); + }) + .catch((err) => { + res.json(err); }); }); @@ -106,42 +114,59 @@ module.exports = function(RED) { // Check the params for customisation options if (config.langcustom && 'NoCustomisationSetting' !== config.langcustom) { - params.customization_id = config.langcustom; + params.customizationId = config.langcustom; } return Promise.resolve(params); } function performTTS(msg, params) { var p = new Promise(function resolver(resolve, reject) { - var tts = ttsutils.buildStdSettings(apikey, username, password, endpoint); + let tts = ttsutils.buildStdSettings(apikey, username, password, endpoint); - tts.synthesize(params, function (err, body, response) { - if (err) { - reject(err); - } else { + tts.synthesize(params) + .then((body) => { resolve(body); - } - }); + }) + .catch((err) => { + reject(err); 
+ }); + }); return p; } - function processResponse(msg, body) { - msg.speech = body; - if (config['payload-response']) { + function processResponse(msg, data) { + return new Promise(function resolver(resolve, reject) { + let body = data + if (data && data.result) { + body = data.result; + } + + let tmpHolder = msg.payload; msg.payload = body; - } - return Promise.resolve(); + + payloadutils.checkForStream(msg) + .then(() => { + if (! config['payload-response']) { + msg.speech = msg.payload; + msg.payload = tmpHolder; + } + resolve(); + }) + .catch((err) => { + reject(err); + }); + }); } - this.on('input', function(msg) { + this.on('input', function(msg, send, done) { username = sUsername || this.credentials.username; password = sPassword || this.credentials.password || config.password; apikey = sApikey || this.credentials.apikey || config.apikey; endpoint = sEndpoint; - if ((!config['default-endpoint']) && config['service-endpoint']) { + if (config['service-endpoint']) { endpoint = config['service-endpoint']; } @@ -163,10 +188,12 @@ module.exports = function(RED) { }) .then(function(){ node.status({}); - node.send(msg); + send(msg); + done(); }) .catch(function(err){ payloadutils.reportError(node,msg,err); + done(err); }); }) } diff --git a/services/tone_analyzer/v3.html b/services/tone_analyzer/v3.html index 3e1d51c3..d5e7ca3d 100644 --- a/services/tone_analyzer/v3.html +++ b/services/tone_analyzer/v3.html @@ -38,12 +38,7 @@
- - - -
-
- +
@@ -104,10 +99,26 @@

The service response will be returned on msg.response.

The tone and sentences can be programmatically set in msg.tones and msg.sentences

-

Usng the node editor dialog users can filter the results by tone (emotion, language or social) and +

Using the node editor dialog users can filter the results by tone (emotion, language or social) and whether to include sentence-level analysis.

When running the Conversational Chat Tone, the input needs to follow the chat JSON format for utterances.

+ + msg.payload = [ + { + text: "Hello, I'm having a problem with your product.", + user: "customer", + }, + { + text: "OK, let me know what's going on, please.", + user: "agent", + }, + { + text: "Well, nothing is working :(", + user: "customer", + } + ]; +

For more information about the Tone Analyzer service, read the documentation.

@@ -177,15 +188,6 @@ var method = $('#node-input-tone-method').val(); ToneV3.processSelectedVersion(method, version); }); - - $('#node-input-default-endpoint').change(function () { - var checked = $('#node-input-default-endpoint').prop('checked') - if (checked) { - $('#node-input-service-endpoint').parent().hide(); - } else { - $('#node-input-service-endpoint').parent().show(); - } - }); } ToneV3.checkForPrepare = function () { @@ -219,8 +221,7 @@ "interface-version": {value:"generalTone"}, name: {value: ""}, inputlang: {value: "en"}, - 'default-endpoint' :{value: true}, - 'service-endpoint' :{value: 'https://gateway.watsonplatform.net/tone-analyzer/api'} + 'service-endpoint' :{value: ""} }, credentials: { username: {type:'text'}, diff --git a/services/tone_analyzer/v3.js b/services/tone_analyzer/v3.js index 6fda46db..00ac08c7 100644 --- a/services/tone_analyzer/v3.js +++ b/services/tone_analyzer/v3.js @@ -15,9 +15,11 @@ **/ module.exports = function (RED) { - const SERVICE_IDENTIFIER = 'tone-analyzer'; + const SERVICE_IDENTIFIER = 'tone-analyzer', + ToneAnalyzerV3 = require('ibm-watson/tone-analyzer/v3'), + { IamAuthenticator } = require('ibm-watson/auth'); + var pkg = require('../../package.json'), - ToneAnalyzerV3 = require('watson-developer-cloud/tone-analyzer/v3'), serviceutils = require('../../utilities/service-utils'), payloadutils = require('../../utilities/payload-utils'), toneutils = require('../../utilities/tone-utils'), @@ -97,22 +99,26 @@ module.exports = function (RED) { function invokeService(config, options, settings) { - var serviceSettings = { - version_date: '2017-09-21', + let authSettings = {}; + + let serviceSettings = { + version: '2017-09-21', headers: { 'User-Agent': pkg.name + '-' + pkg.version } }; if (settings.iam_apikey) { - serviceSettings.iam_apikey = settings.iam_apikey; + authSettings.apikey = settings.iam_apikey; } else { - serviceSettings.username = settings.username; - serviceSettings.password = settings.password; + 
authSettings.username = settings.username; + authSettings.password = settings.password; } + serviceSettings.authenticator = new IamAuthenticator(authSettings); + endpoint = sEndpoint; - if ((!config['default-endpoint']) && config['service-endpoint']) { + if (config['service-endpoint']) { endpoint = config['service-endpoint']; } @@ -121,7 +127,7 @@ module.exports = function (RED) { } if (config['interface-version']) { - serviceSettings.version_date = config['interface-version']; + serviceSettings.version = config['interface-version']; } const tone_analyzer = new ToneAnalyzerV3(serviceSettings); @@ -132,17 +138,17 @@ module.exports = function (RED) { case 'generalTone' : break; case 'customerEngagementTone' : - m = 'tone_chat'; + m = 'toneChat'; break; } - tone_analyzer[m](options, function (err, response) { - if (err) { - reject(err); - } else { + tone_analyzer[m](options) + .then((response) => { resolve(response); - } - }); + }) + .catch((err) => { + reject(err); + }) }); return p; @@ -150,7 +156,7 @@ module.exports = function (RED) { // function when the node recieves input inside a flow. // Configuration is first checked before the service is invoked. - var processOnInput = function(msg, config, node) { + var processOnInput = function(msg, send, done, config, node) { checkConfiguration(msg, node) .then(function(settings) { var options = toneutils.parseOptions(msg, config); @@ -160,13 +166,19 @@ module.exports = function (RED) { }) .then(function(data){ node.status({}) - msg.response = data; - node.send(msg); + if (data && data.result) { + msg.response = data.result; + } else { + msg.response = data; + } + send(msg); node.status({}); + done(); }) .catch(function(err){ payloadutils.reportError(node,msg,err); - node.send(msg); + send(msg); + done(err); }); } @@ -177,8 +189,8 @@ module.exports = function (RED) { var node = this; // Invoked when the node has received an input as part of a flow. 
- this.on('input', function (msg) { - processOnInput(msg, config, node); + this.on('input', function(msg, send, done) { + processOnInput(msg, send, done, config, node); }); } diff --git a/services/visual_recognition/v3.html b/services/visual_recognition/v3.html index 0bc3a7e6..1e24a82a 100644 --- a/services/visual_recognition/v3.html +++ b/services/visual_recognition/v3.html @@ -26,17 +26,13 @@
- +
@@ -74,14 +70,11 @@
- +
- +