+ {
+ cards2.map((card, index) => (
+
+
{card.title}
+
+ {card.icon}
+
+
+
{card.description}
+
+
+ ))
+ }
+
*/}
+
+ >
)
}
-export default AiServices
\ No newline at end of file
+export default AiServices
diff --git a/src/Pages/SpeechToText.jsx b/src/Pages/SpeechToText.jsx
new file mode 100644
index 0000000..5583693
--- /dev/null
+++ b/src/Pages/SpeechToText.jsx
@@ -0,0 +1,89 @@
+import React, { useState } from 'react';
+import axios from 'axios';
+import { ReactMic } from 'react-mic';
+import Swal from 'sweetalert2';
+
+const SpeechToText = ({ isDarkMode }) => {
+ const [recording, setRecording] = useState(false);
+ const [transcript, setTranscript] = useState('');
+ const [audioBlob, setAudioBlob] = useState(null);
+ const [recognizing, setRecognizing] = useState(false);
+
+ const startRecording = () => {
+ setRecording(true);
+ };
+
+ const stopRecording = () => {
+ setRecording(false);
+ };
+
+ const onData = (recordedBlob) => {
+ // onData receives live audio blob chunks while recording; no real-time processing is needed here.
+ };
+
+ const onStop = (recordedBlob) => {
+ setAudioBlob(recordedBlob.blob);
+ transcribeAudio(recordedBlob.blob);
+ };
+
+ const transcribeAudio = async (audioBlob) => {
+ setRecognizing(true);
+ try {
+ const formData = new FormData();
+ formData.append('file', audioBlob, 'audio.wav');
+ formData.append('model', 'whisper-1');
+
+ const response = await axios.post('https://api.openai.com/v1/audio/transcriptions', formData, {
+ headers: {
+ 'Content-Type': 'multipart/form-data',
+ Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
+ },
+ });
+
+ setTranscript(response.data.text);
+ } catch (error) {
+ console.error('Error transcribing audio:', error.response ? error.response.data : error.message);
+ Swal.fire({
+ icon: 'error',
+ title: 'Oops...',
+ text: 'Failed to transcribe audio. Please try again later.',
+ });
+ } finally {
+ setRecognizing(false);
+ }
+ };
+
+ return (
+