Mbed Clock application using an NTP connection to get internet time and a terminal interface to send commands

Dependencies:   4DGL-uLCD-SE EthernetInterface NTPClient mbed-rtos mbed SDFileSystem wavfile
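
For context, the clock's time sync works by bringing up Ethernet and then asking an NTP server to set the on-board RTC. Below is a minimal sketch of that flow, assuming the mbed-2 style APIs of the listed EthernetInterface and NTPClient libraries; the NTP host, serial pins, messages, and error handling are illustrative and not taken from this repository.

    #include "mbed.h"
    #include "EthernetInterface.h"
    #include "NTPClient.h"

    Serial pc(USBTX, USBRX);                     // terminal interface on the USB serial port

    int main() {
        EthernetInterface eth;
        eth.init();                              // bring up Ethernet with DHCP
        eth.connect();
        pc.printf("IP address: %s\r\n", eth.getIPAddress());

        NTPClient ntp;
        if (ntp.setTime("0.pool.ntp.org") == NTP_OK) {   // set the mbed RTC from NTP
            time_t now = time(NULL);
            pc.printf("Time set to: %s\r\n", ctime(&now));
        } else {
            pc.printf("NTP update failed\r\n");
        }
        eth.disconnect();
    }

The file removed in this revision is the PC-side speech server that turned spoken phrases into clock commands and returned them to the board over TCP.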

Revision: 6:23c3adb0470d
Parent:   5:ba94b62d86c9
--- a/SpeechServerSource/SpeechServer.c	Tue Dec 09 15:53:31 2014 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,204 +0,0 @@
-// NOTE: this is actually C# source (.cs). Mbed repositories only accept .c and .cpp files, so the code is kept here inside a block comment.
-/*
-using System;
-using System.IO;
-using System.Net;
-using System.Net.Sockets;
-using System.Text;
-using System.Speech.Recognition;
-using System.Speech.Recognition.SrgsGrammar;
-using System.Collections.Generic;
-using System.Timers;
-using System.Speech.Synthesis;
-using System.Speech.AudioFormat;
-
-namespace SpeechServer
-{
-    class Program
-    {
-        static Timer timeout;            // fires when recognition produces no result within 2 s
-        static bool responseSent;        // set once a reply has been written back to the client
-        static TcpClient client;         // currently connected TCP client (the mbed clock)
-        static SpeechSynthesizer synth;  // reserved for spoken responses (see synth_SpeakCompleted)
-
-        static void Main(string[] args)
-        {
-            timeout = new Timer(2000);
-            timeout.AutoReset = false;
-            timeout.Elapsed += OnSpeechRecognitionFailed;
-            responseSent = false;
-
-            Grammar g = new Grammar(new SrgsDocument("grammar.xml"));
-
-            // Recognizing Speech
-            SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine();
-            recognizer.LoadGrammarAsync(g);
-            recognizer.SpeechRecognized +=
-                new EventHandler<SpeechRecognizedEventArgs>(recognizer_SpeechRecognized);
-            recognizer.SpeechRecognitionRejected +=
-                new EventHandler<SpeechRecognitionRejectedEventArgs>(recognizer_SpeechRecognitionRejected);
-            // Synthesizing Speech
-            synth = new SpeechSynthesizer();
-            synth.SpeakCompleted += new EventHandler<SpeakCompletedEventArgs>(synth_SpeakCompleted);
-            
-            TcpListener server = null;
-            try {
-                Int32 port = 13000;
-                //Console.WriteLine("What IP Address?");
-                //String ip = Console.ReadLine();
-                //IPAddress hostaddr = IPAddress.Parse(ip);
-                IPHostEntry host = Dns.GetHostEntry("laptop-pc");
-                IPAddress hostaddr = IPAddress.Any;   // listen on all interfaces if no IPv4 address is found below
-                foreach (IPAddress ipaddr in host.AddressList) {
-                    if (ipaddr.AddressFamily == AddressFamily.InterNetwork)
-                    {
-                        hostaddr = ipaddr;
-                    }
-                }
-                Console.WriteLine("Listening on Address: {0}", hostaddr.ToString());
-
-                server = new TcpListener(hostaddr, port);
-                server.Start();
-
-                Byte[] bytes = new Byte[1024];
-
-                while (true)
-                {
-                    Console.Write("Waiting for a connection...");
-                    client = server.AcceptTcpClient();
-                    Console.WriteLine("Connected!");
-                    recognizer.SetInputToNull();
-
-                    NetworkStream stream = client.GetStream();
-                    int i;
-
-                    using (FileStream fs = File.Open("test.wav", FileMode.Create, FileAccess.Write, FileShare.None))
-                    {
-                        // read until the expected 110296-byte WAV clip has arrived
-                        int sum = 0;
-                        while (sum < 110296)
-                        {
-                            i = stream.Read(bytes, 0, bytes.Length);
-                            if (i == 0) break;   // connection closed before the full clip arrived
-                            sum += i;
-                            fs.Write(bytes, 0, i);
-                        }
-                    }
-
-                    recognizer.SetInputToWaveFile("test.wav");
-                    recognizer.RecognizeAsync(RecognizeMode.Single);
-                    timeout.Start();
-
-                    while (!responseSent)
-                    {
-                        System.Threading.Thread.Sleep(100);
-                    }
-                    responseSent = false;
-                    System.Threading.Thread.Sleep(1000);
-                    client.Close();
-                }
-            }
-            catch (SocketException e)
-            {
-                Console.WriteLine("SocketException: {0}", e);
-            }
-            finally
-            {
-                if (server != null)
-                {
-                    server.Stop();
-                }
-            }
-            Console.WriteLine("\nHit enter to continue...");
-            Console.Read();
-        }
-
-        static void recognizer_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
-        {
-            timeout.Stop();
-            String toSend = e.Result.Semantics["command"].Value.ToString();
-            String minute = "0";
-            String hour = "0";
-            String period = "0";
-            String timezone = "0";
-
-            // The following code illustrates some of the information available
-            // in the recognition result.
-            Console.WriteLine("Recognition result summary:");
-            Console.WriteLine(
-              "  Recognized phrase: {0}\n" +
-              "  Confidence score {1}\n" +
-              "  Grammar used: {2}\n",
-              e.Result.Text, e.Result.Confidence, e.Result.Grammar.Name);
-
-            // Display the semantic values in the recognition result.
-            Console.WriteLine("  Semantic results:");
-            foreach (KeyValuePair<String, SemanticValue> child in e.Result.Semantics)
-            {
-                Console.WriteLine("    The {0} value is {1}",
-                  child.Key, child.Value.Value ?? "null");
-            }
-            if (e.Result.Semantics.ContainsKey("time"))
-            {
-                Console.WriteLine("  In Time:");
-                foreach (KeyValuePair<String, SemanticValue> child in e.Result.Semantics["time"])
-                {
-                    Console.WriteLine("    The {0} value is {1}",
-                      child.Key, child.Value.Value ?? "null");
-                }
-                hour = e.Result.Semantics["time"]["hour"].Value.ToString();
-                minute = e.Result.Semantics["time"]["minute"].Value.ToString();
-                period = e.Result.Semantics["time"]["period"].Value.ToString();
-            }
-            else if (e.Result.Semantics.ContainsKey("length"))
-            {
-                Console.WriteLine("  In Length:");
-                foreach (KeyValuePair<String, SemanticValue> child in e.Result.Semantics["length"])
-                {
-                    Console.WriteLine("    The {0} value is {1}",
-                      child.Key, child.Value.Value ?? "null");
-                }
-                hour = e.Result.Semantics["length"]["hour"].Value.ToString();
-                minute = e.Result.Semantics["length"]["minute"].Value.ToString();
-            }
-            else if (e.Result.Semantics.ContainsKey("zone"))
-            {
-                timezone = e.Result.Semantics["zone"].Value.ToString();
-            }
-            toSend += " " + hour + " " + minute + " " + period + " " + timezone;
-            Console.WriteLine("To Send: \"{0}\"", toSend);
-            Console.WriteLine();
-
-            byte[] msg = System.Text.Encoding.ASCII.GetBytes(toSend);
-
-            client.GetStream().Write(msg, 0, msg.Length);
-            responseSent = true;
-        }
-
-        static void recognizer_SpeechRecognitionRejected(object sender, SpeechRecognitionRejectedEventArgs e)
-        {
-            Console.WriteLine("Speech input was rejected.");
-            foreach (RecognizedPhrase phrase in e.Result.Alternates)
-            {
-                Console.WriteLine("  Rejected phrase: " + phrase.Text);
-                Console.WriteLine("  Confidence score: " + phrase.Confidence);
-            }
-        }
-
-        private static void OnSpeechRecognitionFailed(Object source, ElapsedEventArgs e)
-        {
-            Console.WriteLine("The Elapsed event was raised at {0}", e.SignalTime);
-
-            byte[] msg = System.Text.Encoding.ASCII.GetBytes("noCommand 0 0 0 0");
-
-            client.GetStream().Write(msg, 0, msg.Length);
-
-            responseSent = true;
-
-            //synth.SetOutputToWaveFile("response.wav", new SpeechAudioFormatInfo(11025, AudioBitsPerSample.Sixteen, AudioChannel.Mono));
-            //synth.SpeakAsync("testing this");
-        }
-
-        static void synth_SpeakCompleted(object sender, SpeakCompletedEventArgs e)
-        {
-            synth.SetOutputToNull();
-            Console.WriteLine("Speaking Complete");
-        }
-    }
-}
-*/
\ No newline at end of file
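
For reference, the server removed above expects a client to connect on TCP port 13000, stream a 110296-byte WAV clip, and then read back one ASCII line of the form "command hour minute period timezone" (or "noCommand 0 0 0 0" when recognition is rejected or times out). A hypothetical sketch of the matching mbed-side call, assuming the TCPSocketConnection API from the listed EthernetInterface library, could look like the following; the function name, server address, and buffer sizes are illustrative only.

    #include "mbed.h"
    #include "EthernetInterface.h"

    #define SPEECH_SERVER "192.168.1.10"   // placeholder address of the PC running SpeechServer
    #define SPEECH_PORT   13000

    // Stream an already-recorded WAV file to the speech server and parse its reply.
    // Returns 0 on success, -1 on any network or parse error.
    int sendClipAndGetCommand(FILE *wav, char *command, int *hour, int *minute,
                              char *period, char *zone) {
        TCPSocketConnection sock;
        if (sock.connect(SPEECH_SERVER, SPEECH_PORT) < 0) return -1;

        char buf[1024];
        int n;
        while ((n = (int)fread(buf, 1, sizeof(buf), wav)) > 0) {   // send the clip in 1 kB chunks
            sock.send(buf, n);
        }

        n = sock.receive(buf, sizeof(buf) - 1);                    // reply is a single short ASCII line
        sock.close();
        if (n <= 0) return -1;
        buf[n] = '\0';

        // e.g. "<command> 7 30 pm 0", or "noCommand 0 0 0 0" on failure; the command
        // vocabulary is defined by grammar.xml on the PC side.
        return (sscanf(buf, "%s %d %d %s %s", command, hour, minute, period, zone) == 5) ? 0 : -1;
    }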