Code for controlling mbed hardware (LEDs, motors), as well as code for the Raspberry Pi to run a Support Vector Machine that identifies objects using the Pi camera.
Dependencies: mbed Motordriver mbed-rtos PololuLedStrip
pi/main.py@3:a3ed7ff99772, 2019-12-06 (annotated)
- Committer:
- arogliero3
- Date:
- Fri Dec 06 00:58:02 2019 -0500
- Revision:
- 3:a3ed7ff99772
- Parent:
- 1:183a8efd562a
update img6
Who changed what in which revision?
User | Revision | Line number | New contents of line |
---|---|---|---|
"""Raspberry Pi main loop.

Captures frames from the Pi camera, classifies each frame with an SVM-based
ImageClassifier, and sends a one-byte command over serial to the mbed board
based on the predicted label ("disco" -> b"d", "tornado" -> b"t").
"""

import io
import time

import numpy as np
import picamera
import serial
from PIL import Image
from joblib import dump, load

from Classifier import ImageClassifier

print("Starting camera")
camera = picamera.PiCamera()
camera.resolution = (352, 240)
camera.color_effects = (128, 128)  # turn camera to black and white

# Serial link to the mbed microcontroller; brief pause lets the port settle.
mbed = serial.Serial('/dev/ttyACM0', baudrate=9600)
time.sleep(1)

print("starting model training")

try:
    # Reuse a previously trained classifier if one was saved to disk.
    img_clf = load('model.joblib')
except Exception:
    # No saved model (or it failed to load): train from scratch.
    img_clf = ImageClassifier()
    # load images
    (train_raw, train_labels) = img_clf.load_data_from_folder('./train/')
    # convert images into features
    train_data = img_clf.extract_image_features(train_raw)
    # train model and test on training data
    img_clf.train_classifier(train_data, train_labels)
    # dump classifier into file for later use
    dump(img_clf, 'model.joblib')

print("Model Trained")

while True:
    stream = io.BytesIO()
    camera.capture(stream, format="bmp", resize=(352, 240))
    # BUG FIX: rewind the stream — capture() leaves the position at the end,
    # and Image.open() reads from the current position.
    stream.seek(0)
    image = Image.open(stream)
    # Construct a numpy array from the stream
    data = np.array(image)
    features = img_clf.extract_image_features([data])
    label = img_clf.predict_labels(features)[0]
    # BUG FIX: pySerial's write() requires bytes, not str, on Python 3;
    # passing "d"/"t" raises TypeError at runtime.
    if label == "disco":
        mbed.write(b"d")
    elif label == "tornado":
        mbed.write(b"t")