
Commit

initial commit
jaybdub committed Mar 9, 2019
0 parents commit bc98e0c
Showing 33 changed files with 3,358 additions and 0 deletions.
10 changes: 10 additions & 0 deletions CMakeLists.txt
@@ -0,0 +1,10 @@
cmake_minimum_required(VERSION 3.1)

project(ssd_tensorrt)

find_package(CUDA REQUIRED)

include_directories(${CUDA_INCLUDE_DIRS})
link_directories(${CUDA_TOOLKIT_ROOT_DIR}/lib64)

add_subdirectory(jetbot)
13 changes: 13 additions & 0 deletions README.md
@@ -0,0 +1,13 @@
# JetBot

[<img src="https://img.shields.io/discord/553852754058280961.svg">](https://discord.gg/Ady6NtF)

JetBot is an open-source robot based on NVIDIA Jetson Nano that is

* **Affordable** - Less than $150 add-on to Jetson Nano
* **Easy** - Loaded with lots of handy software
* **Educational** - Examples from basic motion to AI-based collision avoidance
* **Fun!** - Interactively programmed from your web browser


To get started, read the [JetBot Wiki](https://github.com/NVIDIA-AI-IOT-private/jetbot/wiki).
Binary file added assets/camera_mount.stl
Binary file added assets/caster_base.stl
Binary file added assets/caster_shroud.stl
Binary file added assets/chassis.stl
1 change: 1 addition & 0 deletions jetbot/CMakeLists.txt
@@ -0,0 +1 @@
add_subdirectory(ssd_tensorrt)
6 changes: 6 additions & 0 deletions jetbot/__init__.py
@@ -0,0 +1,6 @@
from .camera import Camera
from .heartbeat import Heartbeat
from .motor import Motor
from .robot import Robot
from .image import bgr8_to_jpeg
from .object_detection import ObjectDetector
Empty file added jetbot/app/__init__.py
Empty file.
89 changes: 89 additions & 0 deletions jetbot/app/stats.py
@@ -0,0 +1,89 @@
# Copyright (c) 2017 Adafruit Industries
# Author: Tony DiCola & James DeVito
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import time

import Adafruit_SSD1306

from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
from jetbot_robot import get_ip_address

import subprocess

# 128x32 display with hardware I2C:
disp = Adafruit_SSD1306.SSD1306_128_32(rst=None, i2c_bus=1, gpio=1)  # setting gpio to 1 is a hack to avoid platform detection

# Initialize library.
disp.begin()

# Clear display.
disp.clear()
disp.display()

# Create blank image for drawing.
# Make sure to create image with mode '1' for 1-bit color.
width = disp.width
height = disp.height
image = Image.new('1', (width, height))

# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)

# Draw a black filled box to clear the image.
draw.rectangle((0,0,width,height), outline=0, fill=0)

# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = -2
top = padding
bottom = height-padding
# Move left to right keeping track of the current x position for drawing shapes.
x = 0

# Load default font.
font = ImageFont.load_default()


while True:

# Draw a black filled box to clear the image.
draw.rectangle((0,0,width,height), outline=0, fill=0)

    # Shell commands for system monitoring, from: https://unix.stackexchange.com/questions/119126/command-to-display-memory-usage-disk-usage-and-cpu-load
cmd = "top -bn1 | grep load | awk '{printf \"CPU Load: %.2f\", $(NF-2)}'"
CPU = subprocess.check_output(cmd, shell = True )
cmd = "free -m | awk 'NR==2{printf \"Mem: %s/%sMB %.2f%%\", $3,$2,$3*100/$2 }'"
MemUsage = subprocess.check_output(cmd, shell = True )
cmd = "df -h | awk '$NF==\"/\"{printf \"Disk: %d/%dGB %s\", $3,$2,$5}'"
Disk = subprocess.check_output(cmd, shell = True )

    # Write four lines of text: the eth0/wlan0 IP addresses, memory usage, and disk usage.

draw.text((x, top), "eth0: " + str(get_ip_address('eth0')), font=font, fill=255)
draw.text((x, top+8), "wlan0: " + str(get_ip_address('wlan0')), font=font, fill=255)
draw.text((x, top+16), str(MemUsage.decode('utf-8')), font=font, fill=255)
draw.text((x, top+25), str(Disk.decode('utf-8')), font=font, fill=255)

# Display image.
disp.image(image)
disp.display()
time.sleep(1)
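Note that stats.py imports get_ip_address from a jetbot_robot module that does not appear among the files shown in this commit, so the script cannot run from this listing alone. As a rough illustration only, a minimal Linux-only helper with the behavior the script expects might look like the sketch below; the implementation and error handling are assumptions, not part of this commit.

```python
# Hypothetical stand-in for the missing get_ip_address helper (not part of this commit).
import fcntl
import socket
import struct


def get_ip_address(interface):
    """Return the IPv4 address assigned to `interface` (e.g. 'eth0'), or None."""
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        return socket.inet_ntoa(fcntl.ioctl(
            s.fileno(),
            0x8915,  # SIOCGIFADDR ioctl (Linux-specific)
            struct.pack('256s', interface[:15].encode('utf-8'))
        )[20:24])
    except OSError:
        # Interface missing or has no address assigned.
        return None
```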
86 changes: 86 additions & 0 deletions jetbot/app/wander.py
@@ -0,0 +1,86 @@
import argparse
import traitlets
from jetbot import Robot
from jetbot import Camera
import torch
import torchvision
import torch.nn.functional as F
import time
import cv2
import numpy as np
import signal


class WanderApplication(traitlets.HasTraits):

collision_model = traitlets.Unicode()

def __init__(self, *args, **kwargs):
super(WanderApplication, self).__init__(*args, **kwargs)
self.mean = 255.0 * np.array([0.485, 0.456, 0.406])
self.stdev = 255.0 * np.array([0.229, 0.224, 0.225])
self.normalize = torchvision.transforms.Normalize(self.mean, self.stdev)

def _preprocess(self, camera_value):
x = camera_value
x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB)
x = x.transpose((2, 0, 1))
x = torch.from_numpy(x).float()
x = self.normalize(x)
x = x.to(self.device)
x = x[None, ...]
return x

def _update(self, change):
x = change['new']
x = self._preprocess(x)
y = self.model(x)
y = F.softmax(y, dim=1)

prob_blocked = float(y.flatten()[0])

if prob_blocked < 0.5:
self.robot.forward(0.4)
else:
self.robot.left(0.4)

def start(self):
self.device = torch.device('cuda')

print('Loading model...')
# create model
self.model = torchvision.models.alexnet(pretrained=False)
self.model.classifier[6] = torch.nn.Linear(self.model.classifier[6].in_features, 2)
self.model.load_state_dict(torch.load(self.collision_model))
self.model = self.model.to(self.device)

# create robot
self.robot = Robot()

print('Initializing camera...')
# create camera
self.camera = Camera.instance(width=224, height=224)

print('Running...')
self.camera.observe(self._update, names='value')

def kill(sig, frame):
print('Shutting down...')
self.camera.stop()

signal.signal(signal.SIGINT, kill)

self.camera.thread.join()


if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('collision_model', help='Path of the trained Alexnet collision model')
args = parser.parse_args()

application = WanderApplication(collision_model=args.collision_model)
application.start()




68 changes: 68 additions & 0 deletions jetbot/camera.py
@@ -0,0 +1,68 @@
import traitlets
from traitlets.config.configurable import SingletonConfigurable
import atexit
import cv2
import threading
import numpy as np


class Camera(SingletonConfigurable):

value = traitlets.Any()

# config
width = traitlets.Integer(default_value=224).tag(config=True)
height = traitlets.Integer(default_value=224).tag(config=True)
fps = traitlets.Integer(default_value=21).tag(config=True)
capture_width = traitlets.Integer(default_value=3280).tag(config=True)
capture_height = traitlets.Integer(default_value=2464).tag(config=True)

def __init__(self, *args, **kwargs):
self.value = np.empty((self.height, self.width, 3), dtype=np.uint8)
super(Camera, self).__init__(*args, **kwargs)

try:
self.cap = cv2.VideoCapture(self._gst_str(), cv2.CAP_GSTREAMER)

re, image = self.cap.read()

if not re:
raise RuntimeError('Could not read image from camera.')

self.value = image
self.start()
except:
self.stop()
raise RuntimeError(
'Could not initialize camera. Please see error trace.')

atexit.register(self.stop)

def _capture_frames(self):
while True:
re, image = self.cap.read()
if re:
self.value = image
else:
break

def _gst_str(self):
return 'nvarguscamerasrc ! video/x-raw(memory:NVMM), width=%d, height=%d, format=(string)NV12, framerate=(fraction)%d/1 ! nvvidconv ! video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! videoconvert ! appsink' % (
self.capture_width, self.capture_height, self.fps, self.width, self.height)

def start(self):
if not self.cap.isOpened():
self.cap.open(self._gst_str(), cv2.CAP_GSTREAMER)
if not hasattr(self, 'thread') or not self.thread.isAlive():
self.thread = threading.Thread(target=self._capture_frames)
self.thread.start()

def stop(self):
if hasattr(self, 'cap'):
self.cap.release()
if hasattr(self, 'thread'):
self.thread.join()

def restart(self):
self.stop()
self.start()
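Camera is a traitlets singleton whose `value` trait is refreshed by a background capture thread reading from the GStreamer pipeline, so downstream code reacts to new frames by observing that trait rather than polling. A minimal usage sketch along those lines (the frame handling and shutdown steps are illustrative assumptions, not code from this commit):

```python
from jetbot import Camera

# Camera is a SingletonConfigurable, so instance() returns one shared camera.
camera = Camera.instance(width=224, height=224)

def on_new_frame(change):
    frame = change['new']  # BGR uint8 array of shape (height, width, 3)
    print('got frame', frame.shape)

# Called whenever the capture thread assigns a new image to camera.value.
camera.observe(on_new_frame, names='value')

# ... later, detach the callback and release the capture pipeline.
camera.unobserve(on_new_frame, names='value')
camera.stop()
```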
49 changes: 49 additions & 0 deletions jetbot/heartbeat.py
@@ -0,0 +1,49 @@
import enum
import traitlets
from traitlets.config.configurable import Configurable
import ipywidgets.widgets as widgets
import time
import threading


class Heartbeat(Configurable):
class Status(enum.Enum):
dead = 0
alive = 1

status = traitlets.UseEnum(Status, default_value=Status.dead)
running = traitlets.Bool(default_value=False)

# config
period = traitlets.Float(default_value=0.5).tag(config=True)

def __init__(self, *args, **kwargs):
super(Heartbeat, self).__init__(*args,
**kwargs) # initializes traitlets

self.pulseout = widgets.FloatText(value=time.time())
self.pulsein = widgets.FloatText(value=time.time())
self.link = widgets.jsdlink((self.pulseout, 'value'),
(self.pulsein, 'value'))
self.start()

def _run(self):
while True:
if not self.running:
break
if self.pulseout.value - self.pulsein.value >= self.period:
self.status = Heartbeat.Status.dead
else:
self.status = Heartbeat.Status.alive
self.pulseout.value = time.time()
time.sleep(self.period)

def start(self):
if self.running:
return
self.running = True
self.thread = threading.Thread(target=self._run)
self.thread.start()

def stop(self):
self.running = False
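Heartbeat works as a browser-liveness watchdog: the Python loop writes the current time into `pulseout`, the `jsdlink` echoes it back into `pulsein` through the client, and `status` flips to `dead` once the echo lags by more than `period`. A sketch of how a notebook might react to that status (stopping the robot is an assumed response and not code from this commit):

```python
from jetbot import Heartbeat, Robot

robot = Robot()
heartbeat = Heartbeat(period=0.5)

def handle_heartbeat_status(change):
    # If the browser stops echoing pulses, assume the operator is gone and halt.
    if change['new'] == Heartbeat.Status.dead:
        robot.stop()

heartbeat.observe(handle_heartbeat_status, names='status')
```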
6 changes: 6 additions & 0 deletions jetbot/image.py
@@ -0,0 +1,6 @@
import enum
import cv2


def bgr8_to_jpeg(value, quality=75):
return bytes(cv2.imencode('.jpg', value)[1])
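bgr8_to_jpeg encodes a BGR frame as JPEG bytes, which is the format ipywidgets' Image widget expects, so it is the natural transform when wiring the camera into a notebook view. One way the pieces could be connected (the widget plumbing here is illustrative, not taken from this commit):

```python
import traitlets
import ipywidgets.widgets as widgets
from jetbot import Camera, bgr8_to_jpeg

camera = Camera.instance(width=224, height=224)
image_widget = widgets.Image(format='jpeg', width=224, height=224)

# Re-encode every new BGR frame as JPEG and push it into the widget.
camera_link = traitlets.dlink((camera, 'value'), (image_widget, 'value'),
                              transform=bgr8_to_jpeg)
```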
38 changes: 38 additions & 0 deletions jetbot/motor.py
@@ -0,0 +1,38 @@
import atexit
from Adafruit_MotorHAT import Adafruit_MotorHAT
import traitlets
from traitlets.config.configurable import Configurable


class Motor(Configurable):

value = traitlets.Float()

# config
alpha = traitlets.Float(default_value=1.0).tag(config=True)
beta = traitlets.Float(default_value=0.0).tag(config=True)

def __init__(self, driver, channel, *args, **kwargs):
super(Motor, self).__init__(*args, **kwargs) # initializes traitlets

self._driver = driver
self._motor = self._driver.getMotor(channel)
atexit.register(self._release)

@traitlets.observe('value')
def _observe_value(self, change):
self._write_value(change['new'])

def _write_value(self, value):
"""Sets motor value between [-1, 1]"""
mapped_value = int(255.0 * (self.alpha * value + self.beta))
speed = min(max(abs(mapped_value), 0), 255)
self._motor.setSpeed(speed)
if mapped_value < 0:
self._motor.run(Adafruit_MotorHAT.FORWARD)
else:
self._motor.run(Adafruit_MotorHAT.BACKWARD)

def _release(self):
"""Stops motor by releasing control"""
self._motor.run(Adafruit_MotorHAT.RELEASE)
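Motor wraps one channel of the Adafruit Motor HAT: writing to the `value` trait triggers `_observe_value`, which scales the float through `alpha` and `beta` into a 0-255 PWM speed and picks the run direction from the sign. A minimal sketch of driving a single channel directly (the HAT address and channel number are assumptions; the Robot class in this commit normally owns this wiring):

```python
from Adafruit_MotorHAT import Adafruit_MotorHAT
from jetbot import Motor

# Assumed default I2C address for the motor HAT; adjust for your hardware.
driver = Adafruit_MotorHAT(addr=0x60)
left_motor = Motor(driver, channel=1, alpha=1.0)

left_motor.value = 0.3   # spin at roughly 30% duty cycle
left_motor.value = 0.0   # stop
```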
(The remaining changed files in this commit are not shown in this view.)
