Update command line examples and help strings.

Use python3 -m module invocations in the usage examples, point
TEST_DATA at the unversioned python3 dist-packages path, mention
image file sources, and drop the trailing periods from argparse
help strings. Also fix edgetpu_demo, which defined TEST_DATA_DIR
but referenced ${TEST_DATA}.

Change-Id: Ide53d9e0c0094757037d41c6a733ac79843ead0f
diff --git a/bin/edgetpu_demo b/bin/edgetpu_demo
index fbab807..d985942 100755
--- a/bin/edgetpu_demo
+++ b/bin/edgetpu_demo
@@ -1,8 +1,7 @@
 #!/bin/bash
 
 #readonly VIDEO_FILE=""
-readonly EDGETPU_DIR=$(python3 -c 'import edgetpu; import os; print(os.path.dirname(edgetpu.__file__))')
-readonly TEST_DATA_DIR="${EDGETPU_DIR}/test_data"
+readonly TEST_DATA="$(python3 -c 'import edgetpu; import os; print(os.path.dirname(edgetpu.__file__))')/test_data"
 readonly TPU_MODEL_FILE="${TEST_DATA}/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite"
 readonly LABELS_FILE="${TEST_DATA}/coco_labels.txt"
 
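For reference, the TEST_DATA one-liner above just asks Python where
the installed edgetpu package lives and appends test_data. The same
lookup as a standalone sketch (the existence check is an addition
for illustration, not part of the demo script):

    import os
    import edgetpu

    # Same path the shell one-liner computes.
    test_data = os.path.join(os.path.dirname(edgetpu.__file__), 'test_data')
    # Sanity check before pointing a demo at the bundled files.
    assert os.path.isdir(test_data), 'edgetpu test_data is not installed'
    print(test_data)
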
diff --git a/edgetpuvision/classify.py b/edgetpuvision/classify.py
index 7aec55c..4e86077 100644
--- a/edgetpuvision/classify.py
+++ b/edgetpuvision/classify.py
@@ -1,10 +1,10 @@
 """A demo which runs object classification on camera frames."""
 
-#export TEST_DATA=/usr/lib/python3.5/dist-packages/edgetpu/test_data/
+# export TEST_DATA=/usr/lib/python3/dist-packages/edgetpu/test_data
 #
-# python3 classify.py \
-#   --model=${TEST_DATA}/mobilenet_v1_1.0_224_quant_edgetpu.tflite \
-#   --labels=${TEST_DATA}/imagenet_labels.txt
+# python3 -m edgetpuvision.classify \
+#   --model ${TEST_DATA}/mobilenet_v2_1.0_224_inat_bird_quant.tflite \
+#   --labels ${TEST_DATA}/inat_bird_labels.txt
 
 import argparse
 import collections
@@ -74,24 +74,24 @@
 def main():
     parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
     parser.add_argument('--source',
-                        help='/dev/videoN:FMT:WxH:N/D or .mp4 file',
+                        help='/dev/videoN:FMT:WxH:N/D, .mp4 file, or image file',
                         default='/dev/video0:YUY2:1280x720:30/1')
     parser.add_argument('--downscale', type=float, default=2.0,
-                        help='Downscale factor for .mp4 file rendering.')
+                        help='Downscale factor for video/image file rendering')
     parser.add_argument('--model', required=True,
-                        help='.tflite model path.')
+                        help='Path to .tflite model')
     parser.add_argument('--labels', required=True,
-                        help='label file path.')
+                        help='Path to labels file')
     parser.add_argument('--window', type=int, default=10,
-                        help='number of frames to accumulate inference results.')
+                        help='Number of frames over which to accumulate inference results')
     parser.add_argument('--top_k', type=int, default=3,
-                        help='number of classes with highest score to display.')
+                        help='Number of classes with highest score to display')
     parser.add_argument('--threshold', type=float, default=0.1,
-                        help='class score threshold.')
+                        help='Class score threshold')
     parser.add_argument('--print', action='store_true', default=False,
-                        help='Print detected classes to console.')
+                        help='Print detected classes to console')
     parser.add_argument('--fullscreen', default=False, action='store_true',
-                        help='Fullscreen rendering.')
+                        help='Fullscreen rendering')
     args = parser.parse_args()
 
     if not gstreamer.run_gen(render_gen(args),
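
The --source spec packs device, pixel format, frame size, and
framerate into one string. A rough illustration of unpacking it,
assuming the full five-field form (this is a sketch, not the demo's
actual parser in the gstreamer module):

    import re

    SOURCE_RE = re.compile(
        r'(?P<dev>/dev/video\d+):(?P<fmt>\w+)'
        r':(?P<w>\d+)x(?P<h>\d+):(?P<n>\d+)/(?P<d>\d+)')

    def parse_source(src):
        """Return v4l2 parameters, or None for a .mp4/image file path."""
        m = SOURCE_RE.fullmatch(src)
        if not m:
            return None
        p = m.groupdict()
        return {'dev': p['dev'], 'fmt': p['fmt'],
                'size': (int(p['w']), int(p['h'])),
                'fps': (int(p['n']), int(p['d']))}

    print(parse_source('/dev/video0:YUY2:1280x720:30/1'))
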
diff --git a/edgetpuvision/classify_server.py b/edgetpuvision/classify_server.py
index 65695c2..4c43372 100644
--- a/edgetpuvision/classify_server.py
+++ b/edgetpuvision/classify_server.py
@@ -1,10 +1,10 @@
 """A demo which runs object classification and streams video to the browser."""
 
-#export TEST_DATA=/usr/lib/python3.5/dist-packages/edgetpu/test_data/
+# export TEST_DATA=/usr/lib/python3/dist-packages/edgetpu/test_data
 #
-# python3 classify_server.py \
-#   --model=${TEST_DATA}/mobilenet_v1_1.0_224_quant_edgetpu.tflite \
-#   --labels=${TEST_DATA}/imagenet_labels.txt
+# python3 -m edgetpuvision.classify_server \
+#   --model ${TEST_DATA}/mobilenet_v2_1.0_224_inat_bird_quant.tflite \
+#   --labels ${TEST_DATA}/inat_bird_labels.txt
 
 import argparse
 import logging
@@ -24,16 +24,16 @@
 
     parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
     parser.add_argument('--source',
-                        help='/dev/videoN:FMT:WxH:N/D or .mp4 file',
+                        help='/dev/videoN:FMT:WxH:N/D, .mp4 file, or image file',
                         default='/dev/video0:YUY2:1280x720:30/1')
     parser.add_argument('--model', required=True,
-                        help='.tflite model path.')
+                        help='Path to .tflite model')
     parser.add_argument('--labels', required=True,
-                        help='label file path.')
+                        help='Path to labels file')
     parser.add_argument('--top_k', type=int, default=3,
-                        help='number of classes with highest score to display.')
+                        help='Number of classes with highest score to display')
     parser.add_argument('--threshold', type=float, default=0.1,
-                        help='class score threshold.')
+                        help='Class score threshold')
     args = parser.parse_args()
 
     engine = ClassificationEngine(args.model)
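
For context, a single inference with the engine constructed above
looks roughly like this under the legacy Edge TPU Python API (treat
the exact classify_with_image signature as an assumption, and
bird.jpg as a placeholder input):

    from PIL import Image
    from edgetpu.classification.engine import ClassificationEngine

    engine = ClassificationEngine(
        'mobilenet_v2_1.0_224_inat_bird_quant.tflite')
    # Returns (label_id, score) pairs for the top_k classes.
    results = engine.classify_with_image(
        Image.open('bird.jpg'), threshold=0.1, top_k=3)
    for label_id, score in results:
        print(label_id, score)
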
diff --git a/edgetpuvision/detect.py b/edgetpuvision/detect.py
index f46007d..cc8894a 100644
--- a/edgetpuvision/detect.py
+++ b/edgetpuvision/detect.py
@@ -1,15 +1,15 @@
 """A demo which runs object detection on camera frames."""
 
-# export TEST_DATA=/usr/lib/python3.5/dist-packages/edgetpu/test_data/
+# export TEST_DATA=/usr/lib/python3/dist-packages/edgetpu/test_data
 #
 # Run face detection model:
-# python3 detect.py \
-#   --model=${TEST_DATA}/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite
+# python3 -m edgetpuvision.detect \
+#   --model ${TEST_DATA}/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite
 #
 # Run coco model:
-# python3 detect.py \
-#   --model=${TEST_DATA}/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite
-#   --labels=${TEST_DATA}/coco_labels.txt
+# python3 -m edgetpuvision.detect \
+#   --model ${TEST_DATA}/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite \
+#   --labels ${TEST_DATA}/coco_labels.txt
 
 import argparse
 import itertools
@@ -58,21 +58,21 @@
 def main():
     parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
     parser.add_argument('--source',
-                        help='/dev/videoN:FMT:WxH:N/D or .mp4 file',
+                        help='/dev/videoN:FMT:WxH:N/D, .mp4 file, or image file',
                         default='/dev/video0:YUY2:1280x720:30/1')
     parser.add_argument('--downscale', type=float, default=2.0,
-                        help='Downscale factor for .mp4 file rendering.')
+                        help='Downscale factor for video/image file rendering')
     parser.add_argument('--model',
-                        help='.tflite model path.', required=True)
+                        help='Path to .tflite model', required=True)
     parser.add_argument('--labels',
-                        help='labels file path.')
+                        help='Path to labels file')
     parser.add_argument('--top_k', type=int, default=50,
-                        help='Max number of objects to detect.')
+                        help='Max number of objects to detect')
     parser.add_argument('--threshold', type=float, default=0.1,
-                        help='Detection threshold.')
+                        help='Detection threshold')
     parser.add_argument('--filter', default=None)
     parser.add_argument('--fullscreen', default=False, action='store_true',
-                        help='Fullscreen rendering.')
+                        help='Fullscreen rendering')
     args = parser.parse_args()
 
     if not gstreamer.run_gen(render_gen(args),
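
A sketch of how --threshold, --top_k, and --filter might combine to
prune detections; the demo's real logic lives in render_gen(), and
the comma-separated --filter form shown here is an assumption:

    def prune(objs, labels, threshold, top_k, allow=None):
        # objs: detection results with .score and .label_id attributes.
        objs = [o for o in objs if o.score >= threshold]
        if allow:  # assumed usage: --filter person,car
            names = set(allow.split(','))
            objs = [o for o in objs if labels.get(o.label_id) in names]
        # Keep only the top_k highest-scoring detections.
        return sorted(objs, key=lambda o: o.score, reverse=True)[:top_k]
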
diff --git a/edgetpuvision/detect_server.py b/edgetpuvision/detect_server.py
index afcb359..906020b 100644
--- a/edgetpuvision/detect_server.py
+++ b/edgetpuvision/detect_server.py
@@ -1,15 +1,15 @@
 """A demo which runs object detection and streams video to the browser."""
 
-# export TEST_DATA=/usr/lib/python3.5/dist-packages/edgetpu/test_data/
+# export TEST_DATA=/usr/lib/python3/dist-packages/edgetpu/test_data
 #
 # Run face detection model:
-# python3 detect_server.py \
-#   --model=${TEST_DATA}/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite
+# python3 -m edgetpuvision.detect_server \
+#   --model ${TEST_DATA}/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite
 #
 # Run coco model:
-# python3 detect_server.py \
-#   --model=${TEST_DATA}/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite
-#   --labels=${TEST_DATA}/coco_labels.txt
+# python3 -m edgetpuvision.detect_server \
+#   --model ${TEST_DATA}/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite \
+#   --labels ${TEST_DATA}/coco_labels.txt
 
 import argparse
 import logging
@@ -29,16 +29,16 @@
 
     parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
     parser.add_argument('--source',
-                        help='/dev/videoN:FMT:WxH:N/D or .mp4 file',
+                        help='/dev/videoN:FMT:WxH:N/D, .mp4 file, or image file',
                         default='/dev/video0:YUY2:1280x720:30/1')
     parser.add_argument('--model',
-                        help='.tflite model path.', required=True)
+                        help='Path to .tflite model', required=True)
     parser.add_argument('--labels',
-                        help='labels file path.')
+                        help='Path to labels file')
     parser.add_argument('--top_k', type=int, default=50,
-                        help='Max number of objects to detect.')
+                        help='Max number of objects to detect')
     parser.add_argument('--threshold', type=float, default=0.1,
-                        help='Detection threshold.')
+                        help='Detection threshold')
     parser.add_argument('--filter', default=None)
     args = parser.parse_args()
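
One reason the trailing periods go away: every parser here uses
ArgumentDefaultsHelpFormatter, which appends the default value to
each help string, so a period would land mid-sentence in --help
output. A minimal demonstration:

    import argparse

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--threshold', type=float, default=0.1,
                        help='Detection threshold')
    parser.print_help()
    # Shows, roughly: --threshold THRESHOLD  Detection threshold (default: 0.1)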