
We used a Kinect to detect depth with its infrared sensors and to build a skeleton based on a person's body proportions. We translated each tracked joint into a PVector so the skeleton could be followed in Processing programs. Each joint is assigned a number so it can be referenced in later programs; a key of the joint numbers is given below.
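As a quick way to see that key for yourself, here is a minimal sketch of ours (an addition, assuming the SimpleOpenNI library is installed) that simply prints the numeric value behind each joint constant used in the activity below:

import SimpleOpenNI.*;

// Print the number behind each SimpleOpenNI joint constant used in this
// activity. (Our addition: the exact numbering comes from OpenNI and may
// differ between library versions.)
void setup()
{
  println("SKEL_HEAD           = " + SimpleOpenNI.SKEL_HEAD);
  println("SKEL_NECK           = " + SimpleOpenNI.SKEL_NECK);
  println("SKEL_TORSO          = " + SimpleOpenNI.SKEL_TORSO);
  println("SKEL_LEFT_SHOULDER  = " + SimpleOpenNI.SKEL_LEFT_SHOULDER);
  println("SKEL_LEFT_ELBOW     = " + SimpleOpenNI.SKEL_LEFT_ELBOW);
  println("SKEL_LEFT_HAND      = " + SimpleOpenNI.SKEL_LEFT_HAND);
  println("SKEL_RIGHT_SHOULDER = " + SimpleOpenNI.SKEL_RIGHT_SHOULDER);
  println("SKEL_RIGHT_ELBOW    = " + SimpleOpenNI.SKEL_RIGHT_ELBOW);
  println("SKEL_RIGHT_HAND     = " + SimpleOpenNI.SKEL_RIGHT_HAND);
  println("SKEL_LEFT_HIP       = " + SimpleOpenNI.SKEL_LEFT_HIP);
  println("SKEL_LEFT_KNEE      = " + SimpleOpenNI.SKEL_LEFT_KNEE);
  println("SKEL_LEFT_FOOT      = " + SimpleOpenNI.SKEL_LEFT_FOOT);
  println("SKEL_RIGHT_HIP      = " + SimpleOpenNI.SKEL_RIGHT_HIP);
  println("SKEL_RIGHT_KNEE     = " + SimpleOpenNI.SKEL_RIGHT_KNEE);
  println("SKEL_RIGHT_FOOT     = " + SimpleOpenNI.SKEL_RIGHT_FOOT);
}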

Skeleton Activity

/* --------------------------------------------------------------------------
* SimpleOpenNI User Test
* --------------------------------------------------------------------------
* Processing Wrapper for the OpenNI/Kinect library
* http://code.google.com/p/simple-openni
* --------------------------------------------------------------------------
* prog:  Max Rheiner / Interaction Design / zhdk / http://iad.zhdk.ch/
* date:  02/16/2011 (m/d/y)
* ----------------------------------------------------------------------------
* Modified for AT CS Principles: Baker Franke, Feb.2012
*/

import SimpleOpenNI.*;
SimpleOpenNI  myKinect;

void setup()
{
  myKinect = new SimpleOpenNI(this);

  // enable depthMap generation
  if(myKinect.enableDepth() == false)
  {
     println("Can't open the depthMap, maybe the camera is not connected!");
     exit();
     return;
  }
 
  // enable skeleton generation for all joints
  myKinect.enableUser(SimpleOpenNI.SKEL_PROFILE_ALL);
 
  //Mirroring makes my brain hurt less.
  myKinect.setMirror(true);

  //Enable the RGB Camera
  myKinect.enableRGB();
 
 

  smooth();
  size(myKinect.depthWidth(), myKinect.depthHeight());
 
  background(200,0,0);

}

void draw()
{
  background(0);

  //Get new data from the Kinect
  myKinect.update();
  //Draw camera image on the screen
  image(myKinect.rgbImage(),0,0);

  //Each new person is identified with a number, so draw up to 5 people
  for(int userId=1; userId<=5; userId++){
    //Check to see if we're tracking a skeleton for this user
    if(myKinect.isTrackingSkeleton(userId)){
      stroke(255,0,0);
      strokeWeight(3);
      drawSkeleton(userId);
  
    
      //There are 24 possible joints that OpenNI tracks; get each one's screen position and draw it.
      for(int bodyPart=1; bodyPart<=24; bodyPart++){
       
          //get the point as a vector
          PVector bodyPoint = getBodyPoint(userId, bodyPart);
     
          fill(0,255,0,128);
          ellipse(bodyPoint.x, bodyPoint.y, 15, 15);
      }
     
      //draw the head bigger -- Demonstrates use of Constant SKEL_HEAD
      PVector headPoint = getBodyPoint(userId, SimpleOpenNI.SKEL_HEAD);
      ellipse(headPoint.x, headPoint.y, 50,50);
     
    }
  }
 
}


/**
* Translate a joint position on the skeleton from real-world 3D space
* into 2D projective (screen) space for drawing.
*/
PVector getBodyPoint(int user, int bodyPart){

   PVector jointPos = new PVector(), jointPos_Proj = new PVector();
   // Ask the Kinect for the joint's position in real-world coordinates...
   myKinect.getJointPositionSkeleton(user, bodyPart, jointPos);
   // ...then convert it to screen (projective) coordinates so we can draw it.
   myKinect.convertRealWorldToProjective(jointPos, jointPos_Proj);
   return jointPos_Proj;
}


// Draw the skeleton by connecting selected joints.
// SimpleOpenNI has a method called drawLimb, which simply draws a line
// between two body points.
void drawSkeleton(int userId)
{
  myKinect.drawLimb(userId, SimpleOpenNI.SKEL_HEAD, SimpleOpenNI.SKEL_NECK);

  myKinect.drawLimb(userId, SimpleOpenNI.SKEL_NECK, SimpleOpenNI.SKEL_LEFT_SHOULDER);
  myKinect.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_SHOULDER, SimpleOpenNI.SKEL_LEFT_ELBOW);
  myKinect.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_ELBOW, SimpleOpenNI.SKEL_LEFT_HAND);

  myKinect.drawLimb(userId, SimpleOpenNI.SKEL_NECK, SimpleOpenNI.SKEL_RIGHT_SHOULDER);
  myKinect.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_SHOULDER, SimpleOpenNI.SKEL_RIGHT_ELBOW);
  myKinect.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_ELBOW, SimpleOpenNI.SKEL_RIGHT_HAND);

  myKinect.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_SHOULDER, SimpleOpenNI.SKEL_TORSO);
  myKinect.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_SHOULDER, SimpleOpenNI.SKEL_TORSO);

  myKinect.drawLimb(userId, SimpleOpenNI.SKEL_TORSO, SimpleOpenNI.SKEL_LEFT_HIP);
  myKinect.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_HIP, SimpleOpenNI.SKEL_LEFT_KNEE);
  myKinect.drawLimb(userId, SimpleOpenNI.SKEL_LEFT_KNEE, SimpleOpenNI.SKEL_LEFT_FOOT);

  myKinect.drawLimb(userId, SimpleOpenNI.SKEL_TORSO, SimpleOpenNI.SKEL_RIGHT_HIP);
  myKinect.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_HIP, SimpleOpenNI.SKEL_RIGHT_KNEE);
  myKinect.drawLimb(userId, SimpleOpenNI.SKEL_RIGHT_KNEE, SimpleOpenNI.SKEL_RIGHT_FOOT); 
}

// -----------------------------------------------------------------
// SimpleOpenNI has a number of event handlers that are triggered when
// "user events" occur: a person entering or leaving the scene, a pose
// being detected, and calibration starting or ending.

void onNewUser(int userId)
{
  println("onNewUser - userId: " + userId);
  println("  start pose detection");
 
  myKinect.startPoseDetection("Psi",userId);
}

void onLostUser(int userId)
{
  println("onLostUser - userId: " + userId);
}

void onStartCalibration(int userId)
{
  println("onStartCalibration - userId: " + userId);
}

void onEndCalibration(int userId, boolean successful)
{
  println("onEndCalibration - userId: " + userId + ", successful: " + successful);

  if (successful)
  {
    println("  User calibrated !!!");
    myKinect.startTrackingSkeleton(userId);
  }
  else
  {
    println("  Failed to calibrate user !!!");
    println("  Start pose detection");
    myKinect.startPoseDetection("Psi",userId);
  }
}

void onStartPose(String pose,int userId)
{
  println("onStartPose - userId: " + userId + ", pose: " + pose);
  println(" stop pose detection");
 
  myKinect.stopPoseDetection(userId);
  myKinect.requestCalibrationSkeleton(userId, true);

}

void onEndPose(String pose,int userId)
{
  println("onEndPose - userId: " + userId + ", pose: " + pose);
}
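
As one possible extension (a sketch of ours, not part of the original activity), the getBodyPoint() helper above can also drive interaction, for example reacting when a user's right hand is raised above their head. The snippet below is meant to go inside draw(), after myKinect.update():

// Sketch of a possible extension (our addition): react when a tracked
// user's right hand is above their head.
for(int userId=1; userId<=5; userId++){
  if(myKinect.isTrackingSkeleton(userId)){
    PVector hand = getBodyPoint(userId, SimpleOpenNI.SKEL_RIGHT_HAND);
    PVector head = getBodyPoint(userId, SimpleOpenNI.SKEL_HEAD);

    // Screen y grows downward, so a smaller y means the hand is higher up.
    if(hand.y < head.y){
      fill(255, 255, 0);
      text("Hand raised!", 10, 20);
    }
  }
}

Because getBodyPoint() already returns screen coordinates, comparing the y values of two joints is enough; no extra conversion is needed.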

LO 1: use computing tools and techniques to create artifacts.
LO 3: use computing tools and techniques for creative expression.
LO 4: use programming as a creative tool.
LO 5: describe the combination of abstractions used to represent data.
LO 7: develop an abstraction.
LO 8: use multiple levels of abstraction in computation.
LO 9: use models and simulations to raise and answer questions.
LO 10: use computers to process information to gain insight and knowledge.
LO 13: use large datasets to explore and discover information and knowledge.
LO 14: analyze the considerations involved in the computational manipulation of information.
LO 15: develop an algorithm.
LO 16: express an algorithm in a language.
LO 17: appropriately connect problems and potential algorithmic solutions.
LO 20: use abstraction to manage complexity in programs.
LO 21: evaluate a program for correctness.
LO 22: develop a correct program.
LO 23: employ appropriate mathematical and logical concepts in programming.
LO 29: connect computing with innovations in other fields.
