This is the homepage for the project entitled “Stories are flowing trees” at Srishti School of Design’s Center for Experimental Media Arts.
Check back here for daily updates.
Day 1
Introduction
openFrameworks introduction:
– http://www.openframeworks.cc/
– open-source creative coding toolkit for writing software
– provides many high-level classes and abstractions which are cross-platform for a collection of libraries including: OpenCV, Vector/Matrix Math, Graphics using OpenGL, Mesh, OBJ, VBO, Shaders, Audio/Video input/output/saving, Kinect, File/Directory access/manipulation, TCP/IP/UDP/OSC, Threading, Physics engine, Synthesizers, iPhone/Android integration (GPS/Compass/UI/OpenGL/Maps/Audio/Video), …
basic program:
testApp.h
#pragma once
#include "ofMain.h"
// minimal openFrameworks app: override the three lifecycle
// callbacks declared (virtual) in ofBaseApp
class testApp : public ofBaseApp{
public:
// declaration of functions
void setup(); // called once, before the first frame
void update(); // called once per frame, before draw()
void draw(); // called once per frame, after update()
};
testApp.cpp
#include "testApp.h"
// here we "define" the methods we "declared" in the "testApp.h" file
// i get called once
void testApp::setup(){
// do some initialization
// set the size of the window
ofSetWindowShape(250, 250); // width, height in pixels
// the rate at which the program runs (FPS)
ofSetFrameRate(30);
}
// i get called in a loop that runs until the program ends
void testApp::update(){
// nothing to update in this minimal example
}
// i also get called in a loop that runs until the program ends
void testApp::draw(){
// nothing drawn in this minimal example
}
mouse input:
testApp.h
#pragma once
#include "ofMain.h"
// same skeleton as the first example; draw() will follow the mouse
class testApp : public ofBaseApp{
public:
// declaration of functions
void setup(); // called once, before the first frame
void update(); // called once per frame, before draw()
void draw(); // called once per frame, after update()
};
testApp.cpp
#include "testApp.h"
// here we "define" the methods we "declared" in the "testApp.h" file
// i get called once
void testApp::setup(){
// do some initialization
// set the size of the window
ofSetWindowShape(250, 250);
// the rate at which the program runs (FPS)
ofSetFrameRate(30);
// don't clear the background on each update/draw loop
// (uncomment the next line to leave trails of the circles)
// ofSetBackgroundAuto(false);
}
// i get called in a loop that runs until the program ends
void testApp::update(){
// nothing to update: draw() reads mouseX/mouseY directly
}
// i also get called in a loop that runs until the program ends
void testApp::draw(){
// set the size of the circle
float radius = 3.0f;
// draw a circle at the mouse position
ofCircle(mouseX, mouseY, radius);
// note:
// the two variables, mouseX and mouseY are declared
// in the base (parent) class; they track the cursor in
// window coordinates
}
drawing a line:
testApp.h
#pragma once
#include "ofMain.h"
class testApp : public ofBaseApp{
public:
// declaration of functions
void setup();
void update();
void draw();
int counter; // frame counter, incremented once per update()
};
testApp.cpp
#include "testApp.h"
// here we "define" the methods we "declared" in the "testApp.h" file
// i get called once
void testApp::setup(){
// do some initialization
// set the size of the window
ofSetWindowShape(250, 250);
// the rate at which the program runs (FPS)
ofSetFrameRate(30);
// we are going to increment this variable each "frame"
// (member variables are not zero-initialized automatically)
counter = 0;
}
// i get called in a loop that runs until the program ends
void testApp::update(){
// update some variables
// NOTE(review): counter is not read by draw() in this listing —
// presumably groundwork for animating the wave; confirm intent
counter = counter + 1;
}
// draws one full cycle of a sine wave across the 250px-wide window,
// over a grey horizontal axis through the vertical center
void testApp::draw(){
    ofBackground(0);
    const float amplitude = 100.0f;
    // axis line
    ofSetColor(100, 100, 100);
    ofLine(0, 125, 250, 125);
    // waveform: connect each pixel column to the previous column
    ofSetColor(200, 200, 200);
    for (int col = 1; col < 250; col++) {
        float tCur = (float)col / 250.0;
        float yCur = amplitude * sin( tCur * 2.0*PI );
        float tPrev = (float)(col - 1) / 250.0;
        float yPrev = amplitude * sin( tPrev * 2.0*PI );
        // y is negated because screen y grows downward
        ofLine(tCur * 250.0f, -yCur + 125, tPrev * 250.0f, -yPrev + 125);
    }
}
audio input:
testApp.h
#pragma once
#include "ofMain.h"
class testApp : public ofBaseApp{
public:
// redeclaration of functions (declared in base class)
void setup();
void update();
void draw();
// redeclaration of two special functions which handle audio i/o (declared in base class)
void audioRequested(float *buf, int size, int ch); // fill buf to produce sound
void audioReceived(float *buf, int size, int ch); // buf holds incoming mic samples
// we will keep a pointer to our audio
// NOTE(review): raw owning pointer, allocated in setup() and never
// freed — acceptable for a tutorial app that lives until exit
float *audioInput;
int sampleRate, bufferSize; // samples/second and samples per callback
};
testApp.cpp
#include "testApp.h"
// here we "define" the methods we "declared" in the "testApp.h" file
// i get called once
void testApp::setup(){
    // set the size of the window
    ofSetWindowShape(250, 250);
    // the rate at which the program runs (FPS)
    ofSetFrameRate(30);
    // setup the sound stream
    sampleRate = 44100;
    bufferSize = 250;
    ofSoundStreamSetup(2,           // output channels
                       1,           // input channels
                       sampleRate,  // how many samples (readings) per second
                       bufferSize,  // size of each copy of audio
                       4);          // number of buffers (affects latency)
    // buffer that audioReceived() copies each incoming chunk into.
    // value-initialize it ("()" zero-fills) so draw() does not read
    // uninitialized memory before the first audio callback arrives
    audioInput = new float[bufferSize]();
}
// i get called in a loop that runs until the program ends
void testApp::update(){
// nothing to do: audioReceived() refreshes audioInput asynchronously
}
// i also get called in a loop that runs until the program ends
void testApp::draw(){
ofBackground(0);
float amplitude = 100.0f;
// grey axis through the middle of the window
ofSetColor(100, 100, 100);
ofLine(0, 125, 250, 125);
ofSetColor(200, 200, 200);
// draw the latest audio chunk as a waveform, one segment per pixel
// column; indexes audioInput[0..249], which assumes bufferSize >= 250
for (int i = 1; i < 250; i++) {
float x1 = (float)i / 250.0;
float y1 = amplitude * audioInput[i]; //amplitude * sin( x1 * 2.0*PI );
float x2 = (float)(i - 1) / 250.0;
float y2 = amplitude * audioInput[i-1]; //amplitude * sin( x2 * 2.0*PI );
// y is negated because screen y grows downward
ofLine(x1 * 250.0f, -y1 + 125, x2 * 250.0f, -y2 + 125);
}
}
void testApp::audioRequested(float *buf, int size, int ch)
{
// output callback: intentionally empty — this example only analyzes
// input. NOTE(review): buf is not zeroed here; presumably the sound
// backend hands us a cleared buffer — verify if output is noisy
}
void testApp::audioReceived(float *buf, int size, int ch)
{
// copy the data into our variable, audioInput
memcpy(audioInput, buf, sizeof(float) * size);
}
camera input:
testApp.h
#pragma once
#include "ofMain.h"
class testApp : public ofBaseApp{
public:
// redeclaration of functions (declared in base class)
void setup();
void update();
void draw();
void keyPressed(int key); // 's' opens the camera settings dialog
ofVideoGrabber camera; // live camera input
};
testApp.cpp
#include "testApp.h"
// here we "define" the methods we "declared" in the "testApp.h" file
// i get called once
void testApp::setup(){
// do some initialization
// set the size of the window (matches the camera resolution below)
ofSetWindowShape(320, 240);
// the rate at which the program runs (FPS)
ofSetFrameRate(30);
// setup the camera
camera.initGrabber(320, 240);
}
// i get called in a loop that runs until the program ends
void testApp::update(){
// poll the grabber for a new camera frame
camera.update();
}
// i also get called in a loop that runs until the program ends
void testApp::draw(){
ofBackground(0);
// draw the camera frame at the top-left corner
camera.draw(0,0);
}
void testApp::keyPressed(int key)
{
    // 's' opens the driver-provided camera settings dialog;
    // every other key is ignored
    if (key == 's') {
        camera.videoSettings();
    }
}
Day 2
Introduction
Over the next two days, you will play with audio and video in more depth. This will include processing the input of audio and/or video to make things happen in audio and/or video. I’ll go over examples of using a camera or video as input, and processing the changes between frames to understand “motion” or “flicker”. Similarly, in audio, we’ll play with the transients in sounds to try to understand how “textures” of sound change. This gives us a perceptual measure to use in defining interactions based on audio or vision.
video player:
testApp.h
#pragma once
#include "ofMain.h"
class testApp : public ofBaseApp{
public:
// redeclaration of functions (declared in base class)
void setup();
void update();
void draw();
ofVideoPlayer videoPlayer; // plays a movie file from bin/data
};
testApp.cpp
#include "testApp.h"
// here we "define" the methods we "declared" in the "testApp.h" file
// i get called once
void testApp::setup(){
// set the size of the window
ofSetWindowShape(320, 240);
// the rate at which the program runs (FPS)
ofSetFrameRate(30);
// load a movie located in the project's "bin/data" directory
videoPlayer.loadMovie("sunra_pink.mov");
// start playback immediately
videoPlayer.play();
}
// i get called in a loop that runs until the program ends
void testApp::update(){
// update the current frame of the movie
videoPlayer.update();
}
// i also get called in a loop that runs until the program ends
void testApp::draw(){
ofBackground(0);
// this is a special command to change the way shapes, images, and videos are drawn.
// normally, when you draw a shape at position 0,0 with a width of 100, and a height of 100,
// the object occupies a space from point (0,0) to point (100,100).
// however, when you use OF_RECTMODE_CENTER, to draw the object in the same place, you would
// draw the shape at position 50,50, i.e. using the center of the object rather than the
// top-left corner
ofSetRectMode(OF_RECTMODE_CENTER);
// this moves all future drawing commands to the position (160,120)
ofTranslate(160, 120, 0);
// here we are rotating our world using the mouse
float degreesToRotate = (float)mouseX / (float)ofGetScreenWidth() * 360.0;
// we rotate by the degrees calculated above using ofRotate.
// the (0, 1, 0) say to rotate along the y-axis. we could put 1's in the x or z as well.
ofRotate(degreesToRotate, 0, 1, 0);
// draw the movie a position 0,0
videoPlayer.draw(0,0);
}
audio onset detection:
testApp.h
#pragma once
#include "ofMain.h"
class testApp : public ofBaseApp{
public:
// redeclaration of functions (declared in base class)
void setup();
void update();
void draw();
// redeclaration of two special functions which handle audio i/o (declared in base class)
void audioRequested(float *buf, int size, int ch);
void audioReceived(float *buf, int size, int ch);
// we will keep a pointer to our audio
float *audioInput; // latest chunk of mic samples (bufferSize floats)
int sampleRate, bufferSize;
float rms; // root-mean-square loudness of the latest chunk
vector<float> rms_values; // rolling history (max 25) of rms readings
};
testApp.cpp
#include "testApp.h"
// here we "define" the methods we "declared" in the "testApp.h" file
// i get called once
void testApp::setup(){
// do some initialization
// set the size of the window
ofSetWindowShape(250, 250);
// the rate at which the program runs (FPS)
ofSetFrameRate(30);
// setup the sound
sampleRate = 44100;
bufferSize = 250;
ofSoundStreamSetup(2, // output channels
1, // input channels
sampleRate, // how many samples (readings) per second
bufferSize, // size of each copy of audio
4); // latency of audio
// a variable to store our audio
audioInput = new float[bufferSize];
rms_values.push_back(0);
}
// i get called in a loop that runs until the program ends
void testApp::update(){
// we can "smooth" the rms value by using feedback.
// this is a simple example of using the previous value
// with a weight (0.9) and the current value (0.1)
// rms_values.push_back(0.9 * rms_values.back() + 0.1 * rms);
// add the current rms value (written by audioReceived)
rms_values.push_back(rms);
// we only keep a maximum of 25 values of the rms readings
if (rms_values.size() > 25) {
// if we have added more than 25, then we delete the first (oldest) one
rms_values.erase(rms_values.begin(), rms_values.begin() + 1);
}
}
// i also get called in a loop that runs until the program ends.
// draws the live waveform, a loudness circle, the rms history, and
// flashes the whole window when an onset is detected
void testApp::draw(){
    ofBackground(0);
    // draw a line across the middle of the screen
    ofSetColor(100, 100, 100);
    ofLine(0, 125, 250, 125);
    // we draw the audio input as before
    ofSetColor(200, 200, 200);
    float amplitude = 100.0f;
    for (int i = 1; i < 250; i++) {
        // get two pairs of points
        float x1 = (float)i / 250.0;
        float y1 = amplitude * audioInput[i];
        float x2 = (float)(i - 1) / 250.0;
        float y2 = amplitude * audioInput[i-1];
        // draw a tiny segment of the overall line
        ofLine(x1 * 250.0f, -y1 + 125,
               x2 * 250.0f, -y2 + 125);
    }
    // draw a circle in the middle of the screen with the size
    // set by the rms value
    ofSetRectMode(OF_RECTMODE_CENTER);
    ofCircle(125, 125, rms * 250);
    ofSetRectMode(OF_RECTMODE_CORNER);
    // plot the recent rms history along the bottom of the window
    for (int i = 1; i < rms_values.size(); i++) {
        ofLine((i ) * 250.0/25.0, -rms_values[i ]*1000.0 + 250.0,
               (i-1) * 250.0/25.0, -rms_values[i-1]*1000.0 + 250.0);
    }
    // calculate the average of the rms values
    float average_rms = 0.0f;
    for (int i = 0; i < rms_values.size(); i++) {
        average_rms = average_rms + rms_values[i];
    }
    average_rms = average_rms / rms_values.size();
    // calculate the mean absolute deviation of the rms values.
    // fabsf is used deliberately: plain abs() can resolve to the
    // integer overload, truncating every deviation below 1.0 to
    // zero — which makes var_rms 0 and the onset test below fire
    // on any reading merely above the average
    float var_rms = 0.0f;
    for (int i = 0; i < rms_values.size(); i++) {
        var_rms = var_rms + fabsf(rms_values[i] - average_rms);
    }
    var_rms = var_rms / rms_values.size();
    // now we see if the current value is outside the mean + deviation.
    // basic statistics tells us a normally distributed signal keeps
    // almost all readings within a few deviations of the mean; a
    // reading outside that band is treated as an onset
    if (rms_values.back() > (average_rms + 2.0*var_rms)) {
        // draw a rectangle to denote the detection of an onset
        ofRect(0, 0, 250, 250);
    }
}
void testApp::audioRequested(float *buf, int size, int ch)
{
// output callback left empty — this example only analyzes input
}
void testApp::audioReceived(float *buf, int size, int ch)
{
// copy the data into our variable, audioInput
memcpy(audioInput, buf, sizeof(float) * size);
// add all the audio input values
float total = 0;
for (int i = 0; i < size; i++) {
// we add the "square" of each value so that negative numbers
// become positive. this is like thinking of the "magnitude"
total = total + (buf[i] * buf[i]);
}
// the "mean" part of the RMS, we divide by the number of audio input samples
// we added in the for loop above
total = total / (float)size;
// the "root" part of RMS, we take the square root to get our RMS reading for the
// current chunk of audio input values
rms = sqrt(total);
}
motion detection:
For this project, you will need to copy the opencvExample located in openframeworks/apps/addonsExamples/opencvExample instead of using emptyExample!
testApp.h
#pragma once
#include "ofMain.h"
#include "ofxOpenCv.h"
class testApp : public ofBaseApp{
public:
void setup();
void update();
void draw();
float sum; // smoothed measure of frame-to-frame motion
ofVideoGrabber vidGrabber; // live camera input
ofxCvColorImage colorImg; // current camera frame (color)
ofxCvGrayscaleImage grayImage; // current frame, grayscale
ofxCvGrayscaleImage grayPreviousImage; // previous frame, grayscale
ofxCvGrayscaleImage grayDiff; // thresholded |current - previous|
ofVideoPlayer vidPlayer; // movie whose speed tracks the motion value
};
testApp.cpp
#include "testApp.h"
//--------------------------------------------------------------
void testApp::setup(){
// change the window to hold enough space for 6 movies (3x2)
ofSetWindowShape(320*3,240*2);
ofSetFrameRate(30);
// initialize our camera with a resolution of 320x240
vidGrabber.initGrabber(320,240);
// load a movie in and set it to loop, and then start it (play())
vidPlayer.loadMovie("sunra_pink.mov");
vidPlayer.setLoopState(OF_LOOP_NORMAL);
vidPlayer.play();
// smoothed motion measure starts at zero
sum = 0;
// these are (wrappers for) opencv image containers
// we'll use for image processing
// we are going to find the difference between successive frames
colorImg.allocate(320,240);
grayImage.allocate(320,240);
grayPreviousImage.allocate(320,240);
grayDiff.allocate(320,240);
}
//--------------------------------------------------------------
// grabs a camera frame, measures how much it differs from the
// previous frame, and drives the movie's playback speed with it
void testApp::update(){
    // background to black
    ofBackground(0);
    // update the camera
    vidGrabber.update();
    // set the color image (opencv container) to the camera image
    colorImg.setFromPixels(vidGrabber.getPixels(), 320,240);
    // convert to grayscale
    grayImage = colorImg;
    // start the difference image from the current grayscale frame
    grayDiff = grayImage;
    // compute the absolute difference with the previous frame's grayscale image
    grayDiff.absDiff(grayPreviousImage);
    // store the current grayscale image for the next iteration of
    // update(). copy the already-converted grayImage instead of
    // re-converting colorImg — same pixels, one less color conversion
    grayPreviousImage = grayImage;
    // let's threshold the difference image:
    // all values less than 10 become 0, all values above 10 become 255
    grayDiff.threshold(10);
    // average the difference pixels into a single "motion" number and
    // smooth it over time (90% history, 10% new reading)
    sum = 0.9 * sum + 0.1 * cvSum(grayDiff.getCvImage()).val[0] / 320.0 / 240.0 / 10.0;
    // let's change the speed of our movie based on the motion value we calculated
    vidPlayer.setSpeed(sum);
    vidPlayer.update();
}
//--------------------------------------------------------------
// lays out the whole pipeline as a 3x2 grid of 320x240 tiles
void testApp::draw(){
    // draw the input camera image
    vidGrabber.draw(0,0);
    // draw the opencv image
    colorImg.draw(0,240);
    // draw the grayscale conversion
    grayImage.draw(320,0);
    // draw the previous grayscale image
    grayPreviousImage.draw(320,240);
    // draw the difference image (thresholded)
    grayDiff.draw(640,0);
    // draw the video player
    vidPlayer.draw(640,240);
    // print the smoothed motion value; snprintf bounds the write to
    // the buffer size (sprintf offers no such protection)
    char buf[256];
    snprintf(buf, sizeof(buf), "%f", sum);
    ofDrawBitmapString(buf, 20, 20);
}
audio synthesis example
This example requires maximilian which you can get from here: Maximilian on GitHub. You will need to get “maximilian.h” and “maximilian.cpp” and copy them to your project directory’s “src” folder. You will also need to “drag” these files into your visual studio or xcode project (inside the IDE).
testApp.h
#pragma once
#include "ofMain.h"
#include "maximilian.h" // new
#include "ofxOpenCv.h"
class testApp : public ofBaseApp{
public:
void setup();
void update();
void draw();
void audioRequested(float *buf, int size, int ch); // new: synthesizes output
void audioReceived(float *buf, int size, int ch); // new: unused input callback
float sum; // smoothed frame-to-frame motion measure
ofVideoGrabber vidGrabber;
ofxCvColorImage colorImg;
ofxCvGrayscaleImage grayImage;
ofxCvGrayscaleImage grayPreviousImage;
ofxCvGrayscaleImage grayDiff;
ofxCvContourFinder contourFinder;
maxiOsc myOsc; // new: oscillator whose frequency follows the motion value
maxiDelayline myDelay; // new: delay/echo applied to the oscillator
};
testApp.cpp
#include "testApp.h"
//--------------------------------------------------------------
void testApp::setup(){
// change the window to hold enough space for 6 movies (3x2)
ofSetWindowShape(320*3,240*2);
ofSetFrameRate(30);
// initialize our camera with a resolution of 320x240
vidGrabber.initGrabber(320,240);
// smoothed motion measure starts at zero
sum = 0;
// these are (wrappers for) opencv image containers
// we'll use for image processing
// we are going to find the difference between successive frames
colorImg.allocate(320,240);
grayImage.allocate(320,240);
grayPreviousImage.allocate(320,240);
grayDiff.allocate(320,240);
// setup the sound
int sampleRate = 44100;
int bufferSize = 320;
ofSoundStreamSetup(1, // output channels
1, // input channels
sampleRate, // how many samples (readings) per second
bufferSize, // size of each copy of audio
4); // latency of audio
}
//--------------------------------------------------------------
// grabs a camera frame and distills it into "sum", the smoothed
// motion measure that audioRequested() sonifies
void testApp::update(){
    // background to black
    ofBackground(0);
    // update the camera
    vidGrabber.update();
    // set the color image (opencv container) to the camera image
    colorImg.setFromPixels(vidGrabber.getPixels(), 320,240);
    // convert to grayscale
    grayImage = colorImg;
    // start the difference image from the current grayscale frame
    grayDiff = grayImage;
    // compute the absolute difference with the previous frame's grayscale image
    grayDiff.absDiff(grayPreviousImage);
    // store the current grayscale image for the next iteration of
    // update(). copy the already-converted grayImage instead of
    // re-converting colorImg — same pixels, one less color conversion
    grayPreviousImage = grayImage;
    // let's threshold the difference image:
    // all values less than 10 become 0, all values above 10 become 255
    grayDiff.threshold(10);
    // average the difference pixels into a single "motion" number and
    // smooth it over time (90% history, 10% new reading)
    sum = 0.9 * sum + 0.1 * cvSum(grayDiff.getCvImage()).val[0] / 320.0 / 240.0 / 10.0;
}
//--------------------------------------------------------------
// lays out the pipeline stages as 320x240 tiles
void testApp::draw(){
    // draw the input camera image
    vidGrabber.draw(0,0);
    // draw the opencv image
    colorImg.draw(0,240);
    // draw the grayscale conversion
    grayImage.draw(320,0);
    // draw the previous grayscale image
    grayPreviousImage.draw(320,240);
    // draw the difference image (thresholded)
    grayDiff.draw(640,0);
    // print the smoothed motion value; snprintf bounds the write to
    // the buffer size (sprintf offers no such protection)
    char buf[256];
    snprintf(buf, sizeof(buf), "%f", sum);
    ofDrawBitmapString(buf, 20, 20);
}
void testApp::audioRequested(float *buf, int size, int ch)
{
// synthesize the output: a sine wave whose frequency follows the
// motion measure (sum * 400), fed through a delay line with an
// 8000-sample delay and 0.5 feedback
for (int i = 0; i < size; i++) {
buf[i] = myDelay.dl( myOsc.sinewave(sum * 400.0), 8000, 0.5);
}
}
void testApp::audioReceived(float *buf, int size, int ch)
{
// input callback unused — motion, not sound, drives the synth
}
Day 3
Stick Figure
testApp.h
#pragma once
#include "ofMain.h"
// draws a stick figure whose scale and limb angles follow the mouse
class testApp : public ofBaseApp{
public:
void setup();
void update();
void draw();
};
testApp.cpp
#include "testApp.h"
//--------------------------------------------------------------
void testApp::setup(){
// 400x400 window; everything else happens in draw()
ofSetWindowShape(400, 400);
}
//--------------------------------------------------------------
void testApp::update(){
// nothing to update: draw() reads mouseX/mouseY directly
}
//--------------------------------------------------------------
// stick figure: the whole body scales with the mouse position and
// the limbs swing between 45 and 135 degrees as the mouse moves down
void testApp::draw(){
    ofBackground(200, 100, 100);
    // head and body are drawn from their centers
    ofSetRectMode(OF_RECTMODE_CENTER); // OF_RECTMODE_CORNER
    ofPushMatrix();
    // scale the whole figure 0..4x in x and y from the mouse position
    ofScale(mouseX / (float)ofGetWindowWidth() * 4.0,
            mouseY / (float)ofGetWindowHeight() * 4.0,
            1);
    // head
    ofRect(200, 50, 50, 50);
    // body
    ofRect(200, 150, 20, 80);
    // limbs rotate about their own top-left corner
    ofSetRectMode(OF_RECTMODE_CORNER);
    // mouse y mapped (and clamped) onto a 45..135 degree swing
    float degrees = ofMap(mouseY, 0, ofGetWindowHeight(),
                          45, 135, true);
    // left leg
    ofPushMatrix();
    ofTranslate(175, 225, 0);
    ofRotate(degrees, 0, 0, 1);
    ofRect(0, 0, 15, 80);
    ofPopMatrix();
    // right leg (mirrored swing)
    ofPushMatrix();
    ofTranslate(220, 230, 0);
    ofRotate(-degrees, 0, 0, 1);
    ofRect(0, 0, 15, 80);
    ofPopMatrix();
    // left arm
    ofPushMatrix();
    ofTranslate(175, 100, 0);
    ofRotate(degrees, 0, 0, 1);
    ofRect(0, 0, 15, 80);
    ofPopMatrix();
    /*
    // right arm
    ofPushMatrix();
    ofTranslate(250, 100, 0);
    ofRotate(45, 0, 0, 1);
    ofRect(0, 0, 15, 80);
    ofPopMatrix();
    */
    ofPopMatrix();
}
Face Detection
testApp.h
#pragma once
#include "ofMain.h"
#include "ofxCvHaarFinder.h"
class testApp : public ofBaseApp{
public:
void setup();
void update();
void draw();
ofVideoPlayer video1; // source movie
ofImage img; // full-resolution copy of the current frame
ofImage img_scaled; // 320x240 copy the detector runs on
ofxCvHaarFinder finder; // Haar-cascade face detector
};
testApp.cpp
#include "testApp.h"
//--------------------------------------------------------------
void testApp::setup(){
ofSetWindowShape(320, 240);
video1.loadMovie("beatit.mov");
// allocate both images up front: one at the movie's native size,
// one at the 320x240 size we detect and display at
img.allocate(video1.getWidth(), video1.getHeight(), OF_IMAGE_COLOR);
img_scaled.allocate(320, 240, OF_IMAGE_COLOR);
video1.play();
// the cascade file must live in the project's bin/data folder
finder.setup("haarcascade_frontalface_default.xml");
}
//--------------------------------------------------------------
void testApp::update(){
video1.update();
// copy the current frame out of the player...
img.setFromPixels(video1.getPixels(), video1.getWidth(), video1.getHeight(), OF_IMAGE_COLOR);
// ...downscale it to 320x240...
img_scaled = img;
img_scaled.resize(320, 240);
// ...and run face detection on the scaled copy; results land in
// finder.blobs, which draw() reads
finder.findHaarObjects(img_scaled);
}
//--------------------------------------------------------------
void testApp::draw(){
img_scaled.draw(0, 0);
ofNoFill();
for(int i = 0; i < finder.blobs.size(); i++) {
ofRectangle cur = finder.blobs[i].boundingRect;
ofRect(cur.x, cur.y, cur.width, cur.height);
}
}
text cloud
This example requires you find a true type font (.ttf file) and a text file and place it in the “data” folder inside the “bin” folder of your project. Replace the names of these to match your own files in the code.
testApp.h
#pragma once
#include "ofMain.h"
class testApp : public ofBaseApp{
public:
void setup();
void update();
void draw();
ofTrueTypeFont myFont_frabk_16; // 16pt font loaded from frabk.ttf in setup()
};
testApp.cpp
#include "testApp.h"
//--------------------------------------------------------------
void testApp::setup(){
// set the size of the window
ofSetWindowShape(400, 400);
// load in a true type font (from bin/data) for drawing text at 16pt
myFont_frabk_16.loadFont("frabk.ttf", 16);
}
//--------------------------------------------------------------
void testApp::update(){
// nothing to update: draw() re-reads the text file every frame
}
//--------------------------------------------------------------
// re-reads the text file every frame and scatters each word at a
// random position around the window center; the low alpha makes
// overlapping words blend into a cloud
void testApp::draw(){
    // let's blend our words using alpha transparency
    ofEnableAlphaBlending();
    // we'll set it to a low alpha since there are so many words
    ofSetColor(0, 0, 0, 40);
    // keep all drawing commands within this one transformation matrix
    ofPushMatrix();
    // translate our drawing near to the center of the screen
    ofTranslate(180, 190, 0);
    // create an "ifstream" object which lets us read from files
    // (it closes itself when it goes out of scope)
    ifstream myReadFile;
    // this is a more convenient string container that openFrameworks
    // often uses; reading into it also avoids the fixed-size char
    // buffer the original used, which a very long word could overflow
    string myString;
    // we'll open the text file located in the data path
    // we use this function, "ofToDataPath(...)" in order to
    // prepend "milton.txt" with the location of the data folder
    myReadFile.open(ofToDataPath("milton.txt").c_str());
    // if we were able to open "milton.txt"
    if (myReadFile.is_open())
    {
        // read one whitespace-delimited word at a time. testing the
        // extraction itself (rather than "while (!eof())") stops the
        // loop exactly at the end of input — the eof() form runs one
        // extra iteration and draws the last word twice
        while (myReadFile >> myString)
        {
            // keep a transformation just for the current word
            ofPushMatrix();
            // translate the word to a random x,y,z
            ofTranslate(ofRandom(200) - 100, ofRandom(200) - 100, ofRandom(200) - 100);
            // draw at the (transformed) origin
            myFont_frabk_16.drawString(myString, 0, 0);
            // done with this word's transformation
            ofPopMatrix();
        }
    }
    // done with our current transformation matrix
    ofPopMatrix();
    // done with blending
    ofDisableAlphaBlending();
}
People/Blob Tracking
This is a very powerful technique I’ve developed for finding blobs/people in a camera image. It first builds a model of the “background” image. It is then able to extract “foreground” entities, or things that are not in the background image. You’ll need to include these additional files into your project’s src directory, as well as drag them into your visual studio or xcode project (in the same way as you did with maximilian): 012-people-tracking-simplified.zip
EDIT: here is an updated version with fixes for windows users: 012-people-tracking-simplified_windows_fix.zip
testApp.h
#pragma once
#include "ofMain.h"
// camera and window-layout dimensions shared by testApp
const int W = 320;
const int H = 240;
const int WINDOW_WIDTH = W*3 + 40*2;
const int WINDOW_HEIGHT = H*1.5;
#include "ofVideoGrabber.h"
#include "pkmBlobTracker.h"
// receives blob enter/move/leave events from pkmBlobTracker
// through the ofCvBlobListener interface
class testApp : public ofBaseApp, public ofCvBlobListener {
public:
void setup();
void update();
void draw();
void keyPressed(int key);
void mouseMoved(int x, int y );
void mouseDragged(int x, int y, int button);
void mousePressed(int x, int y, int button);
void mouseReleased(int x, int y, int button);
void windowResized(int w, int h);
// blob tracker callbacks: a blob appeared / moved / disappeared
void blobOn( int x, int y, int id, int order );
void blobMoved( int x, int y, int id, int order );
void blobOff( int x, int y, int id, int order );
ofVideoGrabber vidGrabber;
pkmBlobTracker orientationTracker;
};
testApp.cpp
#include "testApp.h"
//--------------------------------------------------------------
void testApp::setup(){
vidGrabber.setVerbose(true);
vidGrabber.initGrabber(W,H);
ofSetWindowShape(WINDOW_WIDTH, WINDOW_HEIGHT);
ofSetFrameRate(60);
ofSetBackgroundAuto(true);
ofBackground(0,0,0);
// route the tracker's blobOn/blobMoved/blobOff events to this class
orientationTracker.setListener(this);
}
//--------------------------------------------------------------
void testApp::update(){
ofBackground(0,0,0);
vidGrabber.update();
// only feed the tracker when the camera actually has a new frame
if (vidGrabber.isFrameNew()) {
orientationTracker.update(vidGrabber.getPixels(), W, H);
}
}
//--------------------------------------------------------------
void testApp::draw(){
// the tracker draws its own debug view
orientationTracker.draw(0, 0);
}
//--------------------------------------------------------------
// 's' = camera settings dialog, 'f' = fullscreen toggle; any other
// key is forwarded to the blob tracker's own key handling
void testApp::keyPressed (int key){
    if (key == 's') {
        vidGrabber.videoSettings();
    } else if (key == 'f') {
        ofToggleFullscreen();
    } else {
        orientationTracker.keyPressed(key);
    }
}
// blob tracker callbacks — intentionally empty in this example
void testApp::blobOn( int x, int y, int id, int order )
{
}
void testApp::blobMoved( int x, int y, int id, int order )
{
}
void testApp::blobOff( int x, int y, int id, int order )
{
}
//--------------------------------------------------------------
// remaining ofBaseApp event handlers — intentionally empty
void testApp::mouseMoved(int x, int y ){
}
//--------------------------------------------------------------
void testApp::mouseDragged(int x, int y, int button){
}
//--------------------------------------------------------------
void testApp::mousePressed(int x, int y, int button){
}
//--------------------------------------------------------------
void testApp::mouseReleased(int x, int y, int button){
}
//--------------------------------------------------------------
void testApp::windowResized(int w, int h){
}
People Making Noise
We expand our people tracking example to assign sounds to each person and sonify their movements based on the speed that they move.
testApp.h
#pragma once
#include "ofMain.h"
// camera and window-layout dimensions shared by testApp
const int W = 320;
const int H = 240;
const int WINDOW_WIDTH = W*3 + 40*2;
const int WINDOW_HEIGHT = H*1.5;
#include "ofVideoGrabber.h"
#include "pkmBlobTracker.h"
// tracks people as blobs and assigns each new blob a looping sound
// whose playback speed follows how fast the blob moves
class testApp : public ofBaseApp, public ofCvBlobListener {
public:
void setup();
void update();
void draw();
void keyPressed(int key);
void mouseMoved(int x, int y );
void mouseDragged(int x, int y, int button);
void mousePressed(int x, int y, int button);
void mouseReleased(int x, int y, int button);
void windowResized(int w, int h);
void blobOn( int x, int y, int id, int order );
void blobMoved( int x, int y, int id, int order );
void blobOff( int x, int y, int id, int order );
ofVideoGrabber vidGrabber;
pkmBlobTracker orientationTracker;
vector<float> velocities; // smoothed speed per tracked blob
vector<int> px; // last known x per tracked blob
vector<int> py; // last known y per tracked blob
ofSoundPlayer sound[10]; // fixed pool; numSounds must stay <= 10
int numSounds; // how many pool slots are actually loaded
int currentSound; // next pool slot to assign (round-robin)
map<int, int> soundMapping; // blob id -> sound pool index
map<int, int> velocityMapping; // blob id -> index into velocities/px/py
};
testApp.cpp
#include "testApp.h"
//--------------------------------------------------------------
void testApp::setup(){
vidGrabber.initGrabber(W,H);
ofSetWindowShape(WINDOW_WIDTH, WINDOW_HEIGHT);
ofSetFrameRate(60);
ofSetBackgroundAuto(true);
ofBackground(0,0,0);
// route the tracker's blob events to this class
orientationTracker.setListener(this);
// load 3 looping sounds (files live in bin/data); setMultiPlay
// lets one sound serve several blobs at once
currentSound = 0;
numSounds = 3;
sound[0].loadSound("beat.wav", true);
sound[0].setMultiPlay(true);
sound[0].setLoop(true);
sound[1].loadSound("blast.wav", true);
sound[1].setMultiPlay(true);
sound[1].setLoop(true);
sound[2].loadSound("eli1.wav", true);
sound[2].setMultiPlay(true);
sound[2].setLoop(true);
}
//--------------------------------------------------------------
void testApp::update(){
ofBackground(0,0,0);
vidGrabber.update();
// only feed the tracker when the camera actually has a new frame
if (vidGrabber.isFrameNew()) {
orientationTracker.update(vidGrabber.getPixels(), W, H);
}
}
//--------------------------------------------------------------
void testApp::draw(){
// the tracker draws its own debug view
orientationTracker.draw(0, 0);
}
//--------------------------------------------------------------
// 's' = camera settings dialog, 'f' = fullscreen toggle; any other
// key is forwarded to the blob tracker's own key handling
void testApp::keyPressed (int key){
    if (key == 's') {
        vidGrabber.videoSettings();
    } else if (key == 'f') {
        ofToggleFullscreen();
    } else {
        orientationTracker.keyPressed(key);
    }
}
void testApp::blobOn( int x, int y, int id, int order )
{
printf("blob on\n");
// start a sound player with speed 0 (it ramps up as the blob moves)
sound[currentSound].play();
sound[currentSound].setSpeed(0.0);
// keep our mappings
// NOTE(review): entries are never removed when a blob disappears,
// so velocities/px/py grow for the lifetime of the app
soundMapping[id] = currentSound;
velocityMapping[id] = velocities.size();
velocities.push_back(0);
px.push_back(x);
py.push_back(y);
// cycle round-robin through the loaded sounds
currentSound = (currentSound + 1) % numSounds;
}
void testApp::blobMoved( int x, int y, int id, int order )
{
printf("blob moved\n");
int previous_x = px[velocityMapping[id]];
int previous_y = py[velocityMapping[id]];
float speed = sqrt( (x - previous_x)*(x - previous_x) +
(y - previous_y)*(y - previous_y) ) / 4.0f;
px[velocityMapping[id]] = x;
py[velocityMapping[id]] = y;
velocities[velocityMapping[id]] = 0.9 * velocities[velocityMapping[id]] + 0.1 * speed;
printf("%f\n", velocities[velocityMapping[id]]);
sound[soundMapping[id]].setSpeed(velocities[velocityMapping[id]]);
}
void testApp::blobOff( int x, int y, int id, int order )
{
printf("blob off\n");
sound[soundMapping[id]].setSpeed(0);
}
//--------------------------------------------------------------
// remaining ofBaseApp event handlers — intentionally empty
void testApp::mouseMoved(int x, int y ){
}
//--------------------------------------------------------------
void testApp::mouseDragged(int x, int y, int button){
}
//--------------------------------------------------------------
void testApp::mousePressed(int x, int y, int button){
}
//--------------------------------------------------------------
void testApp::mouseReleased(int x, int y, int button){
}
//--------------------------------------------------------------
void testApp::windowResized(int w, int h){
}
Day 4
Introduction
Today we will develop our ideas in sonification of movements a bit further. We will also work with projection mapping using some custom software I’ve built for openframeworks. Get the files here: https://github.com/pkmital/pkmProjectionMapper
Projection Mapping
testApp.h
#pragma once
#include "ofMain.h"
#include "pkmProjectionMapper.h"
// maps two drawing surfaces (a circle and a movie) onto separate
// quads for projection mapping
class testApp : public ofBaseApp{
public:
void setup();
void update();
void draw();
void keyPressed (int key);
void keyReleased(int key);
void mouseMoved(int x, int y );
void mouseDragged(int x, int y, int button);
void mousePressed(int x, int y, int button);
void mouseReleased(int x, int y, int button);
void windowResized(int w, int h);
void dragEvent(ofDragInfo dragInfo);
void gotMessage(ofMessage msg);
ofVideoPlayer video1, video2; // source movies (played muted)
pkmProjectionMapper myMapper1, myMapper2; // one mapper per surface
bool bDrawBoxes; // show the mappers' bounding boxes
};
testApp.cpp
#include "testApp.h"
//--------------------------------------------------------------
void testApp::setup(){
    // start from a 320x240 window; it can be reshaped or fullscreened later
    ofSetWindowShape(320, 240);

    // first movie: muted, playing continuously as a texture source
    video1.loadMovie("pingu.mov");
    video1.setVolume(0);
    video1.play();

    // second movie, same treatment
    video2.loadMovie("beatit.mov");
    video2.setVolume(0);
    video2.play();

    // each mapper gets its surface dimensions and an initial x,y offset
    myMapper1.initialize(320, 240, 0, 0);
    myMapper2.initialize(320, 240, 40, 40);

    // show the draggable corner boxes by default
    bDrawBoxes = true;
}
//--------------------------------------------------------------
void testApp::update(){
    // advance both movies and let the mappers refresh their transforms
    video1.update();
    video2.update();
    myMapper1.update();
    myMapper2.update();
}
//--------------------------------------------------------------
void testApp::draw(){
    // clear to black every frame
    ofBackground(0);

    // "screen" blending so overlapping surfaces lighten rather than occlude
    // NOTE(review): only alpha blending is disabled at the end; the screen
    // blend mode itself is never reset — confirm that is intended
    ofEnableAlphaBlending();
    ofEnableBlendMode(OF_BLENDMODE_SCREEN);
    ofSetColor(255, 255, 255, 255);

    // surface 1: everything between startMapping/stopMapping is warped
    myMapper1.startMapping();
    ofCircle(160, 120, 120);
    // video1.draw(0, 0, 320, 240);
    myMapper1.stopMapping();

    // surface 2: the second movie, warped by its own mapper
    myMapper2.startMapping();
    video2.draw(0, 0, 320, 240);
    myMapper2.stopMapping();

    // optional outlines used as drag handles for the corners
    if (bDrawBoxes) {
        myMapper1.drawBoundingBox();
        myMapper2.drawBoundingBox();
    }

    ofDisableAlphaBlending();
}
//--------------------------------------------------------------
void testApp::keyPressed(int key){
    // two keyboard shortcuts are handled
    switch (key) {
        case 'b':
            // toggle drawing of the mapper bounding boxes
            bDrawBoxes = !bDrawBoxes;
            break;
        case 'f':
            // toggle fullscreen
            ofToggleFullscreen();
            break;
    }
}
//--------------------------------------------------------------
void testApp::keyReleased(int key){
// unused openFrameworks event callback; intentionally left empty
}
//--------------------------------------------------------------
void testApp::mouseMoved(int x, int y ){
// unused openFrameworks event callback; intentionally left empty
}
//--------------------------------------------------------------
void testApp::mouseDragged(int x, int y, int button){
    // forward the drag to both mappers; each decides whether one of
    // its corners is currently grabbed
    myMapper1.mouseDragged(x, y);
    myMapper2.mouseDragged(x, y);
}
//--------------------------------------------------------------
void testApp::mousePressed(int x, int y, int button){
    // let each mapper test whether the press grabbed one of its corners
    myMapper1.mousePressed(x, y);
    myMapper2.mousePressed(x, y);
}
//--------------------------------------------------------------
void testApp::mouseReleased(int x, int y, int button){
    // release any corner either mapper was dragging
    myMapper1.mouseReleased(x, y);
    myMapper2.mouseReleased(x, y);
}
//--------------------------------------------------------------
void testApp::windowResized(int w, int h){
// unused openFrameworks event callback; intentionally left empty
}
//--------------------------------------------------------------
void testApp::gotMessage(ofMessage msg){
// unused openFrameworks event callback; intentionally left empty
}
//--------------------------------------------------------------
void testApp::dragEvent(ofDragInfo dragInfo){
// unused openFrameworks event callback; intentionally left empty
}
Sphere Projection Mapping
testApp.h
#pragma once
#include "ofMain.h"
#include "pkmProjectionMapper.h"
// Sphere projection-mapping demo: one surface shows a video-textured
// rotating gluSphere, the other a flat movie quad.
class testApp : public ofBaseApp{
public:
// openFrameworks lifecycle callbacks
void setup();
void update();
void draw();
// input and window event callbacks forwarded by openFrameworks
void keyPressed (int key);
void keyReleased(int key);
void mouseMoved(int x, int y );
void mouseDragged(int x, int y, int button);
void mousePressed(int x, int y, int button);
void mouseReleased(int x, int y, int button);
void windowResized(int w, int h);
void dragEvent(ofDragInfo dragInfo);
void gotMessage(ofMessage msg);
// two movies used as texture sources
ofVideoPlayer video1, video2;
// GLU quadric used to tessellate the textured sphere
// NOTE(review): never released with gluDeleteQuadric — confirm acceptable
GLUquadricObj *quadric;
// one mapper per projection surface
pkmProjectionMapper myMapper1, myMapper2;
// whether the draggable bounding boxes are drawn
bool bDrawBoxes;
};
testApp.cpp
#include "testApp.h"
//--------------------------------------------------------------
void testApp::setup(){
    // start from a 320x240 window; it can be reshaped or fullscreened later
    ofSetWindowShape(320, 240);

    // two muted movies play continuously as texture sources
    video1.loadMovie("pingu.mov");
    video1.setVolume(0);
    video1.play();
    video2.loadMovie("beatit.mov");
    video2.setVolume(0);
    video2.play();

    // each mapper gets its surface dimensions and an initial x,y offset
    myMapper1.initialize(320, 240, 0, 0);
    myMapper2.initialize(320, 240, 40, 40);

    // cap the update rate and sync drawing with the display refresh
    ofSetFrameRate(30);
    ofSetVerticalSync(true);

    // gluSphere needs normalized (non-ARB) texture coordinates
    ofDisableArbTex();

    // build the quadric used to tessellate the textured, lit sphere
    quadric = gluNewQuadric();
    gluQuadricTexture(quadric, GL_TRUE);
    gluQuadricNormals(quadric, GLU_SMOOTH);

    // show the draggable corner boxes by default
    bDrawBoxes = true;
}
//--------------------------------------------------------------
void testApp::update(){
    // advance both movies and let the mappers refresh their transforms
    video1.update();
    video2.update();
    myMapper1.update();
    myMapper2.update();
}
//--------------------------------------------------------------
void testApp::draw(){
    // enable depth testing so the 3D sphere self-occludes correctly
    glEnable(GL_DEPTH_TEST);
    // clear to black every frame
    ofBackground(0);
    ofSetColor(255, 255, 255);

    // surface 1: the video-textured rotating sphere
    myMapper1.startMapping();
    // move to the middle of the 320x240 surface
    ofTranslate(160, 120, 0);
    // rotate the sphere over time (one degree per frame)
    ofRotateY(ofGetFrameNum());
    ofRotateX(-90); // north pole facing up
    // copy the current video frame into a texture sized for gluSphere
    // NOTE(review): allocating and resizing an ofImage every frame is
    // expensive — consider making image1 a member allocated once in setup()
    ofImage image1;
    image1.allocate(320, 240, OF_IMAGE_COLOR);
    image1.setFromPixels(video1.getPixels(), 320, 240, OF_IMAGE_COLOR);
    image1.resize(256, 256);
    image1.getTextureReference().bind();
    // draw the sphere with the bound video texture
    gluSphere(quadric, 200, 100, 100);
    // BUGFIX: unbind the texture so its state does not leak onto the
    // subsequent draw calls (mapper boxes, second surface)
    image1.getTextureReference().unbind();
    // video1.draw(0, 0, 320, 240);
    myMapper1.stopMapping();

    // surface 2: the second movie on a flat quad
    myMapper2.startMapping();
    video2.draw(0, 0, 320, 240);
    myMapper2.stopMapping();

    // done with 3D drawing
    glDisable(GL_DEPTH_TEST);

    // optional outlines used as drag handles for the corners
    if (bDrawBoxes) {
        myMapper1.drawBoundingBox();
        myMapper2.drawBoundingBox();
    }
}
//--------------------------------------------------------------
void testApp::keyPressed(int key){
    // two keyboard shortcuts are handled
    switch (key) {
        case 'b':
            // toggle drawing of the mapper bounding boxes
            bDrawBoxes = !bDrawBoxes;
            break;
        case 'f':
            // toggle fullscreen
            ofToggleFullscreen();
            break;
    }
}
//--------------------------------------------------------------
void testApp::keyReleased(int key){
// unused openFrameworks event callback; intentionally left empty
}
//--------------------------------------------------------------
void testApp::mouseMoved(int x, int y ){
// unused openFrameworks event callback; intentionally left empty
}
//--------------------------------------------------------------
void testApp::mouseDragged(int x, int y, int button){
    // forward the drag to both mappers; each decides whether one of
    // its corners is currently grabbed
    myMapper1.mouseDragged(x, y);
    myMapper2.mouseDragged(x, y);
}
//--------------------------------------------------------------
void testApp::mousePressed(int x, int y, int button){
    // let each mapper test whether the press grabbed one of its corners
    myMapper1.mousePressed(x, y);
    myMapper2.mousePressed(x, y);
}
//--------------------------------------------------------------
void testApp::mouseReleased(int x, int y, int button){
    // release any corner either mapper was dragging
    myMapper1.mouseReleased(x, y);
    myMapper2.mouseReleased(x, y);
}
//--------------------------------------------------------------
void testApp::windowResized(int w, int h){
// unused openFrameworks event callback; intentionally left empty
}
//--------------------------------------------------------------
void testApp::gotMessage(ofMessage msg){
// unused openFrameworks event callback; intentionally left empty
}
//--------------------------------------------------------------
void testApp::dragEvent(ofDragInfo dragInfo){
// unused openFrameworks event callback; intentionally left empty
}
Day 5
Introduction
Today we will have an open lab where you can work on putting some of the things I’ve shown you into practice. We will also look at one example of emergent flocking behavior, using a library developed by Marco Gillies at Goldsmiths. Get the additional files required here: [boid.zip]
EDIT: Here is an updated boid for the latest openFrameworks (v07): [boid-fixed.zip]
Boids
testApp.h
#ifndef _TEST_APP
#define _TEST_APP
// NOTE(review): identifiers starting with an underscore + capital letter are
// reserved in C++; a guard like TEST_APP_H would be safer
#include "ofMain.h"
#include <vector>
#include "boid.h"
// Flocking demo: owns a vector of heap-allocated Boid pointers that are
// updated and drawn each frame, and deleted in the destructor.
class testApp : public ofBaseApp{
public:
// frees the Boid objects allocated in setup()
~testApp();
// openFrameworks lifecycle callbacks
void setup();
void update();
void draw();
// input and window event callbacks forwarded by openFrameworks
void keyPressed(int key);
void keyReleased(int key);
void mouseMoved(int x, int y );
void mouseDragged(int x, int y, int button);
void mousePressed(int x, int y, int button);
void mouseReleased(int x, int y, int button);
void windowResized(int w, int h);
// NOTE(review): bFullscreen appears unused in the visible code — confirm
bool bFullscreen;
// the flock; raw owning pointers, released in ~testApp()
vector<Boid *> boids;
};
#endif
testApp.cpp
#include "testApp.h"
testApp::~testApp()
{
    // free every heap-allocated Boid created in setup()
    for (vector<Boid *>::iterator it = boids.begin(); it != boids.end(); ++it)
    {
        delete *it;
    }
}
//--------------------------------------------------------------
void testApp::setup(){
    // window placement, size, and frame rate
    ofSetWindowPosition(20, 20);
    ofSetWindowShape(640, 480);
    ofSetFrameRate(60);
    // dark teal background
    ofBackground(0,50,50);
    // populate the flock with 50 heap-allocated boids
    // (they are freed in the destructor)
    const int flockSize = 50;
    for (int i = 0; i < flockSize; i++)
        boids.push_back(new Boid());
}
//--------------------------------------------------------------
void testApp::update(){
ofVec3f min_pos(0, 0);
ofVec3f max_pos(ofGetWidth(), ofGetHeight());
for (int i = 0; i < boids.size(); i++)
{
boids[i]->update(boids, min_pos, max_pos);
}
}
//--------------------------------------------------------------
void testApp::draw(){
ofSetupScreen();
for (int i = 0; i < boids.size(); i++)
{
boids[i]->draw();
}
}
//--------------------------------------------------------------
void testApp::keyPressed(int key){
// 'f' toggles fullscreen; all other keys are ignored
if(key == 'f'){
ofToggleFullscreen();
}
}
//--------------------------------------------------------------
void testApp::keyReleased(int key){
// unused openFrameworks event callback; intentionally left empty
}
//--------------------------------------------------------------
void testApp::mouseMoved(int x, int y ){
// unused openFrameworks event callback; intentionally left empty
}
//--------------------------------------------------------------
void testApp::mouseDragged(int x, int y, int button){
// unused openFrameworks event callback; intentionally left empty
}
//--------------------------------------------------------------
void testApp::mousePressed(int x, int y, int button){
// unused openFrameworks event callback; intentionally left empty
}
//--------------------------------------------------------------
void testApp::mouseReleased(int x, int y, int button){
// unused openFrameworks event callback; intentionally left empty
}
//--------------------------------------------------------------
void testApp::windowResized(int w, int h){
// unused openFrameworks event callback; intentionally left empty
}
Colored Motion
testApp.h
#pragma once
#include "ofMain.h"
#include "ofxOpenCv.h"
// Colored-motion demo: colors each camera pixel by how much it changed
// between frames, and drives a movie's playback speed from overall motion.
class testApp : public ofBaseApp{
public:
// openFrameworks lifecycle callbacks
void setup();
void update();
void draw();
// exponentially smoothed average-motion measure (updated each frame)
float sum;
// live camera input
ofVideoGrabber vidGrabber;
// current camera frame (color)
ofxCvColorImage colorImg;
// motion-colored output image shown in draw()
ofxCvColorImage colorDiffImg;
// grayscale frame and per-channel color planes
// NOTE(review): ch4 is allocated in setup() but appears unused — confirm
ofxCvGrayscaleImage grayImage, ch1, ch2, ch3, ch4;
ofxCvGrayscaleImage grayPreviousImage;
// float-precision working images for normalization and multiplication
ofxCvFloatImage floatImg,floatImg2, ch1f, ch2f, ch3f;
// absolute difference between current and an older grayscale frame
ofxCvGrayscaleImage grayDiff;
// movie whose playback speed tracks the motion measure
ofVideoPlayer vidPlayer;
// short history of recent grayscale frames (oldest at the front)
vector<ofxCvGrayscaleImage> previousImages;
};
testApp.cpp
#include "testApp.h"
//--------------------------------------------------------------
void testApp::setup(){
    // single 320x240 window for the motion-colored output
    ofSetWindowShape(320,240);
    ofSetFrameRate(30);

    // initialize the camera at 320x240
    vidGrabber.initGrabber(320,240);

    // looping movie whose playback speed will follow the motion measure
    vidPlayer.loadMovie("sunra_pink.mov");
    vidPlayer.setLoopState(OF_LOOP_NORMAL);
    vidPlayer.play();

    // the smoothed motion measure starts at rest
    sum = 0;

    // allocate every opencv image container at camera resolution;
    // these are reused each frame by the differencing pipeline in update()
    colorImg.allocate(320,240);
    colorDiffImg.allocate(320, 240);
    grayImage.allocate(320,240);
    grayPreviousImage.allocate(320,240);
    grayDiff.allocate(320,240);
    floatImg.allocate(320, 240);
    floatImg2.allocate(320, 240);
    ch1.allocate(320, 240);
    ch2.allocate(320, 240);
    ch3.allocate(320, 240);
    ch1f.allocate(320, 240);
    ch2f.allocate(320, 240);
    ch3f.allocate(320, 240);
    ch4.allocate(320, 240);

    // seed the frame history with three copies of the (blank) previous image
    previousImages.push_back(grayPreviousImage);
    previousImages.push_back(grayPreviousImage);
    previousImages.push_back(grayPreviousImage);
}
//--------------------------------------------------------------
void testApp::update(){
// background to black
ofBackground(0);
// update the camera
vidGrabber.update();
if (vidGrabber.isFrameNew()) {
// set the color image (opencv container) to the camera image
colorImg.setFromPixels(vidGrabber.getPixels(), 320,240);
// convert to grayscale
grayImage = colorImg;
// calculate the difference image
grayDiff = grayImage;
// compute the absolute difference with a previous grayscale frame
// (previousImages[0] is the OLDEST stored frame, so this differences
// against a frame several updates back, not just the last one)
grayDiff.absDiff(previousImages[0]);
// store the current grayscale image for the next iteration of update()
previousImages.push_back(grayImage);
if (previousImages.size() > 5) {
previousImages.erase(previousImages.begin());
}
// smooth the difference image in float precision
floatImg = grayDiff;
floatImg.blur();
// find the extreme difference values so we can normalize by the max
double minv, maxv;
CvPoint minl, maxl;
cvMinMaxLoc(floatImg.getCvImage(), &minv, &maxv, &minl, &maxl);
// normalize in place: m wraps floatImg's pixel buffer, and the MatExpr
// assignment writes back into that same buffer because size/type match
// NOTE(review): if the frame is completely static, maxv can be 0 and
// this divides by zero — worth guarding
cv::Mat m = floatImg.getCvImage();
m = m / maxv * 3.0;
// split the camera image into its three color planes
cvSplit(colorImg.getCvImage(), ch1.getCvImage(), ch2.getCvImage(), ch3.getCvImage(), 0);
ch1.flagImageChanged();
ch2.flagImageChanged();
ch3.flagImageChanged();
// convert each plane to float so it can be multiplied by the motion mask
ch1f = ch1;
ch2f = ch2;
ch3f = ch3;
// modulate each color plane by the normalized motion image
cvMul(ch1f.getCvImage(), floatImg.getCvImage(), ch1f.getCvImage());
cvMul(ch2f.getCvImage(), floatImg.getCvImage(), ch2f.getCvImage());
cvMul(ch3f.getCvImage(), floatImg.getCvImage(), ch3f.getCvImage());
// convert back to 8-bit planes
ch1 = ch1f;
ch2 = ch2f;
ch3 = ch3f;
// reassemble the modulated planes into the color "motion" image
cvMerge(ch1.getCvImage(), ch2.getCvImage(), ch3.getCvImage(), 0, colorDiffImg.getCvImage());
colorDiffImg.flagImageChanged();
// let's threshold the difference image,
// all values less than 10 are 0, all values above 10 become 255
//grayDiff.threshold(10);
// here we will find the sum and then average of all the pixels in the difference image
// this will be used for a simple measure of "motion"
// (exponential smoothing: 90% old value, 10% new per-pixel average)
sum = 0.9 * sum + 0.1 * cvSum(grayDiff.getCvImage()).val[0] / 320.0 / 240.0 / 10.0;
// let's change the speed of our movie based on the motion value we calculated
vidPlayer.setSpeed(sum);
vidPlayer.update();
// edge-detect the grayscale frame into floatImg2
// NOTE(review): floatImg2 is not drawn in the visible draw() — confirm used
floatImg = grayImage;
cvSobel(floatImg.getCvImage(), floatImg2.getCvImage(), 1, 1, 5);
floatImg2.flagImageChanged();
}
}
//--------------------------------------------------------------
void testApp::draw(){
    // show the motion-colored difference image scaled to the window
    colorDiffImg.draw(0,0,ofGetWindowWidth(),ofGetWindowHeight());
    // overlay the smoothed motion measure as text
    char buf[256];
    // snprintf instead of sprintf: bounds the write to the buffer size
    snprintf(buf, sizeof(buf), "%f", sum);
    ofDrawBitmapString(buf, 20, 20);
}