Difference between revisions of "Sound Sketch - Emilio Marcelino, Greg Parsons, and Ben Brickley"

From Robert-Depot
Jump to: navigation, search
(Documentation)
 
(5 intermediate revisions by the same user not shown)
Line 24: Line 24:
 
The project worked well as a display piece and functioned while under demand. After discussion with the class we agree that the project is stronger as a volume level reacting rather than frequency reacting design. It would be interesting to further develop the project and implement it into a gallery setting. Where people would walk past it and have it react. It would be better to fine tune it so that it can be used on a larger set of screens with smaller drawings so that it would not have to be reset after a short amount of use, this would allow more people and a longer amount of time for interaction.  
 
The project worked well as a display piece and functioned while under demand. After discussion with the class we agree that the project is stronger as a volume level reacting rather than frequency reacting design. It would be interesting to further develop the project and implement it into a gallery setting. Where people would walk past it and have it react. It would be better to fine tune it so that it can be used on a larger set of screens with smaller drawings so that it would not have to be reset after a short amount of use, this would allow more people and a longer amount of time for interaction.  
  
Images coming soon.
+
== '''Video''' ==
 +
 
 +
[http://www.youtube.com/watch?v=vmtfh574R3o Video Documentation]
 +
 
 +
== '''Code Audio Level Based'''==
  
 
<code>
 
<code>
Line 152: Line 156:
 
   super.stop();
 
   super.stop();
 
}
 
}
 +
 +
</code>
 +
 +
=== '''Code Audio Frequency Based''' ===
 +
 +
<code>
 +
 +
 +
import hypermedia.video.*;
 +
import ddf.minim.*;
 +
import ddf.minim.analysis.*;
 +
import ddf.minim.signals.*;
 +
 +
Minim minim;
 +
AudioInput in;
 +
FFT fft;
 +
 +
OpenCV opencv;
 +
 +
// contrast/brightness values
 +
int contrast_value    = 0;
 +
int brightness_value  = 0;
 +
 +
float loudestFreqAmp = 0;
 +
float loudestFreq = 0;
 +
int timerCounter = 0;
 +
 +
void setup()
 +
{
 +
  size(640, 480, P2D);
 +
  frameRate(30);
 +
  noCursor();
 +
  minim = new Minim(this);
 +
  minim.debugOn();
 +
  background(255);
 +
  noStroke();
 +
  // get a line in from Minim, default bit depth is 16
 +
  in = minim.getLineIn(Minim.STEREO, 1024);
 +
  fft = new FFT(in.bufferSize(), in.sampleRate());
 +
 +
  opencv = new OpenCV( this );
 +
  opencv.capture( width, height );                  // open video stream
 +
  opencv.cascade( OpenCV.CASCADE_FRONTALFACE_ALT );  // load detection description, here-> front face detection : "haarcascade_frontalface_alt.xml"
 +
 +
 +
}
 +
 +
void draw()
 +
{
 +
 +
  // grab a new frame
 +
  // and convert to gray
 +
  opencv.read();
 +
  opencv.convert( GRAY );
 +
  opencv.contrast( contrast_value );
 +
  opencv.brightness( brightness_value );
 +
 +
  // proceed detection
 +
  java.awt.Rectangle[] faces = opencv.detect( 1.2, 2, OpenCV.HAAR_DO_CANNY_PRUNING, 40, 40 );
 +
 +
  // display the image
 +
  //image( opencv.image(), 0, 0 );
 +
 +
  // draw face area(s)
 +
  //  noFill();
 +
  //  stroke(255,0,0);
 +
  //  for( int i=0; i<faces.length; i++ ) {
 +
  //    rect( faces[i].x, faces[i].y, faces[i].width, faces[i].height );
 +
  //  }
 +
 +
  int posX = 0;
 +
  int posY = 0;
 +
 +
  for( int i=0; i<faces.length; i++ ) {
 +
    posX = faces[i].x;
 +
    posY = faces[i].y;
 +
  }
 +
 +
  fft.window(FFT.HAMMING);
 +
  for(int i = 0; i < fft.specSize(); i++)
 +
  {
 +
    // draw the line for frequency band i, scaling it by 4 so we can
 +
    //see it a bit better
 +
    if (fft.getBand(i) > loudestFreqAmp && fft.getBand(i) > 10)
 +
    {
 +
      loudestFreqAmp = fft.getBand(i);
 +
      loudestFreq = i * 4;
 +
 +
      // draw the thing
 +
      drawCircles(posX, posY, (int)loudestFreqAmp, 10); 
 +
 +
 +
      timerCounter = 0;
 +
      System.out.println(loudestFreq + "---" + loudestFreqAmp);
 +
    }
 +
  }
 +
  loudestFreqAmp = 0;
 +
 +
  fft.forward(in.mix);
 +
 +
  if(timerCounter >= 20)
 +
  {
 +
    background(255);
 +
    timerCounter = 0;
 +
  }
 +
 +
  timerCounter++;
 +
 +
 +
 +
 +
}
 +
 +
void keyPressed() {
 +
  if (key == 'a') {
 +
    background(255);
 +
  }
 +
}
 +
 +
 +
// Circle splatter machine
 +
void drawCircles(float x, float y, int radius, int level)
 +
{
 +
  noStroke();
 +
  float tt = 116 * level / 6.0;
 +
  fill (tt, 45, 255);
 +
  ellipse(x, y, radius*2, radius*2);
 +
  if (level > 1) {
 +
    level = level - 1;
 +
    int num = int (random(2, 5));
 +
    for(int i=0; i<num; i++) {
 +
      float a = random(0, TWO_PI);
 +
      float nx = x + cos(a) * 6.0 * level;
 +
      float ny = y + sin(a) * 6.0 * level;
 +
      drawCircles(nx, ny, radius/2, level);
 +
    }
 +
  }
 +
}
 +
 +
void stop()
 +
{
 +
  // always close Minim audio classes when you are done with them
 +
  in.close();
 +
  minim.stop();
 +
 +
  super.stop();
 +
}
 +
  
 
</code>
 
</code>

Latest revision as of 16:08, 20 May 2010

Motivation

Sticking to our primary interests in color, movement and scale, we decided to create a project that would require a computer, a microphone, and a mouse. With these three components we would be able to create a graffiti drawing program. Later came another interest in using the webcam to create a drawing program. So we replaced the mouse element with the webcam.


Interaction

To interact with our piece, the individual needs a webcam, a microphone, and a computer running Processing. While the sketch is running, head tracking replaces the mouse, and speaking or blowing into the microphone makes the program draw.

Basically... speaking and moving your head simultaneously will allow you to draw within Processing.

Function

We will add the webcam head tracking sketch to the original microphone/mouse sketch to create a microphone and webcam drawing program.

Visualization

HEDZl.jpg

Documentation

The project worked well as a display piece and functioned while under demand. After discussion with the class, we agree that the project is stronger as a volume-level-reacting design rather than a frequency-reacting one. It would be interesting to develop the project further and install it in a gallery setting, where people walking past it would make it react. It would also be worth fine-tuning it to run on a larger set of screens with smaller drawings, so that it would not have to be reset after a short period of use; this would allow more people to interact with it for a longer amount of time.

Video

Video Documentation

Code Audio Level Based

import hypermedia.video.*; import ddf.minim.*; import ddf.minim.analysis.*; import ddf.minim.signals.*;

// Audio-input (Minim) and computer-vision (OpenCV) handles, shared by
// setup()/draw()/stop().
Minim minim;
AudioInput in;

OpenCV opencv;

// contrast/brightness values applied to each captured frame.
// (Restored: the wiki rendering had folded these declarations onto the
// comment line, which commented them out.)
int contrast_value   = 0;
int brightness_value = 0;

// NOTE(review): loudestFreqAmp/loudestFreq are only read by the
// frequency-based variant below; they are unused in this level-based sketch.
float loudestFreqAmp = 0;
float loudestFreq = 0;
int timerCounter = 0;  // frames since the canvas was last cleared

// One-time initialization: window, audio line-in, and face detector.
// Statement order matters in Processing: size() must run first, and the
// OpenCV capture/cascade setup depends on width/height set by size().
void setup() {

 size(640, 480, P2D);
 frameRate(30);
 noCursor();
 minim = new Minim(this);
 minim.debugOn();
 background(255);
 noStroke();
 // get a line in from Minim, default bit depth is 16
 in = minim.getLineIn(Minim.STEREO, 1024);
 opencv = new OpenCV( this );
 opencv.capture( width, height );                   // open video stream
 opencv.cascade( OpenCV.CASCADE_FRONTALFACE_ALT );  // load detection description, here-> front face detection : "haarcascade_frontalface_alt.xml"


}


// Per-frame loop: track the viewer's face with OpenCV, measure the
// microphone's peak level, and splatter circles at the face position
// with a radius scaled by volume. Clears the canvas every 20 frames.
void draw() {

  // Capture a frame and normalize it for the detector.
  opencv.read();
  opencv.convert( GRAY );
  opencv.contrast( contrast_value );
  opencv.brightness( brightness_value );

  // Run the Haar-cascade face detector on the prepared frame.
  java.awt.Rectangle[] faces = opencv.detect( 1.2, 2, OpenCV.HAAR_DO_CANNY_PRUNING, 40, 40 );

  // Use the last detected face as the brush position (0,0 when none).
  int brushX = 0;
  int brushY = 0;
  for ( java.awt.Rectangle face : faces ) {
    brushX = face.x;
    brushY = face.y;
  }

  // Scan the input buffer for the loudest sample, echoing each new peak
  // to the console as we find it (same output as the original).
  float peak = 0;
  for ( int s = 0; s < in.bufferSize() - 1; s++ ) {
    if ( abs(in.mix.get(s)) > peak ) {
      peak = abs(in.mix.get(s));
      System.out.println(in.mix.get(s));
    }
  }

  // Map the peak level to a circle radius and paint at the face.
  drawCircles(brushX, brushY, peak * 150, 10);

  // Wipe the canvas every 20 frames so drawings do not pile up forever.
  if (timerCounter >= 20) {
    background(255);
    timerCounter = 0;
  }
  timerCounter++;

}

// Keyboard handler: pressing 'a' wipes the canvas back to white.
void keyPressed() {
  switch (key) {
    case 'a':
      background(255);
      break;
  }
}

// Circle splatter machine void drawCircles(float x, float y, float radius, int level) {

 noStroke();
 float tt = 200 * level / 6.0; 
 fill (tt, 0, 116);//tt, 0, 116
 ellipse(x, y, radius*2, radius*2);
 if (level > 1) {
   level = level - 1;
   int num = int (random(2, 5));
   for(int i=0; i<num; i++) { 
     float a = random(0, TWO_PI);
     float nx = x + cos(a) * 6.0 * level; 
     float ny = y + sin(a) * 6.0 * level; 
     drawCircles(nx, ny, radius/2, level); 
   }
 }

}

// Processing shutdown hook: release the audio resources before the
// sketch exits, then defer to the superclass implementation.
void stop() {

 // always close Minim audio classes when you are done with them
 in.close();
 minim.stop();
 super.stop();

}

Code Audio Frequency Based


import hypermedia.video.*; import ddf.minim.*; import ddf.minim.analysis.*; import ddf.minim.signals.*;

// Audio-analysis (Minim + FFT) and computer-vision (OpenCV) handles,
// shared by setup()/draw()/stop().
Minim minim;
AudioInput in;
FFT fft;

OpenCV opencv;

// contrast/brightness values applied to each captured frame.
// (Restored: the wiki rendering had folded these declarations onto the
// comment line, which commented them out.)
int contrast_value   = 0;
int brightness_value = 0;

float loudestFreqAmp = 0;  // loudest band amplitude seen this frame
float loudestFreq = 0;     // rough frequency label of that band (see draw())
int timerCounter = 0;      // frames since the canvas was last cleared

// One-time initialization: window, audio line-in, FFT analyzer, and face
// detector. Statement order matters in Processing: size() must run first,
// the FFT is sized from the line-in's buffer, and the OpenCV capture
// depends on width/height set by size().
void setup() {

 size(640, 480, P2D);
 frameRate(30);
 noCursor();
 minim = new Minim(this);
 minim.debugOn();
 background(255);
 noStroke();
 // get a line in from Minim, default bit depth is 16
 in = minim.getLineIn(Minim.STEREO, 1024);
 fft = new FFT(in.bufferSize(), in.sampleRate());
 opencv = new OpenCV( this );
 opencv.capture( width, height );                   // open video stream
 opencv.cascade( OpenCV.CASCADE_FRONTALFACE_ALT );  // load detection description, here-> front face detection : "haarcascade_frontalface_alt.xml"


}

// Per-frame loop for the frequency-based variant: track the viewer's face
// with OpenCV, find the loudest FFT band above a noise floor, and splatter
// circles at the face position sized by that band's amplitude. The canvas
// is wiped after 20 frames without sound activity.
void draw() {

  // Capture a frame and normalize it for the detector.
  opencv.read();
  opencv.convert( GRAY );
  opencv.contrast( contrast_value );
  opencv.brightness( brightness_value );

  // Run the Haar-cascade face detector on the prepared frame.
  java.awt.Rectangle[] faces = opencv.detect( 1.2, 2, OpenCV.HAAR_DO_CANNY_PRUNING, 40, 40 );

  // Use the last detected face as the brush position (0,0 when none).
  int posX = 0;
  int posY = 0;
  for( int i=0; i<faces.length; i++ ) {
    posX = faces[i].x;
    posY = faces[i].y;
  }

  // FIX: forward the FFT over the *current* input buffer before reading
  // bands. The original called fft.forward(in.mix) after the analysis
  // loop, so the first frame analyzed an empty spectrum and every later
  // frame lagged one buffer behind the audio.
  fft.window(FFT.HAMMING);
  fft.forward(in.mix);

  for(int i = 0; i < fft.specSize(); i++)
  {
    // Track the loudest band, ignoring anything at or below a noise
    // floor of 10, and draw once per new peak.
    if (fft.getBand(i) > loudestFreqAmp && fft.getBand(i) > 10)
    {
      loudestFreqAmp = fft.getBand(i);
      loudestFreq = i * 4;  // rough label only; true Hz would be fft.indexToFreq(i)

      // draw the thing
      drawCircles(posX, posY, (int)loudestFreqAmp, 10);

      timerCounter = 0;     // sound activity postpones the canvas wipe
      System.out.println(loudestFreq + "---" + loudestFreqAmp);
    }
  }
  loudestFreqAmp = 0;  // reset the peak tracker for the next frame

  // Clear the canvas after 20 frames with no loud band.
  if(timerCounter >= 20)
  {
    background(255);
    timerCounter = 0;
  }
  timerCounter++;

}

// Keyboard handler: 'a' clears the drawing surface; all other keys are
// ignored.
void keyPressed() {
  if (key != 'a') {
    return;
  }
  background(255);
}


// Circle splatter machine void drawCircles(float x, float y, int radius, int level) {

 noStroke();
 float tt = 116 * level / 6.0; 
 fill (tt, 45, 255);
 ellipse(x, y, radius*2, radius*2);
 if (level > 1) {
   level = level - 1;
   int num = int (random(2, 5));
   for(int i=0; i<num; i++) { 
     float a = random(0, TWO_PI);
     float nx = x + cos(a) * 6.0 * level; 
     float ny = y + sin(a) * 6.0 * level; 
     drawCircles(nx, ny, radius/2, level); 
   }
 }

}

// Processing shutdown hook: release the audio resources before the
// sketch exits, then defer to the superclass implementation.
void stop() {

 // always close Minim audio classes when you are done with them
 in.close();
 minim.stop();
 super.stop();

}