research

work with mark: array primitives and benchmarking the framework

i spent a lot of time benchmarking the code for the agents in the different versions of the multi-agent frameworks i posted about here earlier. best performance boost was (of course) when i ported some vital parts of the supercollider code to c. so for example one cpu-intensive task that the agents had to do a lot was to find out about their surroundings. and another 'heavy' task was to calculate the distance to other objects. each operation wasn't very demanding on its own, but when hundreds of agents would do this at the same time, we really needed the speed of the c primitives.

below is the c code i came up with. it replaces some of the computationally heavy parts in the Surroundings and ALocation supercollider classes. to try them out you'd need to download the supercollider source code from svn, add my code to the file PyrArrayPrimitives.cpp and then recompile the whole application. edit the file extArrayedCollection.sc in the A4.zip package posted here earlier to use the primitives.
one issue we then had was to distribute this 'hack'. supercollider doesn't have an api for adding extensions like this to the language (but there's a nice plugin architecture for the server). so i had to build dedicated sc applications including this speed hack.

int prArrayAsALocationIndex(struct VMGlobals *g, int numArgsPushed)
{
        PyrSlot *a, *b;
        PyrObject *obj;
        int size, i;
        double asIndex, w, worldSize;
        a = g->sp - 1;
        b = g->sp;
        worldSize = b->ui;
        obj = a->uo;
        size = obj->size;
        asIndex= 0;
        for (i=0; i<size; ++i) {
                getIndexedDouble(obj, i, &w);
                asIndex= asIndex+(pow(worldSize, i)*w);
        }
        SetFloat(a, asIndex);
        return errNone;
}
int prArrayAsALocationRoundedIndex(struct VMGlobals *g, int numArgsPushed)
{
        PyrSlot *a, *b;
        PyrObject *obj;
        int size, i;
        double asIndex, w, worldSize;
        a = g->sp - 1;
        b = g->sp;
        worldSize = b->ui;
        obj = a->uo;
        size = obj->size;
        asIndex= 0;
        for (i=0; i<size; ++i) {
                getIndexedDouble(obj, i, &w);
                w= sc_round(w, 1.0);
                w= sc_clip(w, 0, worldSize-1);
                asIndex= asIndex+(pow(worldSize, i)*w);
        }
        SetFloat(a, asIndex);
        return errNone;
}
int prArrayDistance(struct VMGlobals *g, int numArgsPushed)
{
        PyrSlot *a, *b;
        PyrObject *obj1, *obj2;
        int size, i;
        double w1, w2, distance;
        a = g->sp - 1;
        b = g->sp;
        if (b->utag != tagObj || a->uo->classptr != b->uo->classptr) return errWrongType;
        obj1 = a->uo;
        obj2 = b->uo;
        size = obj1->size;
        distance= 0;
        for (i=0; i<size; ++i) {
                getIndexedDouble(obj1, i, &w1);
                getIndexedDouble(obj2, sc_mod(i, size), &w2);
                distance= distance+pow(w2-w1, 2);
        }
        SetFloat(a, fabs(sqrt(distance)));
        return errNone;
}
// Primitive for ALocation.surroundingLocations: builds every coordinate
// tuple whose offsets lie within +-area of the receiver's coordinates,
// in worldDim dimensions (the cartesian product of the per-dimension
// offset list). When the exclude flag d is true, the centre tuple
// (offset all zeros) is left out. The receiver slot is replaced by an
// array of worldDim-element coordinate arrays.
int prArraySurroundings(struct VMGlobals *g, int numArgsPushed)
{
        PyrSlot *a, *b, *c, *d, *areaArraySlots, *indexArraySlots, *outArraySlots;
        PyrObject *obj, *areaArray, *indexArray, *outArray;
        int areaSize, outSize, i, j, worldDim, area;
        double w;
        a = g->sp - 3;          //list
        b = g->sp - 2;          //worldDim
        c = g->sp - 1;          //area - as float possible later?
        d = g->sp;                      //boolean - exclude/include fix later
        if (b->utag != tagInt) return errWrongType;
        if (c->utag != tagInt) return errWrongType;
        if (d->utag != tagTrue && d->utag != tagFalse) return errWrongType;
        obj = a->uo;
        worldDim = b->ui;
        area = c->ui;
        areaSize = area*2+1;    // offsets per dimension: -area .. +area inclusive
        // NOTE(review): indexArray is filled below but never read when building
        // outArray - looks vestigial; confirm before removing.
        indexArray = newPyrArray(g->gc, worldDim, 0, true);
        indexArraySlots = indexArray->slots;
        indexArray->size = worldDim;
        if (IsTrue(d)) {        //--build index array excluding
                areaArray = newPyrArray(g->gc, areaSize-1, 0, true);
                areaArraySlots = areaArray->slots;
                areaArray->size = areaSize-1;
                // NOTE(review): this j shadows the outer j; used as a +1 bump
                // to skip the 0 offset once it is reached
                int j = 0;
                for (i=0; i<areaSize-1; ++i) {
                        int temp = 0-area+i;
                        if (temp==0) {j++;}     // from here on, shift by one so 0 is left out
                        // raw slot write - presumably relies on the old union
                        // slot layout (no tag set); verify against PyrSlot
                        areaArraySlots[i].ucopy = temp+j;
                }
                outSize = pow(areaSize, worldDim)-1;    // all tuples minus the centre one
        } else {                        //--build index array including
                areaArray = newPyrArray(g->gc, areaSize, 0, true);
                areaArraySlots = areaArray->slots;
                areaArray->size = areaSize;
                for (i=0; i<areaSize; ++i) {
                        areaArraySlots[i].ucopy = 0-area+i;     // offsets -area .. +area, raw write as above
                }
                outSize = pow(areaSize, worldDim);      // full cartesian product
        }
        for (i=0; i<worldDim; ++i) {
                SetObject(indexArraySlots+i, areaArray);        // every dimension shares the same offset array
        }
        //indexArray is here... [[-1, 0, 1]] or [[-1, 0, 1], [-1, 0, 1]] etc. for area=1
        //or [[-2, -1, 0, 1, 2]] or [[-2, -1, 0, 1, 2], [-2, -1, 0, 1, 2]] etc. for area=2
        //--all tuples
        // NOTE(review): other newPyrArray calls here pass element counts; the
        // sizeof(PyrObject) factor looks like an over-allocation - confirm
        // against the newPyrArray API.
        outArray = newPyrArray(g->gc, outSize*sizeof(PyrObject), 0, true);
        outArraySlots = outArray->slots;
        outArray->size = outSize;
        // NOTE(review): newPyrArray inside this loop may trigger a GC while
        // outArray/areaArray are only reachable from locals - verify the GC
        // cannot collect or move them mid-loop.
        for (i=0; i<outSize; ++i) {
                int k = i;      // k is decoded digit by digit in base areaSize
                PyrObject *tempArray = newPyrArray(g->gc, worldDim, 0, true);
                PyrSlot *tempArraySlots = tempArray->slots;
                tempArray->size = worldDim;
                for (j=worldDim-1; j>=0; --j) {
                        tempArraySlots[j].ucopy = areaArraySlots[k%areaSize].ucopy;     // offset for dimension j
                        getIndexedDouble(obj, j, &w);   // w = receiver's coordinate in dimension j
                        tempArraySlots[j].ucopy += w;   // absolute coordinate = centre + offset
                        k /= areaSize;
                }
                SetObject(outArraySlots+i, tempArray);
        }
        a->uo = outArray;       // return the result in the receiver slot
        return errNone;
}

supercollider code for benchmarking...

//speedtest: run a full 1d cellular-automata world for `size` generations
//and measure the wall-clock time with .bench
({
        var size= 100, cSize= 2, rule= 30;      //cSize is unused here
        var world, agents, y= 0, dict;
        dict= ();               /*lookup dictionary for rules*/
        //map each 3-cell neighbourhood (symbol like '110') to the rule's output bit
        8.do{|i| dict.put(i.asBinaryDigits(3).join.asSymbol, rule.asBinaryDigits[7-i])};
        ACell.rules= dict;
        world= APattern(size);                                          /*create 1d world*/
        size.do{|i| ACell(ALocation(world, [i]))};      /*fill up 1d grid with agents*/
        world.get(ALocation(world, [(size/2).round])).value= 1; /*middle agent value=1 as init*/
        agents= world.items;
        while({y<size}, {       //was "y&lt;size" - HTML-escaped '<' in the original post
                /*here update.  first all agents.sense then all agents.act*/
                agents.do{|a| a.sense};
                /*agents.do{|a| a.location.list_([(size/2).round])};*/
                /*agents.do{|a| a.location= ALocation(a.location.world, [(size/2).round])};*/
                agents.do{|a| a.act};
                y= y+1;
        });
}.bench)

work with mark: bottom-up approach

after some time mark d'inverno and i shifted focus and decided to simplify our ideas. we agreed to work more from a bottom-up approach - letting the agents live within a grid world and visualise their behaviours. other people have been doing quite some work in this area before, but not particularly many of them have been incorporating sound and music. so we had literature and examples to study and people to ask. it was of great help and i learned a lot about designing multi-agent systems from analysing for example jon mccormack's nice eden.

so starting out writing our own system, i did a set of classes for handling agents running around in a grid world of 1-3 dimensions. all agents were very simple minded. they were visually represented with just dots and oh, they could bleep too.
setting up simple scenarios for these classes helped to pinpoint different system models. it also showed that my biggest problems coding this usually boiled down to the order in which to do things. the model i in turn tried to 'model' was suggested by rob saunders in an article called 'smash, bam and cell', in which all agents first sense their surroundings and then act. but i constantly had to restructure the code and design. this was harder than i had thought and i think i never came up with an all-around solution.

one example scenario we came up with was the runaway test. it is very simple but can help trying out different designs. it works something like this... imagine a grid world of say 2 dimensions (we also coded this in 1 and 3D). agents move about at random speed and direction. if an agent encounters another one blocking its next step, it turns around 180 degrees and flee i.e. moving away in the opposite direction. so far the sense/act cycle is simple: for every update (world tick) it first sense, then acts. but what happens if there's another agent blocking the escape route? so the agents really needs to first sense, then if something is ahead, act and turn around 180, sense again and decide if it is possible to flee. here it'll sense within the act method and that clutters the design. the better solution would probably be to let the agent just turn 180 and wait to flee until the next tick. but perhaps it could also sense behind itself in the first sense round and pause if both escape routes are blocked. there are many possible solutions and creating these small test scenarios helped me to generalise my classes. we also tried the classes by coding the test scenarios as discrete and continuous i.e. if the world was a rigid grid in which the agents only were allowed to move stepwise, or if the world allowed them to move about more smoothly in non-integer directions and speeds.

the supercollider code for version 4, including test scenarios and small examples is attached at the bottom of this post and below is some text trying to describe the classes in more detail.

also see these quicktime movies of some test scenarios...
bounce 1D 2 one dimensional continuous world.
bounce 2D 2 two dimensional continuous world.
runaway 1D 3 one dimensional discrete world.
runaway 1D 4 one dimensional discrete world.
runaway 1D 5 one dimensional discrete world.
runaway 1D 6 one dimensional discrete world.
runaway 2D 1 two dimensional discrete world.

//--------------------------------------------------------------------------------------------------------------------------------

A4 description
there are 3 basic classes. ALocation, AWorld and AnItem. i first describe them and their immediate subclasses. then AnAgent, AProxyAgent and some subs of AWorld. then i explain a few of the classes used in the test/example programs. last i write a little about the main loop.
may look at the files A4.sc and A4subs.sc for completion.

//-------------------
ALocation

BASICS:
a place within a world.
.new takes 2 arguments: world and list
instance variable world is any (sub)class of AWorld. can be get (ie accessed from outside).
instance variable list is a list of coordinates. can be get and set. example: [10] for x=10 in an 1 dimensional world. [10, 20, 30] for x, y, z in a 3d world. the length of the list must correspond to the number of dimensions in the world.

ADDITIONAL:
locations can be compared with the == method. it takes another location as an argument. a location is equal to another if they exist in the same world and have the same list of coordinates.

the != method is the negation of ==.

distance between 2 locations can be found with the distance (arg alocation) method. it'll return the shortest distance between locations in any dimension.

with the at method one can query single dimensions. eg. in a 2d world, location.at(0) will return which column and location.at(1) will return row. the argument is really just index in list above.

the surroundingLocations method. with arguments exclude(boolean) and area(int) returns a list of new location objects. this is used for collecting locations to be searched for neighbours. if exclude argument flag is false, this (ie current) location will be counted and included in the list. the locations returned are all positioned next to this location in a cubelike way, covering an area of size: area steps away. to put it in another way: with an area of 1, only directly adjacent locations are returned. an area of 2 gives adjacent and their adjacent locations (as a set of ALocations) and so on.
so in a 1d world a location at (0) sent the message .surroundingLocations(false, 1) will give us [loc[-1], loc[0], loc[1]]. and likewise in a 2d world a location at (4, 5) sent the message .surroundingLocations(false, 1) will return [loc[3, 4], loc[3, 5], loc[3, 6], loc[4, 4], loc[4, 5], loc[4, 6], loc[5, 4], loc[5, 5], loc[5, 6]]. here's the code that resembles this: ALocation(AWorld(2, 10), [4, 5]).surroundingLocations(false, 1). last example: a location within a 3d world asked to return its surroundings with an area of 3 like this: ALocation(AWorld(3, 100), [40, 50]).surroundingLocations(false, 3).size will return a list of 343 unique locations.

DETAILS:
when a location object is created it checks its world's size and wraps around borders (by doing modulo(size) on the coordinates in the list).

the location class expects the world to be of uniform size in all dimensions.

COMMENTS:
050712 - distance might need to go in a subclass if we do networked worlds - rob's comment. how to calculate distance between worlds?
050712 - at robs suggestion: i'll try to rewrite this and the world classes using hashtable lookup. the matrix/location duality causes trouble keeping same thing at 2 places.
050712 - guess naming needs to be improved. specially AQLocation - what to call it?
050726 - removed the maxDimension and surroundMaxArea limitations and its classvariable
050726 - now hashtable lookup + c primitives. quite a lot faster overall and easier to keep things at the same place.

SUBCLASSES:
AQLocation - a quantified location. the coordinates for this class can be floating point but when it places itself in the matrix it rounds off to the nearest integer.

//-------------------
AWorld

BASICS:
a placeholder for items. superclass for APattern, AGrid, ACube, BugWorld etc.
.new takes 3 arguments: dimensions, size and location
instance variable dimensions is an integer specifying the number of dimensions for this world. can be get.
instance variable size will decide size of 1 dimension. the world is then created with uniform size in all dimensions. can be get.
instance variable location if defined, will place the world at a location. if left out - no parent. can be get.

ADDITIONAL:
the clear method takes a location object as argument and puts a nil there.

with the remove method - argument: an item - you remove an item from this world.

with put you place an item in this world. argument: item

the get method returns whatever item(s) is in a location. argument: location

method items returns a list of all items in this world.

neighbours - arguments: item, exclude and area. returns a list of any items within an item's area (1=adjacent locations) including or excluding the item's own location. if no items nearby then empty list.

neighboursSparse is similar to neighbours above (same arguments and function) but uses a different algo for finding nearby items. where neighbours calculates locations around the item in question and then check these locations for any items, this method might be quicker in a sparse world. it looks through all items in the world and checks if they're nearby.

running the update method goes through all items in this world, copies them to their own locations. this is to make sure all item's locations and the hashtable stays in sync.

save will write this world, its current settings and all its items and their settings to disk. this allow for backup of longrunning tasks or 'presets' to be loaded.

DETAILS:

COMMENTS:

SUBCLASSES:
AWorld2 - remove and put methods are modified to allow for multiple items at the same location. AWorld can only do one item in one location. hopefully i can merge the two classes later.

ASmartWorld - is a world that can resolve location conflicts. there's a resolve method to be called after the sense cycle but before act. this goes through all items and if more than one intend to move to the same location, only let one move - others stay put and their request is ignored. (comment: need to find a better name than ASmartWorld)

and many other subclasses. eg BugWorld, APattern, AGrid, ACube. almost every test program has its own specialised class inheriting from these two subclasses.

//-------------------
AnItem

BASICS:
lowest level thing that exist in a world. abstract superclass class for ARock, AMoss, AnAgent
.new takes 1 argument: location

ADDITIONAL:
the method remove will remove this item from its world.

DETAILS:
the abstract init method is used by some subclasses for initialisation.

COMMENTS:
050726 - is remove needed? will the agent remove itself or will the world handle that eg if energy=0.

SUBCLASSES:
ARock - does nothing different. just exists at a location
AMoss - has energy that can be get/set.
AnAgent - is an abstract class. see below

//--------------------------------------------------------------------------------------------------------------------------------

AnAgent
subclass of AnItem but is also an abstract class.
makes sure the sense and act methods are there for all agents to follow.

SUBCLASSES:
many. eg ACell, ARunaway, ABounce, Bug. every test program has its own specialised class inheriting from this one. they all do sense and act in their own way.

//-------------------
AProxyAgent
subclass of AnAgent. it allows to replace sense and act methods while running.

DETAILS:
when asked to sense and act, sense and act in this class instead evaluate functions stored in the 2 class variables senseFunc and actFunc. these can be replaced and coded on the fly! so while the system is running, we can try out, completely rewrite or just slightly modify, behaviour for all agents. their states are kept (individually) but behaviour changes.

COMMENTS:
this is unique to other frameworks i've seen so far. i'd like to explore more and hopefully we can use it in practice too - not just as convenience for developing. with this feature it's easy to replace the rules on the fly.
perhaps i redesign the whole framework to use proxies. so the AnItem class is really a placeholder (proxy) for anything. then one can code whole agents with state and behaviour while running i think. and maybe proxy worlds too but i can't find a reason for that now.

//-------------------
APattern

BASICS:
a subclass of AWorld that has 1 dimension.

//-------------------
AGrid

BASICS:
a subclass of AWorld that has 2 dimension.

//-------------------
ACube

BASICS:
a subclass of AWorld that has 3 dimension.

//-------------------
ACell - used in A4 test1 cellautomata.rtf

BASICS:
subclass of AnAgent. it doesn't move and is used for cellular automatas and gameoflife.
instance variable value can be 0 or 1. can be get/set.
there's also a rules class variable that contains a dictionary for rule lookup.

ADDITIONAL:
the sense method here collects and stores values from nearby neighbours (by asking the world for neighbours) including the cell's own value.
the act method set the cell's own value to what is returned from the rules dictionary.

//-------------------
ALifeCell - used in A4 test2 gameoflife.rtf

BASICS:
subclass of ACell. just implements different sense and act methods.

ADDITIONAL:
the sense method here is the same as ACell.sense but excludes the cell's own value.
act will first calculate the total sum of all neighbour's values and then do lookup in the rules dictionary. the cell's own values is set to 0 or 1 depending on what the dictionary returns.

//-------------------
ARunaway - used in A4 test3 runaway1D.rtf, A4 test4 runaway2D.rtf and A4 test8 runaway3D.rtf

BASICS:
a subclass of AnAgent that sense if something at next location and if so, bleep, turn around 90 and flee.
instance variable direction is a list of directions in any dimension. in a 2D world: [0, 0] - stand still, [-1, 0] go west, [1, 1] go northeast and so on. can be get/set
instance variable freq keeps the bleep frequency to play.

ADDITIONAL:
the sense method updates the 2 private nextLocation and nextPos instance variables to figure out where to go and if that location is taken.
helper method clearAhead returns true if there's nothing in nextPos
getNextLocation returns a new location object at here + directionlist.
getNewDirection method turns directionlist around 90 degrees.
the move method sets this location to nextLocation
the play will beep at a frequency. and pan the sound left/right depending on location.

//-------------------
ABounce1D - used in A4 test6 bounce1D.rtf

BASICS:
subclass of ARunaway. implements getNextLocation and getNewDirection differently so that the agents bounce off each other rather than turn 90 degrees.
direction is here a vector of angle and degree.

SUBCLASSES:
ABounce2D and ABounce3D for vector math in other dimensions.

//--------------------------------------------------------------------------------------------------------------------------------

the main loop of the program is usually very simple. for the ca and gameoflife examples it just draws a rectangle if the cell's value is 1, then call .sense on all agents and last call .act for all agents.

//main loop for the ca/gameoflife examples: draw live cells, then let every
//agent sense, then let every agent act.
while({true}, {         //while takes its two functions as arguments
        agents.do{|a| if(a.value==1, {a.paintRect})};   //missing ';' in the original
        agents.do{|a| a.sense};
        agents.do{|a| a.act};
});

agents that move around (ie all other examples) need to resolve conflicts and update the world. also they always draw themselves.

//main loop for moving agents: draw, sense, resolve location conflicts,
//act, then sync the world's hashtable with the agents' locations.
while({true}, {         //while takes its two functions as arguments
        agents.do{|a| a.paintRect};     //missing ';' in the original
        agents.do{|a| a.sense};
        world.resolve;
        agents.do{|a| a.act};
        world.update;
});

note the A4.zip was updated 061017. bugfix to the distance method in extArrayedCollection.sc

AttachmentSize
Package icon A4.zip20.83 KB

work with mark: cellular automata

another thing i played around with while at UoW was cellular automata. here's a simple one-dimensional CA class for supercollider... external link


and here is some more sc code i wrote to come to grips with CA...

/*cellular automata /redFrik*/
//one-dimensional elementary CA: each generation is drawn as one row of
//pixels, top to bottom, starting from a single live cell in the middle.
(
        var w, u, width= 400, height= 300, cellWidth= 1, cellHeight= 1;
        w= Window("ca - 1", Rect(128, 64, width, height), false);
        u= UserView(w, Rect(0, 0, width, height));
        u.background= Color.white;
        u.drawFunc= {
                var pat, dict, rule, ruleRand, y= 0;    //ruleRand is declared but unused

                /*
                rule30= 30.asBinaryDigits;              // [0, 0, 0, 1, 1, 1, 1, 0];
                rule90= 90.asBinaryDigits;              // [0, 1, 0, 1, 1, 0, 1, 0];
                rule110= 110.asBinaryDigits;            // [0, 1, 1, 0, 1, 1, 1, 0];
                rule250= 250.asBinaryDigits;            // [1, 1, 1, 1, 1, 0, 1, 0];
                rule254= 254.asBinaryDigits;            // [1, 1, 1, 1, 1, 1, 1, 0];
                */

                /*-- select rule here --*/
                //rule= 256.rand.postln;
                //rule= 90;
                rule= 30;

                pat= 0.dup((width/cellWidth).round);    //first generation: all dead...
                pat.put((pat.size/2).round, 1);         //...except one live cell in the middle
                dict= ();
                //map each 3-cell neighbourhood (symbol like '110') to the rule's output bit
                8.do{|i| dict.put(i.asBinaryDigits(3).join.asSymbol, rule.asBinaryDigits[7-i])};

                //--render
                Pen.fillColor= Color.black;
                while({y*cellHeight<height}, {
                        //draw the current generation at row y
                        pat.do{|c, x|
                                if(c==1, {
                                        Pen.addRect(Rect(x*cellWidth, y*cellHeight, cellWidth, cellHeight));
                                });
                        };
                        //next generation: rule lookup on every 3-cell window, borders padded with 0
                        pat= [0]++pat.slide(3, 1).clump(3).collect{|c|
                                dict.at(c.join.asSymbol);
                        }++[0];
                        y= y+1;
                });
                Pen.fill;
        };
        w.front;
)




more interesting than these simple examples are of course things like game-of-life.


here's one implementation for sc...

//game of life /redFrik
//2d cellular automaton. rule format: #[[neighbour counts that keep a live
//cell alive], [neighbour counts that give birth to a new cell]] - conway's
//life is 23/3. redrawn ~20 times per second until the window is closed.
(
        var envir, copy, neighbours, preset, rule, wrap;
        var w, u, width= 200, height= 200, rows= 50, cols= 50, cellWidth, cellHeight;
        w= Window("ca - 2 pen", Rect(128, 64, width, height), false);
        u= UserView(w, Rect(0, 0, width, height));
        u.background= Color.white;
        cellWidth= width/cols;
        cellHeight= height/rows;
        wrap= true;                     //if borderless envir
        /*-- select rule here --*/
        //rule= #[[], [3]];
        //rule= #[[5, 6, 7, 8], [3, 5, 6, 7, 8]];
        //rule= #[[], [2]];                                             //rule "/2" seeds
        //rule= #[[], [2, 3, 4]];
        //rule= #[[1, 2, 3, 4, 5], [3]];
        //rule= #[[1, 2, 5], [3, 6]];
        //rule= #[[1, 3, 5, 7], [1, 3, 5, 7]];
        //rule= #[[1, 3, 5, 8], [3, 5, 7]];
        rule= #[[2, 3], [3]];                                           //rule "23/3" conway's life
        //rule= #[[2, 3], [3, 6]];                                      //rule "23/36" highlife
        //rule= #[[2, 3, 5, 6, 7, 8], [3, 6, 7, 8]];
        //rule= #[[2, 3, 5, 6, 7, 8], [3, 7, 8]];
        //rule= #[[2, 3, 8], [3, 5, 7]];
        //rule= #[[2, 4, 5], [3]];
        //rule= #[[2, 4, 5], [3, 6, 8]];
        //rule= #[[3, 4], [3, 4]];
        //rule= #[[3, 4, 6, 7, 8], [3, 6, 7, 8]];               //rule "34578/3678" day&night
        //rule= #[[4, 5, 6, 7], [3, 5, 6, 7, 8]];
        //rule= #[[4, 5, 6], [3, 5, 6, 7, 8]];
        //rule= #[[4, 5, 6, 7, 8], [3]];
        //rule= #[[5], [3, 4, 6]];
        neighbours= #[[-1, -1], [0, -1], [1, -1], [-1, 0], [1, 0], [-1, 1], [0, 1], [1, 1]];    //the 8 moore-neighbourhood offsets
        //NOTE(review): Array2D is indexed (row, col) but put/at are called
        //as (x, y) below - only works because rows==cols here
        envir= Array2D(rows, cols);     //current generation
        copy= Array2D(rows, cols);      //next generation is built here
        cols.do{|x| rows.do{|y| envir.put(x, y, 0)}};   //start with all cells dead
        /*-- select preset here --*/
        //preset= #[[0, 0], [1, 0], [0, 1], [1, 1]]+(cols/2); //block
        //preset= #[[0, 0], [1, 0], [2, 0]]+(cols/2); //blinker
        //preset= #[[0, 0], [1, 0], [2, 0], [1, 1], [2, 1], [3, 1]]+(cols/2); //toad
        //preset= #[[1, 0], [0, 1], [0, 2], [1, 2], [2, 2]]+(cols/2); //glider
        //preset= #[[0, 0], [1, 0], [2, 0], [3, 0], [0, 1], [4, 1], [0, 2], [1, 3], [4, 3]]+(cols/2); //lwss
        //preset= #[[1, 0], [5, 0], [6, 0], [7, 0], [0, 1], [1, 1], [6, 2]]+(cols/2); //diehard
        //preset= #[[0, 0], [1, 0], [4, 0], [5, 0], [6, 0], [3, 1], [1, 2]]+(cols/2); //acorn
        preset= #[[12, 0], [13, 0], [11, 1], [15, 1], [10, 2], [16, 2], [24, 2], [0, 3], [1, 3], [10, 3], [14, 3], [16, 3], [17, 3], [22, 3], [24, 3], [0, 4], [1, 4], [10, 4], [16, 4], [20, 4], [21, 4], [11, 5], [15, 5], [20, 5], [21, 5], [34, 5], [35, 5], [12, 6], [13, 6], [20, 6], [21, 6], [34, 6], [35, 6], [22, 7], [24, 7], [24, 8]]+(cols/4); //gosper glider gun
        //preset= #[[0, 0], [2, 0], [2, 1], [4, 2], [4, 3], [6, 3], [4, 4], [6, 4], [7, 4], [6, 5]]+(cols/2); //infinite1
        //preset= #[[0, 0], [2, 0], [4, 0], [1, 1], [2, 1], [4, 1], [3, 2], [4, 2], [0, 3], [0, 4], [1, 4], [2, 4], [4, 4]]+(cols/2); //infinite2
        //preset= #[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0], [5, 0], [6, 0], [7, 0], [9, 0], [10, 0], [11, 0], [12, 0], [13, 0], [17, 0], [18, 0], [19, 0], [26, 0], [27, 0], [28, 0], [29, 0], [30, 0], [31, 0], [32, 0], [34, 0], [35, 0], [36, 0], [37, 0], [38, 0]]+(cols/4); //infinite3
        //preset= Array.fill(cols*rows, {[cols.rand, rows.rand]});
        preset.do{|point| envir.put(point[0], point[1], 1)};    //seed the chosen pattern
        i= 0;   //NOTE(review): 'i' is an interpreter variable (a-z), not declared above - generation counter
        u.drawFunc= {
                i= i+1;
                Pen.fillColor= Color.black;
                //--draw all live cells
                cols.do{|x|
                        rows.do{|y|
                                if(envir.at(x, y)==1, {
                                        //y axis is flipped so row 0 sits at the bottom
                                        Pen.addRect(Rect(x*cellWidth, height-(y*cellHeight), cellWidth, cellHeight));
                                });
                        };
                };
                Pen.fill;
                //--compute the next generation into copy
                cols.do{|x|
                        rows.do{|y|
                                var sum= 0;     //number of live neighbours
                                neighbours.do{|point|
                                        var nX= x+point[0];
                                        var nY= y+point[1];
                                        if(wrap, {
                                                sum= sum+envir.at(nX%cols, nY%rows); //no borders
                                        }, {
                                                if((nX>=0)&&(nY>=0)&&(nX<cols)&&(nY<rows), {sum= sum+envir.at(nX, nY)}); //borders
                                        });
                                };
                                if(rule[1].includes(sum), {     //borne
                                        copy.put(x, y, 1);
                                }, {
                                        if(rule[0].includes(sum), {     //lives on
                                                copy.put(x, y, envir.at(x, y));
                                        }, {    //dies
                                                copy.put(x, y, 0);
                                        });
                                });
                        };
                };
                envir= copy.deepCopy;   //swap in the next generation
        };
        //refresh ~20 times per second until the window closes
        Routine({while{w.isClosed.not} {u.refresh; i.postln; (1/20).wait}}).play(AppClock);
        w.front;
)

work with mark: inaugural talk

a small project i did for mark d'inverno's inaugural talk nov'04 was to create a sort of entrance music or ambience as people came in and took their seats. i transcribed one of mark's jazz tunes called val's song. the head of this song - repeated over and over - formed the basic musical material. then i coded a few agents that would play and manipulate it. the agents got input from a video-analysis program i had written in max/softvns. so there was a dv-camera looking at the crowd taking their seats and the program looked at the total motion of 4 different areas of the hall. as the people settled the amount of motion decreased to nearly zero and this made a nice form curve for the piece as a whole... people coming in, mingling and slowly calming down right before the talk.
the agents were allowed control over certain aspects of the music like overall tempo, which scale to use, transposition, volume, sustain and legato of tones and the amount of reverb. and we used supercollider plus the RedDiskInSamplerGiga with a grand-piano sample library to get a nice but discrete sound. unfortunately very few noticed the music system as we did not allow it much volume and the crowd was pretty noisy.

here's the basic song material val's song in supercollider code...

/*val's song by m.d'inverno*/
//the head of the tune as two looping Pbinds: melody plus bass. every Pseq
//entry is a [degree, dur] pair; a 'b' suffix on a degree presumably
//flattens it (sc accidental notation) - TODO confirm.
s.boot;
TempoClock.default.tempo_(2.2)
(
a= Pdef(\x, Pbind(      /*melody*/
        \root, 3,       /*transpose the key up 3 - presumably semitones, see Pbind docs*/
        \octave, 5,
        [\degree, \dur], Pseq([
                [-3, 1.5], [0, 1], [1, 0.5], [2, 0.5], [3, 1], [4, 1], [2, 0.5],
                [-3, 1.5], [0, 1], [2, 0.5], [1, 3],
                [0, 0.75], [1, 0.75], [2, 0.75], [4, 0.75], [7, 0.75], [8, 0.75], [9, 0.75], [7, 0.75],
                [8, 1.5], [7, 1], [9, 0.5], [8, 3],
                [-3b, 1.5], [0, 1], [7, 0.5], [7, 2], [0, 1],
                [0, 1.5], [7, 1.5], [7, 0.5], [8, 0.5], [9, 2],
                [7, 0.5], [4, 1], [4, 1], [4, 0.5], [3, 0.5], [-1b, 1], [-1b, 1], [-2b, 0.5],
                [-3, 3], [\rest, 3]
        ], inf)
));
b= Pdef(\y, Pbind(      /*bass*/
        \root, 3,
        \octave, 4,     /*one octave below the melody*/
        [\degree, \dur], Pseq([
                [4, 3], [0, 2], [0, 1],
                [4, 3], [-1, 2], [-1, 1],
                [-2, 3], [-2, 2], [2, 1],
                [-3, 3], [-3, 2], [-2b, 1],
                [-2, 2], [0, 1], [2, 3],
                [3, 1.5], [-2, 1.5], [0, 3],
                [-3, 3], [-3, 2], [0b, 1],
                [0, 3], [-3, 3]
        ], inf)
))
)
Ppar([a, b]).play;      /*play melody and bass in parallel*/

and below is the code i stepped through as a small part of the actual talk. this was just to let people see how we could build music from scratch using sc. in the end mark played along together with this on his electrical piano.

/*--setup--*/
s.boot;
/*two scrambled seed arrays; b is used later for the bass melody pattern*/
a= [0, 0.25, 1, 0, 0, 0.5, 0, 0.25].scramble;
b= [1, 0, 0, 0, 1, 0.25, 0, 0, 0, 0.5, 0, 0.5, 1, 0, 1, 1].scramble;
c= TempoClock.new;
p= ProxySpace.push(s, clock: c);        /*all ~names below become node proxies on this clock*/
~out.ar(2);             /*stereo audio-rate output proxy*/
~pattern1.kr(1);        /*four control-rate pattern proxies, initialised before use*/
~pattern2.kr(1);
~pattern3.kr(1);
~pattern4.kr(1);
~out.play;

/*--talk demo--*/

/*definition*/
(
~bassdrum= {arg t_trig, amp= 1, release= 2.5, freq= 100;
        Decay2.kr(t_trig, 0.01, release, amp)
        * SinOsc.ar([freq, freq*0.9], 0, 0.5)
}
)

~out.add(~bassdrum)
~bassdrum.set(\t_trig, 1)

/*change parameters*/
~bassdrum.set(\freq, 70, \release, 0.3)
~bassdrum.set(\t_trig, 1)

/*add a delay effect*/
~bassdrum.filter(1, {arg in; z= 0.2; in+CombN.ar(in, z, z, 3)})
~bassdrum.set(\t_trig, 1)
~bassdrum.filter(1, {arg in; in})       /*bypass: replace the delay with a pass-through*/

/*play pattern*/
~pattern1= StreamKrDur(Pseq([1, 0, 0, 1, 0, 1, 1, 1], inf), 0.25)
c.sched(c.timeToNextBeat(1), {~bassdrum.map(\t_trig, ~pattern1)})       /*start on the next beat*/

/*swing - alternate long/short step durations*/
~pattern1= StreamKrDur(Pseq([1, 0, 0, 1, 0, 1, 1, 1], inf), Pseq((0.25*[1.2, 0.8]), inf))

/*add more drums*/
(
~snaredrum= {arg t_trig, amp= 1, release= 0.12;
        Decay2.kr(t_trig, 0.01, release, amp) * Pan2.ar(Resonz.ar(ClipNoise.ar(0.3), 1500, 0.5))
};
~out.add(~snaredrum);
)

/*play pattern*/
(
~pattern2= StreamKrDur(Pseq([0, 1, 0, 0], inf), 0.5);
c.sched(c.timeToNextBeat(1), {~snaredrum.map(\t_trig, ~pattern2)});
~pattern1= StreamKrDur(Pseq([0, 1, 0, 1, 0, 1, 0.25, 1], inf), 0.25);
~pattern2= StreamKrDur(Pseq([1, 0.25, 1, 1, 0, 0, 0, 0, 0.5, 0.5, 0, 0.5, 0, 0, 0, 0], inf), 0.125);
)

/*add a bass w/ random melody and change drum patterns*/
(
~bass= {arg freq= 60, amp= 0;
        RLPF.ar(Saw.ar([freq, freq*1.01], amp), SinOsc.kr(0.2, 0, 200, 500), 0.2, 0.1)
};
~out.add(~bass);
)
(
~pattern3= StreamKrDur(Pseq([1, 0, 0.5, 0.5, 0, 1, 1, 0.5, 0.25, 1, 0, 1, 0.5, 0, 1, 0]*0.6, inf), Pseq((0.125*[1.2, 0.8]), inf));
~pattern4= StreamKrDur(Pseq(b*100+20, inf), Pseq((0.125*[1.2, 0.8]), inf));     /*b is the scrambled array from setup*/
c.sched(c.timeToNextBeat(1), {~bass.map(\amp, ~pattern3)});
c.sched(c.timeToNextBeat(1), {~bass.map(\freq, ~pattern4)});
)

~out.release(2);        /*fade out over 2 seconds*/
p.clear;                /*tear down the proxyspace*/
p.clear;

work with mark: supercollider sampler

for my work together with mark d'inverno i coded a few tools. one of the things that came up was a need for a neutral but nice sounding way to test aspects of our musical agents systems. so we got hold of a grand-piano sample library and i wrote a 'giga sampler' like class for supercollider. this allowed us to use this massive sample library (2.5gig) and let the agents play high quality samples instead of cheap midi or boring synthesised sounds. so for testing melodies, harmonies and such this was a good thing.

the trick with the giga sampler is that it preloads a bit from each soundfile into ram memory and then streams the rest from disk when needed. or at least this is how i understands it. so using this technique, one can get quick access to a lot more samples than normally would fit in the memory of a sampler. a massive library like ours with full 88keys range, sampled in many different velocities, would occupy ~5gig of ram (supercollider uses 32bit internally), nor could it be streamed from disk (the harddrive would be too slow to access and load the headers to play fast chords progressions without staggering etc).

i spent some time to make my class all round useful and it can be downloaded _here. it is called RedDiskInSampler.

and here is some testcode for it...

s.boot;
d= ();                                          /*for mapping midinote to filename*/
r.free                                          /*free any previous sampler before re-running*/
r= RedDiskInSamplerGiga(s);             /*sampler*/
(
var velocities= #[40, 96];              /*velocities to load*/
var octavesToLoad= #[2, 3];             /*how many octaves to load*/
var scale= #['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B'];
/*so number of samples to load is octavesToLoad.size*12*velocities.size*/
/*lowest note C2= midi 36*/
velocities.do{|x, i|
        var tempDict= ();
        d.put(x, tempDict);
        octavesToLoad.do{|y, j|
                scale.do{|z, k|
                        var midinote, pathname, key;
                        midinote= y*12+12+k;
                        key= z++y;              /*eg "A2"*/
                        pathname= "sounds/bosen44100/"++x++"/"++key++".aif";
                        key= (key++"_"++x).asSymbol;
                        tempDict.put(midinote, key);            /*like (45 : A2_96)*/
                        r.preload(key, pathname);               /*preload start of file, stream rest from disk*/
                };
        };
};
)
r.play(\C2_96, 0, 3, 1)
r.play(\D2_40, 0, 3, 1)
Tdef(\test).play
a= r.loadedKeys;
(Tdef(\test, {          /*random notes at a steady pulse*/
        inf.do{|i|
                r.play(a.choose, 0, 0.45, 0.1);
                0.5.wait;
        }
}))
(Tdef(\test, {          /*fast scrambled run, then a pause*/
        b= a.asArray.scramble;
        inf.do{|i|
                b.do{|x, j|
                        r.play(x, 0, 0.35, 0.1);
                        0.15.wait;
                };
                2.wait;
        }
}))
(Tdef(\test, {          /*sorted (ascending) run*/
        b= a.asArray.sort;
        inf.do{|i|
                b.do{|x, j|
                        r.play(x, 0, 0.25, 0.08, amp: 0.6);
                        0.1.wait;
                };
                1.wait;
        }
}))
Tdef(\test).stop
r.free

work with mark: old system design2

another thing mark d'inverno and i did was to try to list all the things our musical method agents possibly could do. this was of course an impossible task but still it gave us an overview and was a pretty fun and crazy project.

version 040511 /fredrik

CHORD:
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
* transpose a note tone up/down
* transpose some notes tone up/down
* transpose all notes tone up/down
* transpose a note octave up/down
* transpose some notes octave up/down
* transpose all notes octave up/down
* making more/less dissonant by transposing notes up/down
* shift by inverting
* inverting parts of the chord
* removing highest/lowest note
* removing middle note
* removing every other note
* removing dissonances
* adding the whole chord octave up/down
* adding higher/lower note
* adding note in the middle
* adding notes in between
* adding dissonant notes
* detune a note up/down from current tuning (eg +10 cents)
* detune some notes up/down from current tuning (eg +10 cents)
* detune all notes up/down from current tuning (eg +10 cents) (ie pitchbend whole chord)

* transpose a note tone up/down in current modus
* transpose some notes tone up/down in current modus
* transpose all notes tone up/down in current modus
* making more/less dissonant by transposing notes up/down in current modus
* shift by inverting in current modus
* inverting parts of the chord in current modus
* removing root of current modus
* removing middle notes in current modus (eg 3rd, 5th)
* removing extension notes in current modus (ie E13#11 -> E9#11 -> E9 -> E7 -> E)
* adding higher/lower note in current modus
* adding note in the middle in current modus
* adding notes in between in current modus
* adding extension notes in current modus (ie E -> E7 -> E9 -> E9#11 -> E13#11)
* adding root from another modus (eg E/A)
* adding extension chord from another modus (eg F#/E7)
* replace with parallel chord (eg C -> Am)
* detune a note up/down from current tuning to another tuning (eg from just to 14 tone equal tuning)
* detune some notes up/down from current tuning to another tuning (eg from just to 14 tone equal tuning)
* detune all notes up/down from current tuning to another tuning (eg from just to 14 tone equal tuning)

* replace with chord sequence current modus (eg II-V7-I)
* replace with chord sequence from another modus
* arpeggiate up/down
* rhythmisize some notes in sequence
* rhythmisize all notes in sequence
* rhythmisize some notes in parallel
* rhythmisize all notes in parallel
* change duration of a note
* change duration of some notes
* change duration of all notes

* replace with chord sequence current modus (eg II-V7-I) in current time
* replace with chord sequence from another modus in current time
* arpeggiate up/down in current time
* rhythmisize some notes in sequence in current time
* rhythmisize some notes in parallel in current time
* rhythmisize all notes in sequence in current time
* rhythmisize all notes in parallel in current time
* change duration of a note in current time
* change duration of some notes in current time
* change duration of all notes in current time

* change volume/attack/decay/sustain/release of a note
* change volume/attack/decay/sustain/release of some notes
* change volume/attack/decay/sustain/release of all notes

* change timbre/instrumentation of a note
* change timbre/instrumentation of some notes
* change timbre/instrumentation of all notes

* change position in space for a note
* change position in space for some notes
* change position in space for all notes

MELODY:
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
* transpose a note tone up/down in or outside current modus
* transpose some notes tone up/down in or outside current modus
* transpose all notes tone up/down in or outside current modus
* transpose a note octave up/down
* transpose some notes octave up/down
* transpose all notes octave up/down
* invert melody in or outside current modus
* scale interval range in or outside current modus (ie shrink or expand)
* transpose a note to match another modus
* transpose some notes to match another modus
* transpose all notes to match another modus
* replace a note with a few others in or outside current modus
* replace some notes with a few others in or outside current modus
* detune a note up/down from current tuning (eg +10 cents)
* detune some notes up/down from current tuning (eg +10 cents)
* detune all notes up/down from current tuning (eg +10 cents) (ie pitchbend whole melody)

* remove a note (ie pause)
* remove some notes (ie pause)
* remove notes with duration < x
* remove notes with duration > y
* remove notes with duration < x and > y
* change duration of a note in or outside time
* change duration of some notes in or outside time
* change duration of all notes in or outside time
* change duration and onset of a note in or outside time (timescale)
* change duration and onset of some notes in or outside time (timescale)
* change duration and onset of all notes in or outside time (timescale whole melody)
* make duration and onset of all notes shorter and repeat (eg divide time by 2 and play twice)
* make duration and onset of all notes shorter and play a variation instead of repeating

* play melody in retrograd
* play notes in retrograd but keep rhythm/duration
* play rhythm/duration in retrograd but keep notes
* play and repeat only sections of the melody
* shift notes some steps left/right but keep rhythm/duration
* shift rhythm/duration some steps left/right but keep notes
* randomize notes but keep rhythm/duration
* randomize rhythm/duration but keep notes
* replace a note but keep rhythm/duration
* replace some notes but keep rhythm/duration
* replace all notes but keep rhythm/duration
* replace a rhythm/duration but keep notes
* replace some rhythm/duration but keep notes
* replace all rhythm/duration but keep notes

* decrease or increase the number of notes in the current scale (quantify notes ie minimal effect in istreet)
* decrease or increase the number of possible rhythms (quantify rhythms)

* change rhythm/duration continuously (eg ritardando)
* change rhythm/duration discrete (eg ritardando in time)

* repeat a note and rhythm/duration x times in or outside time (ie delay effect)
* repeat some notes and rhythm/duration x times in or outside time (ie delay effect)
* repeat all notes and rhythm/duration x times in or outside time (ie delay effect)

* rearrange notes in inc/dec order but keep rhythm/duration
* rearrange rhythm/duration in inc/dec order but keep notes

* add another voice in parallel to the melody
* add many other voices in parallel to the melody
* add another voice mirroring the melody
* add many other voices mirroring the melody in different ways
* add another standalone voice to the melody
* add many other standalone voices to the melody
* add another standalone voice contrasting the melody
* add many other standalone voices contrasting the melody

* change volume/attack/decay/sustain/release of a note
* change volume/attack/decay/sustain/release of some notes
* change volume/attack/decay/sustain/release of all notes

* change timbre/instrumentation of a note
* change timbre/instrumentation of some notes
* change timbre/instrumentation of all notes

* change position in space for a note
* change position in space for some notes
* change position in space for all notes

* reharmonize melody with 'good sounding' chords
* reharmonize melody with weird chords
* play melody in a different context
* play melody in another mood (eg sad, energetic or irritated)
* incorporate elements from other melodies
* blend two or more melodies (eg average note for note or play sections of each one)
* improvise freely over the melody

RHYTHM PATTERN:
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
* scale pattern with a constant factor in or outside time
* scale pattern with a changing factor in or outside time (eg random walk lfo for fluctuation)
* make duration and onsets in pattern shorter and repeat (eg divide time by 2 and play twice)
* make duration and onsets in pattern shorter and play a variation instead of repeating

* remove an element (ie pause)
* remove some elements (ie pause)
* remove elements with duration < x
* remove elements with duration > y
* remove elements with duration < x and > y
* change duration of an element in or outside time
* change duration of some elements in or outside time
* change duration of all elements in or outside time
* replace an element with a few others in or outside time
* replace some elements with a few others in or outside time
* replace all elements with a few others in or outside time
* add an element at random position in or outside time
* add some elements at random position in or outside time
* add an element in the middle in or outside time

* change position of an element to random in or outside time
* change position of some elements to random in or outside time
* change position of all elements to random in or outside time (scramble pattern)

* repeat an element x times in or outside time (ie delay effect)
* repeat some elements x times in or outside time (ie delay effect)
* repeat all elements x times in or outside time (ie delay effect)

* play pattern backwards
* play and repeat only sections of the pattern
* rearrange elements in inc/dec duration order

* quantise an element to current time
* quantise some elements to current time
* quantise all elements to current time

* add another voice with different timbre/instrumentation in parallel to the pattern
* add many other voices with different timbre/instrumentation in parallel to the pattern
* add another voice mirroring the pattern rhythmically
* add many other voices mirroring the pattern in different ways
* add another standalone voice to the pattern
* add many other standalone voices to the pattern
* add another standalone voice contrasting the pattern
* add many other standalone voices contrasting the pattern

* change volume/attack/decay/sustain/release of an element
* change volume/attack/decay/sustain/release of some elements
* change volume/attack/decay/sustain/release of all elements

* change timbre/instrumentation of an element
* change timbre/instrumentation of some elements
* change timbre/instrumentation of all elements

* change position in space for an element
* change position in space for some elements
* change position in space for all elements

* vary the pattern based on some scheme (eg nick's bbcut)
* play pattern in another mood (eg sad, energetic or irritated)
* incorporate elements from other patterns
* blend two or more patterns (eg average elements and threshold or play sections of each one)
* improvise freely over the pattern

EFFECTS: (very much in progress)
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
* delay: dubdelay, looping with infinite delay
* filter: high/band/low pass, ringing filters
* panning: surround
* timestretch
* pitchshift
* segmenting/cutting: warp, scratch
* phase modulation
* amplitude modulation: tremolo, lfo clipping/gate, ringmodulation
* mixing with another sound
* frequency modulation: vibrato
* distortion: overdrive, bitcrunch
* fft manipulations: convolution, vocoder
* limiter, expander, compressor, gate
* feedback: modulate local amp, phase, freq etc.
* grain: segment with different envelopes, panning, amplitude, frequency etc.
* amplitude follower and map to another sound
* pitch tracker and map to another sound

work with mark: old system designs

so mark d'inverno and i worked on quite a few different systems. they all differed but the main goal remained fixed: we wanted a responsive music system built from a multi-agent system approach.

the first ideas circled around a multi-agent band.

We originally consider the idea of a multi-agent band, but this was soon dismissed because of the complexity involved with feedback. How does one agent perceive the output of another agent; in effect how do we give that agent ears? The only possibility is to allow one agent to look at some portion of the code that is being generated but it is not clear how you could do this with any semblance of replicating the real-life improvising group of musicians.

Some questions that arose in considering how to build such a band. How aware should the agents be of its fellow musicians? What interplay do we allow between musicians and how to we facilitate this? Should there be one agent per style/piece/genre or is the agent an all round virtuous that can play in different styles/genres? Does an agent know what notes to play by itself or is it handed a score and told what and when to play? Is the agent itself responsible for different manipulations and effects to the sound it generates, or are there other agents deciding this? Perhaps, there is a project for someone else out there?

we abandoned that and tried to simplify a bit. the design we came up with next is sketched out here...

and our ideas around its design was as follows...

A basic design

In order to address some of the issues and motivations outlined in this document we propose a multi-agent architectures with the following design elements.

1. It would be responsive to the external environment: time, date, day, season, temperature, humidity, sunlight, individual and collective human actions, rainfall, wind, ambient sound level of what is happening (should be able to record this sound and play it back).

2. It would not be beat-based per se, but there might be beat elements. Rhythm more seen as an important parameter along with others - not being the predominant one.

3. We are interested in exploring the notion of harmony and melody and how this relates to the emotional state of a user. Naturally, we also want to build something aesthetically pleasant.

4. We will employ a multi-agent system architecture to manage the different elements that can take place in a music system. Agents will negotiate and compromise for control over modifying parameters and using various hard-coded methods, and hence the systems overall output.

Interface agents will monitor some aspects of the environment and may communicate with each other. We will build one agent for each different environmental parameter we wish to measure. It may be that these agents are requested to look for certain things in the environment. For example the human activity agent might be asked to look out for a certain pattern of behaviour or to notify when the number of humans goes above a certain threshold. In other words these agents are more than sensors and can change what they are trying to perceive after receiving information from other agents.

Abstract task agents will be able to collect information from some or all of the interface agents. But they will have different features and goals and will therefore need to negotiate to resolve conflicts. For example, they might agree to take it in turns to chose when there is a conflict over what should happen to the output next.

We have identified several possible abstract task agents

1. A responsive agent that wishes to respond to external stimulus

2. A control agent who wants to try and provide a user with a desired or intended response

3. A Generative agent who will try to negate and provide counter-intuitive or meaningless responses, and also try to kick-start or trigger events even when there is no external stimulus currently taking place.

4. A melody agent who tries to create an aesthetic piece given its understanding of traditional harmony and melody. It may work with the generative agent in many cases, asking for input and denying or accepting the ideas based on its own rules about what is appropriate.

5. We could have a harmonising agent that attempts to provide a harmonisation of a particular piece too?

6. A mood agent wants to resonate environment mood - both 'get' and 'set'.

7. A historical agent wants to repeat things happened before and maybe start record sounds when there are drastic changes in the interface agents and so on.

These agents all have a notion of the current state. These then negotiate to call the various methods at its disposal. The method agents who have very specific abilities such as a low pass filter, changing harmonic density, playing sound samples of a specific category, adding overtones. As these methods have an effect on the suitability of each other they should negotiate first. These agents do not have a notion of the current state only some idea of their possibly effects on each other. We believe there is a relationship between mood and method and we will try and harness this to build a sound device which has the basic attributes we described at the beginning of this document.

Towards a Prototype System

The Interface Agents
One restriction to put upon our system is to quantise time and let the interface agents work at three distinct time scales; short, medium and long. This restriction would be general for all agents interfacing the external environment.

For example, the agent aware of light could keep abrupt changes in its short time memory e.g. someone flicks the light switch or passes by a window, the amount of daylight is stored in its medium term memory and last the agent's long time memory keeps track of seasonal changes, full moon etc.

The agent responsible for monitoring human activity should work in a similar way. Short term would here be gestures, medium: amount of people and their activity in the environment and long term memory is general use and popularity of the system, people's habits, change of office hours etc.

The temporal agent might in its slot for short time memory keep hours, minutes and seconds. Medium scale memory could contain time of day and what weekday (sunday-morning/monday-lunch/friday-night) and long term the time of year (winter/late-spring/early-autumn...) Wind, heat, humidity and the rest of the interface agents would all work in a similar way.

Internally these different scales would poll each other for information, averaging and calculating their respective summary short-medium-long. For medium and long term memory logfiles will be written to disk for backup. Depending on which mappings we want to do and what results we want from the interface part of the system, the agents here need to communicate with each other in different ways. E.g. if we need some musical parameter to be changed (or action to be taken) when the room is crowded and hot, we could implement that to the human interface agent. It would utilise the motion tracking sensor to see if there are - presently - many people about, look in its medium term memory to figure out if there's a tendency/crowd gathering and also communicate with the heat agent to see if it has similar input.
There can also be a direct one-to-one mapping between the agent's discrete points in time and some musical parameters. How many and which parameters we choose to control here will decide how directly responsive the system output will be. Possibly the degree of direct mapping (mostly concerning short-time) can be varied over time. For a first-time user it might be a good thing if the direct feedback is slightly exaggerated. He/she would like to get instant gratification to become comfortable with that the system really is working, alive and reacting. But after time - to keep interest up - other things could become more important like the combination of sensors or 'musical' music progression. These direct mappings could also be present all the time but scaled up/down and mixed with other controllers.

[...]

The actual data from the sensors can be handled in two different ways and hopefully we can try out a combination of both. The first way would be to automatically normalise incoming values to a range of say 0.0-1.0. So if the program detects a peak greater than the previous maximum peak, it will replace the old with the new value and start using that as scaling factor. This will make the sensors adapt to any environment and its extreme conditions.
The other way of handling the input would be to assume a probable range for each sensor and just clip extreme values. The advantage here is that the system won't be less responsive over time (eg. some vandal screams in the microphone and sets the peak level to an unreasonable value - making the microphone non-sensitive to subtle background noise amplitude later on). The drawback is that the system needs to be tuned for each new location and that over a longer period of time. Ideal is a combination of the two that does adapt directly but also falls back to some more reasonable default or average if someone messes with it or something breaks (i.e. will disregard totally unlikely extreme peaks).
After normalisation we will scale the inputs from the different sensors. This will allow us to tune our system and change weight of importance for each interface agent. But to begin with we'll just assume equal power for all sensors.

It is our aim to build a flexible and modular system that can be installed in many different environments/locations and with changing budgets. So if sensors aren't present, breaks or have to be exchanged for some other type, we only instantiate a new interface agent reading from that particular sensor or device. The system should run with any number of interface agents and any number of sensors.

We also see the possibility of adding, to the list of interface agents, a few 'proxy' interface agents. These would work for any device or stream of data and would look for abrupt changes, tendencies and overall activity (at three discrete times). The users would decide what to read from. Examples of input for these proxies could be home-built sensors that the users bring with them and plug into a slot, some device or installation already present nearby the location where our system is set up or maybe stock market data downloaded from the net. Having these proxies would make each installation unique and site specific also on the hardware input side.

Implementation of the interface agents will be done by having an abstract superclass (pseudo code below):

/*pseudo code: each concrete interface agent reads one sensor at three time scales*/
InterfaceAgent {        //abstract class
        short {
                //method looking for quick changes like gestures and transients
        }
        medium {
                //method using this.short to calculate tendencies
        }
        long {
                //method using this.medium to find out about long term use and overall activity
        }
}

Then the actual interface agent classes will all inherit behaviour from this superclass.

Wind : InterfaceAgent
Light : InterfaceAgent
Humans: InterfaceAgent
Proxy : InterfaceAgent

etc.

If time permits we'd also like to try to implement an agent that listens to the output of the system itself in an aesthetic way. It should evaluate the resulting music as groovy, soft, good, bad or less interesting. Machine listening is of course a huge project in itself but some rough presumptions could be done with the help of the other interface agents. A change in the music that for instance instantly empties the room of people should be considered appropriate. So there's already the microphone(s) listening to the sound output in different ways (amplitude, pitched sounds) but a more intelligent analysis of the resulting music would be a good thing that'd boost the complexity of the whole system by introducing yet more feedback.

The Abstract Task Agents and Method Agents
How to implement a task agent like mood? Where will the rules reside defining an emotion like happiness - in each of the method agents or within the task agent itself - or both? Below are two suggested implementations with corresponding bare-bone sounding examples.

1. Method agents are responsible for how to best be happy/sad.

In this example the method agents know themselves how to best reflect an emotion. E.g. lowpass agent understands the message 'happy' (coming from the task agent) and reacts to that by increasing cutoff frequency 300 Hz. Likewise a 'sad' message would decrease cutoff frequency by 300. Another rule could be a melody agent that, when receiving a 'happy' message, changes its currently playing melody to major key and raises its tempo a little.

Simplest possible sounding example written in SuperCollider:

Starting out with three class definitions:

MethodAgent {                                           //abstract superclass for method agents
        var >synth;                             //setter-only class var (was garbled to &gt; by html encoding)
        *new { arg synth;
                ^super.new.synth_(synth);       //store the synth this agent controls
        }
        update { arg param, value;
                synth.set(param, value);                //send parameters to the synth
        }
}
Mlowpass : MethodAgent {                        //lowpass agent subclassing MethodAgent
        var freq= 700;                          //current cutoff frequency in Hz
        happy {
                freq= (freq+300).clip(100, 9000);       //rule 1: raise cutoff 300 Hz, keep within 100-9000
                this.update(\freq, freq);
        }
        sad {
                freq= (freq-300).clip(100, 9000);       //rule 2: lower cutoff 300 Hz, keep within 100-9000
                this.update(\freq, freq);
        }
}
Mmelody : MethodAgent {                         //melody agent subclassing MethodAgent
        var third= 64, rate= 2;                 //third as midinote (64 major, 63 minor), rate in beats-per-second
        happy {
                third= 64;                                              //rule 3: major third
                rate= (rate/0.9).clip(0.1, 10);         //rule 4: speed up ~11%, restrain 0.1-10
                this.update(\third, third.midicps);     //convert midinote to Hz for the synth
                this.update(\rate, rate);
        }
        sad {
                third= 63;                                              //rule 5: minor third
                rate= (rate*0.9).clip(0.1, 10);         //rule 6: slow down 10%, restrain 0.1-10
                this.update(\third, third.midicps);
                this.update(\rate, rate);
        }
}

In the above code rule 1 says: when happy - increase lowpass cutoff frequency by 300 but restrain values between 100 and 9000. Rule 3 would be: when happy - set the third scale position of the melody to be a major third. Rule 6: when sad- decrease melody tempo 10% but restrain values to between 0.1 and 10 (beats-per-second).

To try it out we first need to define two synths - one playing a lowpass filter and another one playing a simple melody.

s.boot; //start the supercollider sound server
a= SynthDef(\lowpass, {arg freq= 700; ReplaceOut.ar(0, LPF.ar(In.ar(0), freq))}).play(s);       //filters whatever is already on bus 0
b= SynthDef(\melody, {arg third= 329.63, rate= 2; Out.ar(0, Saw.ar(Select.kr(LFNoise0.kr(rate, 1.5, 1.5), [60.midicps, third, 67.midicps]), 0.1))}).play(s);    //random walk over root/third/fifth

Then we create our two task agents.

x= Mlowpass(a);         //create a lowpass method agent and pass in the synth that plays the lowpass filter
y= Mmelody(b);          //create a melody method agent and pass in the synth that plays the simple melody

The actual mood messages are then sent in the following manner (imagine this done from the mood agent):

//x and y are the Mlowpass/Mmelody instances created above
x.happy; y.happy;               //send message 'happy' to both task agents
x.sad; y.sad;                   //send message 'sad' to both task agents

This design will make the task agents less bloated and it will be easy to change, add or remove method agents. The mood agent will just tell all available method agents to become happy and it can then focus on negotiation with other task agents.

2. Task agent is responsible for how to best be happy/sad.

This is exactly the same code example as above but rewritten to gather all our rules defining happy and sad inside the mood agent. First four new class definitions:

AbstractTaskAgent {                             //abstract superclass holding the two method agents
        var >lpass, >melody;                    //setter-only access; fixed HTML-escaped '&gt;' from the page scrape
        *new { arg lpass, melody;               //factory: wrap a lowpass agent and a melody agent
                ^super.new.lpass_(lpass).melody_(melody);
        }
}
Mood : AbstractTaskAgent {                      //mood agent subclassing AbstractTaskAgent
        //all rules defining happy/sad live here; method agents just obey.
        //explicit setter-call syntax (freq_(...)) is equivalent to the freq= sugar.
        happy {
                lpass.freq_((lpass.freq + 300).clip(100, 9000));        //rule 1
                melody.third_(64);                                      //rule 3
                melody.rate_((melody.rate / 0.9).clip(0.1, 10));        //rule 4
        }
        sad {
                lpass.freq_((lpass.freq - 300).clip(100, 9000));        //rule 2
                melody.third_(63);                                      //rule 5
                melody.rate_((melody.rate * 0.9).clip(0.1, 10));        //rule 6
        }
}
Mlowpass2 : MethodAgent {               //different lowpass agent subclassing MethodAgent
        var <freq= 700;                 //getter-only access; fixed HTML-escaped '&lt;' from the page scrape
        freq_ { arg val;                //custom setter: store the value, then push it to the synth
                freq= val;              //bug fix: val was never stored, so the freq getter always
                                        //returned 700 and Mood's rules 1/2 could never accumulate
                this.update(\freq, val);
        }
}
Mmelody2 : MethodAgent {                //different melody agent subclassing MethodAgent
        var <third= 64, <rate= 2;       //getter-only access; fixed HTML-escaped '&lt;' from the page scrape
        third_ { arg val;
                third= val;             //bug fix: store val so the getter reflects the change
                this.update(\third, val.midicps);
        }
        rate_ { arg val;
                rate= val;              //bug fix: store val, otherwise Mood's rules 4/6 read a stale rate
                this.update(\rate, val);
        }
}

Here is the same code as above for defining two synths.

s.boot; //start the supercollider sound server
a= SynthDef(\lowpass, {arg freq= 700; ReplaceOut.ar(0, LPF.ar(In.ar(0), freq))}).play(s); //lowpass-filter whatever is already on bus 0
b= SynthDef(\melody, {arg third= 329.63, rate= 2; Out.ar(0, Saw.ar(Select.kr(LFNoise0.kr(rate, 1.5, 1.5), [60.midicps, third, 67.midicps]), 0.1))}).play(s); //saw melody stepping randomly between root, third and fifth

And this is how the mood agent is set up.

z= Mood(Mlowpass2(a), Mmelody2(b));     //mood (task) agent wrapping one lowpass and one melody method agent

Last we send the messages to the mood agent like this:

z.happy;        //mood agent applies rules 1, 3 and 4 to its method agents
z.sad;          //mood agent applies rules 2, 5 and 6 to its method agents

Here method agents are very simple and only do what they are told. They can return their state and change the sound that they're in control of and that is it. It is the mood agent that knows what happiness means and will direct the method agents to do certain things.
A good thing with this design is that the rules defining happiness are all in one place and it would be possible to write different versions of the mood agent that implements happiness in different ways. One drawback would be that for any change to the method agents, we would have to update and rewrite parts of the mood class.

Presently it seems like suggestion number two would be the design most suitable for our needs. This would mean that crosstalk between method agents isn't needed anymore as suggested in version one (see sketch#1). Conflicting or overlapping methods are rather dealt with by the task agents as they know which actions to take to come up with the desired result. On the other hand the method agents need to be able to report their state back to the task agents and also be intelligent enough to take their own actions e.g. telling when certain tasks are finished.

work with mark: job as a research fellow

between march 2004 and march 2006 i had the fantastic opportunity to work as a research fellow for prof. mark d'inverno (http://www2.wmin.ac.uk/~dinverm/). it was a part time engagement at the math department at University of Westminster : cavendish school of computer science in london.
mark is a math professor specialising in intelligent agents and also a great pianist. our goal was to combine his (and mine) two big interests by creating musical agents that could improvise, jam and play together.
i learned a lot in the process and had to read up on math, specification languages, agent theories, genetic algorithms etc. i found so much interesting stuff out there to get totally absorbed in. this blog is partly started to document our research.

Pages

Subscribe to RSS - research