
Channel fixes.

- And a change in the way YSynth shuts down SDL audio.
master
Bas Weelinck, 13 years ago
Parent commit: 22bc0333b6
4 changed files with 176 additions and 22 deletions
  1. .gitignore  +2   -0
  2. hoi2.py     +17  -3
  3. pymod.c     +13  -1
  4. ysynth.py   +144 -18

+2 -0
.gitignore (view file)

@@ -0,0 +1,2 @@
*.swp
*.pyc

+17 -3
hoi2.py (view file)

@@ -13,9 +13,23 @@ synth = YSynth()
#synth.set_graph(WhiteNoise() * 1.0)


# Should sound vaguely similar to a flanged whitenoise signal
#noise = WhiteNoise()
#synth.set_graph((noise + noise[50 + 100 * (Sin(0.1) + 1)]) * 0.5 * (RevSaw(2) + 1) * 0.5)
synth.set_graph(Sin(440) * RevSaw(5) * 0.2)
noise = WhiteNoise()
synth.set_graph((noise + noise[50 + 100 * (Sin(0.1) + 1)]) * 0.5 * (RevSaw(2) + 1) * 0.5)
#synth.set_graph(Sin(440) * RevSaw(5) * 0.2)
#synth.set_graph((Sin(440) * 0.5, Sin(220) * 0.5))
#synth.set_graph(Sin((440, 220)) * 0.5)

def short_pulse(shortness, silence):
z = (Square(2. ** shortness) + 1) * 0.5
for i in xrange(silence):
z = z * (Square(2. ** (shortness - i)) + 1) * 0.5

return z

# Echo effect
#sine_pulse = Sin(440) * short_pulse(3, 4) * 1.0
#sine_pulse = sine_pulse * 0.9 + sine_pulse[2000] * 0.1
#synth.set_graph(sine_pulse)


def harmonics(osc, freq, count):
return reduce(lambda a, b: a + b,
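
For illustration, a minimal sketch of the two stereo graph forms hinted at by the newly added commented-out lines above. This assumes the ysynth module from this repository is importable and that YSynth and Sin are its public names (hoi2.py's own import lines are not shown in this hunk).

from ysynth import YSynth, Sin

synth = YSynth()

# Tuple form: two mono graphs are joined into one stereo stream by the
# new tuple handling in YSynth.set_graph() (see the ysynth.py diff below).
synth.set_graph((Sin(440) * 0.5, Sin(220) * 0.5))

# Multichannel form: a single oscillator driven by a tuple of frequencies,
# producing a two-channel chunk directly.
#synth.set_graph(Sin((440, 220)) * 0.5)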


+13 -1
pymod.c (view file)

@@ -18,7 +18,18 @@ void PyYSynthContext_Dealloc(PyYSynthContext *self)
{
puts("_ysynth: Dealloc context");
if (self->active) {
/* Allow any running Python audio routines
* to finish their current chunk, then lock out
* the audio callback and shut the audio device
* down.
* It is important to first unlock the GIL otherwise
* we're at risk of causing deadlock.
*/
Py_BEGIN_ALLOW_THREADS
SDL_LockAudio();
SDL_CloseAudio();
SDL_UnlockAudio();
Py_END_ALLOW_THREADS
self->active = 0;
}


@@ -188,7 +199,8 @@ static PyObject *_ysynth_set_callback(PyObject *self, PyObject *args)
if (PyArg_ParseTuple(args, "O!O", &PyYSynthContext_Type, (PyObject**)&ctx,
&callback)) {
/* We don't have to lock SDL audio because Python's GIL will
* have locked out the callback for us
* have locked out the C callback for us, only this Python thread
* can be executing at this time.
*/
Py_XDECREF(ctx->callback);
ctx->callback = callback;


+144 -18
ysynth.py (view file)

@@ -47,24 +47,35 @@ class YSynth(object):
"""


# Outputs silence if no graph available
if not self.graph:
if self.graph is None:
channels.fill(0)
return


# Process audio graph
buf_len = channels.shape[0]
self.chunk_size = buf_len
try:
# Process audio graph
buf_len = channels.shape[0]
self.chunk_size = buf_len


next_chunk = next(self.graph)
# Do we need to mix from mono to stereo?
if len(next_chunk.shape) == 1:
next_chunk = np.dot(np.reshape(next_chunk, (-1, 1)), ((1, 1),))
#print channels.shape, next_chunk.shape
np.round(next_chunk * 32767.5, 0, out=channels)
next_chunk = next(self.graph)
# Do we need to mix from mono to stereo?
if len(next_chunk.shape) == 1:
next_chunk = np.dot(np.reshape(next_chunk, (-1, 1)), ((1, 1),))
#print channels.shape, next_chunk.shape
np.round(next_chunk * 32767.5, 0, out=channels)


# Advance sampleclock
self.samples += buf_len
# Advance sampleclock
self.samples += buf_len

# When this happens, set the graph back to
# None; this suppresses further errors and lets
# a first exception slip through, causing a backtrace
# in ysynth.
except StopIteration:
self.graph = None


def set_graph(self, graph):
if isinstance(graph, tuple):
graph = ChannelJoiner(*graph)
graph.set_synth(self)
self.graph = iter(graph)
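
The mono-to-stereo mix in the callback hunk above is a plain matrix product; an illustrative, self-contained numpy sketch (not part of the diff):

import numpy as np

mono = np.array([0.0, 0.5, -0.5])           # one mono chunk, shape (3,)
stereo = np.dot(np.reshape(mono, (-1, 1)),  # (3, 1) column vector
                ((1, 1),))                  # times a (1, 2) row of ones
assert stereo.shape == (3, 2)               # each sample copied to both channels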


@@ -110,10 +121,67 @@ class YConstant(object):


def __call__(self):
while True:
yield self.const
x = np.empty((self.synth.chunk_size,))
x.fill(self.const)
yield x


def set_synth(self, synth):
pass
self.synth = synth

# XXX: Write a function that broadcasts
# various channeled signals to a common
# shape for operations that require this.
def broadcast(stream):
"""
This generator takes an iterator yielding a variable number of arrays and
scalars and broadcasts any arrays and scalars that require this to the
correct amount of channels. If an array argument could not be normalized,
this function raises a ValueError with the appropriate information.
"""

channels = 0
channels_shape = None
require_broadcast = False
stream = iter(stream)

# First, check that all inputs agree on a single multichannel count; if not,
# we need to raise an exception.
args = next(stream)
for c in args:
if len(c.shape) == 2 and c.shape[1] != 1:
if not channels:
channels = c.shape[1]
channels_shape = c.shape
else:
if channels != c.shape[1]:
raise ValueError("Cannot broadcast input signals of"
" shape %r and %r together." % (channels_shape, c.shape))
else:
# There is no need for broadcasting if no differently sized
# channels occur
if channels:
require_broadcast = True

# Okay we need to broadcast every argument to 'channels' nr. of channels.
if require_broadcast:
while True:
r = []
for c in args:
if len(c.shape) != 2:
c = np.reshape(c, (-1, 1))
if c.shape[1] != channels:
r.append(np.dot(c, ([1] * channels,)))
else:
r.append(c)

yield tuple(r)
args = next(stream)

# Or we don't need to broadcast in which case we simply pass
# on our results
while True:
yield args
args = next(stream)
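
A usage sketch of the broadcast() helper above, assuming it is importable from the ysynth module (illustrative, not part of the diff). Any mono chunk is expanded to the channel count of the widest chunk; the stereo chunk is passed first here so the detection loop sees a multichannel shape before the mono one:

import numpy as np
from ysynth import broadcast

def chunk_stream():
    stereo = np.zeros((4, 2))   # a two-channel chunk
    mono = np.ones((4,))        # a mono chunk
    while True:
        yield (stereo, mono)

stereo_out, mono_out = next(broadcast(chunk_stream()))
assert stereo_out.shape == (4, 2)   # already two channels, left untouched
assert mono_out.shape == (4, 2)     # mono expanded to two identical channels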


class YAudioGraphNode(object):
"""
@@ -127,9 +195,21 @@ class YAudioGraphNode(object):
self.inputs = []
lens = 0


# Convert input arguments to audio streaming
# components
for stream in inputs:
# Attempt to join multiple channels
if isinstance(stream, tuple):
if len(stream) == 1:
stream = stream[0]
else:
stream = ChannelJoiner(*stream)

# Convert constant value to audio stream
if not isinstance(stream, YAudioGraphNode):
stream = YConstant(stream)

# Append to input signals
self.inputs.append(stream)


self.inputs = tuple(self.inputs)
@@ -174,7 +254,7 @@ class YAudioGraphNode(object):
# maybe this must be changed at a later time.


# Connect the actual generator components
sample_iter = iter(self(izip(*self.inputs)))
sample_iter = iter(self(broadcast(izip(*self.inputs))))


# Build the sample protection function
def sample_func():
@@ -230,11 +310,33 @@ class YAudioGraphNode(object):
def __rdiv__(self, other):
return Divisor(other, self)


def __getitem__(self, delay):
def __getitem__(self, key):
"""
Return a delayed version of the output signal.
"""
return Delay(self, delay)

# Return delay
if isinstance(key, int) or isinstance(key, float) or \
isinstance(key, YAudioGraphNode):
return Delay(self, key)

# Split off channels
elif isinstance(key, slice):
if key.start is None:
start = 0
if key.stop is None:
# XXX Channels here
# FIXME: Incomplete code
stop = self.hoi
if key.step is None:
step = 1
rng = xrange(start, stop, step)

else:
rng = key

# XXX FIXME Incomplete channel split code
return # Return channel ranges
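
For reference, the numeric/node path of __getitem__ is what the echo example in hoi2.py relies on; a minimal sketch, assuming Sin is importable from the ysynth module (illustrative, not part of the diff):

from ysynth import Sin

sine = Sin(440)
# Indexing with a number (or another graph node) returns a Delay node,
# here mixing the signal with a copy of itself delayed by 2000 samples:
echo = sine * 0.9 + sine[2000] * 0.1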


def __next__(self):
"""
@@ -299,6 +401,16 @@ class Delay(YAudioGraphNode):
# Finally update sample pointer
samples = (samples + self.synth.chunk_size) % buf.shape[0]


# Tracker
# XXX: Incomplete class
class SimpleStaticTracker(YAudioGraphNode):
"""
Think MOD file.
"""

def __init__(self, track):
self.track = track

# Base oscillator class
class YOscillator(YAudioGraphNode):
def __call__(self, l):
@@ -329,9 +441,11 @@ class YOscillator(YAudioGraphNode):
last_cycle = [-(ifreq / float(self.synth.samplerate))]


# Compute cycle using IIR filter and np.fmod
# XXX: I won't work with multichannel input
cycle, last_cycle = lfilter([1], [1, -1], freq /
float(self.synth.samplerate) *
np.ones(self.synth.chunk_size), zi=last_cycle)
np.ones((self.synth.chunk_size,) + freq.shape[1:]),
zi=last_cycle, axis=0)
yield np.fmod(cycle, 1.0)
last_cycle = np.fmod(last_cycle, 1.0)
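
The lfilter([1], [1, -1], ...) call above is a running sum (an IIR integrator) over per-sample frequency increments, i.e. the accumulated phase; a self-contained scipy sketch (illustrative, not part of the diff):

import numpy as np
from scipy.signal import lfilter

x = np.ones(4) * 0.25                            # freq / samplerate per sample
cycle, zf = lfilter([1], [1, -1], x, zi=[0.0])   # y[n] = x[n] + y[n-1]
assert np.allclose(cycle, [0.25, 0.5, 0.75, 1.0])
assert np.allclose(zf, [1.0])                    # carried into the next chunk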


@@ -340,6 +454,17 @@ class YOscillator(YAudioGraphNode):
def oscillate(self, l):
raise NotImplementedError("Inherit this class")


# Channel modifiers
class ChannelJoiner(YAudioGraphNode):
"""
Join multiple input channels into a single
output stream.
"""

def __call__(self, l):
for chunks in l:
yield np.hstack([np.reshape(chunk, (-1, 1)) for chunk in chunks])
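
As an aside, what ChannelJoiner does to a single tuple of mono chunks, shown with plain numpy (illustrative, not part of the diff):

import numpy as np

left = np.array([0.1, 0.2, 0.3])
right = np.array([0.4, 0.5, 0.6])
stereo = np.hstack([np.reshape(c, (-1, 1)) for c in (left, right)])
assert stereo.shape == (3, 2)   # column 0 is left, column 1 is right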

# Noise signals
# XXX: I am probably not White Noise, fix me
# up at a later time.
@@ -402,6 +527,7 @@ class Pulse(YOscillator):
yield np.array(pos > 0, dtype=float)


# Flanger effect
# FIXME: Incomplete class
class Flanger(YAudioGraphNode):
"""
Perform virtual tape flange by mixing the

