How to use the music21.note.Note class in music21

To help you get started, we’ve selected a few music21 examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

github shimpe / canon-generator / canon-gen.py View on Github external
############################################################################

  # NOTE(review): fragment of a larger function — `chords`, `voices`, `octave`,
  # `quarterLength`, `stacking`, `spice_depth`, `scale`, `realize_chord` and
  # `spiceup_streams` are defined in the enclosing scope (not visible here).
  # prepare some streams: one per voice
  # all bass notes of each chord form one voice
  # all 2nd notes of each chord form a second voice
  # ...
  # convert chords to notes and stuff into a stream
  streams = {}
  splitted_chords = chords.split(" ")
  for v in range(voices):
    streams[v] = music21.stream.Stream()
  # split each chord into a separate voice: voice v receives the v-th pitch
  # of every realized chord, each lasting `quarterLength`
  for c in splitted_chords:
    pitches = realize_chord(c, voices, octave, direction="descending")
    for v in range(voices):
      note = music21.note.Note(pitches[v])
      note.quarterLength = quarterLength
      streams[v].append(note)

  # combine all voices to one big stream
  totalstream = music21.stream.Stream()
  for r in range(stacking):
    for s in streams:
      # deepcopy so each stacked repetition is an independent copy of the voice
      totalstream.insert(0, copy.deepcopy(streams[s]))

  # add some spice to the boring chords. sugar and spice is always nice
  spiced_streams = [totalstream]
  for s in range(spice_depth):
    # each iteration spices up the stream that was already spiced up in the previous iteration,
    # leading to spicier and spicier streams
    spiced_streams.append(spiceup_streams(spiced_streams[s], scale))
github cuthbertLab / music21 / music21 / alpha / analysis / fixer.py View on Github external
'omr': measure stream,
                'expected': measure stream,
            }
            '''
            # OMR measure: a single whole-note F (the un-ornamented reading).
            omrMeasure = stream.Measure()
            omrNote = note.Note('F')
            omrNote.duration = duration.Duration('whole')
            omrMeasure.append(omrNote)

            # Expected fix: the same whole-note F with a Turn expression attached.
            expectedFixedOmrMeasure = stream.Stream()
            expectedOmrNote = deepcopy(omrNote)
            expectedOmrNote.expressions.append(expressions.Turn())
            expectedFixedOmrMeasure.append(expectedOmrNote)

            # MIDI rendition of that turn: the four notes G-F-E-F.
            midiMeasure = stream.Measure()
            turn = [note.Note('G'), note.Note('F'), note.Note('E'), note.Note('F')]
            midiMeasure.append(turn)

            # Bundle the three streams under the documented keys.
            returnDict = {
                'name': 'Single Turn Measure',
                'midi': midiMeasure,
                'omr': omrMeasure,
                'expected': expectedFixedOmrMeasure,
            }
            return returnDict
github cuthbertLab / music21 / music21 / spanner.py View on Github external
def testReplaceSpannedElement(self):
        '''
        replaceSpannedElement should move spanner membership from the old
        note to the new one, in both directions (spanner -> notes and
        note -> spanner sites).

        NOTE(review): this excerpt is truncated — the test continues past
        the final comment below.
        '''
        from music21 import note, spanner

        n1 = note.Note()
        n2 = note.Note()
        n3 = note.Note()
        n4 = note.Note()
        n5 = note.Note()

        # slur spanning n1 and n3
        su1 = spanner.Slur()
        su1.addSpannedElements([n1, n3])

        self.assertEqual(su1.getSpannedElements(), [n1, n3])
        self.assertEqual(n1.getSpannerSites(), [su1])

        # swap n1 out for n2: the slur now spans n2 and n3
        su1.replaceSpannedElement(n1, n2)
        self.assertEqual(su1.getSpannedElements(), [n2, n3])
        # this note now has no spanner sites
        self.assertEqual(n1.getSpannerSites(), [])
        self.assertEqual(n2.getSpannerSites(), [su1])

        # replace n2 w/ n1
github cuthbertLab / music21 / music21 / repeat.py View on Github external
def testExpandRepeatExpressionH(self):
        '''
        Build a five-measure part containing a start/end repeat around m3
        and a D.C. in m4, as setup for repeat-expansion testing.

        NOTE(review): excerpt is truncated — the expansion and assertions
        follow after this setup.
        '''
        # test one back repeat at end of a measure
        from music21 import stream, bar, note

        # simple da capo alone
        m1 = stream.Measure()
        m1.repeatAppend(note.Note('c4', type='half'), 2)

        m2 = stream.Measure()
        m2.repeatAppend(note.Note('e4', type='half'), 2)

        # m3 is wrapped in explicit repeat barlines
        m3 = stream.Measure()
        m3.leftBarline = bar.Repeat(direction='start')
        m3.repeatAppend(note.Note('g4', type='half'), 2)
        m3.rightBarline = bar.Repeat(direction='end')

        # m4 carries the Da Capo marking
        m4 = stream.Measure()
        m4.repeatAppend(note.Note('a4', type='half'), 2)
        dcHandle = DaCapo('D.C.')
        m4.append(dcHandle)

        m5 = stream.Measure()
        m5.repeatAppend(note.Note('b4', type='half'), 2)

        s = stream.Part()
        s.append([m1, m2, m3, m4, m5])
github cuthbertLab / music21 / music21 / midi / translate.py View on Github external
def testMidiExportVelocityA(self):
        '''
        Notes given absolute velocity scalars 0.0 through 0.9 should export
        to MIDI tracks whose repr contains each expected velocity exactly once.
        '''
        from music21 import note, stream

        melody = stream.Stream()
        for step in range(10):
            pitched = note.Note('c3')
            # absolute (non-relative) velocity: 0.0, 0.1, ... 0.9
            pitched.volume.velocityScalar = step / 10.
            pitched.volume.velocityIsRelative = False
            melody.append(pitched)

        # render to MIDI tracks and inspect the textual representation
        trackListRepr = repr(streamHierarchyToMidiTracks(melody))
        self.assertEqual(trackListRepr.count('velocity=114'), 1)
        self.assertEqual(trackListRepr.count('velocity=13'), 1)
github cuthbertLab / music21 / music21 / alpha / analysis / ornamentRecognizer.py View on Github external
def testRecognizeTurn(self):
        '''
        Set up candidate note groups (even, delayed, rubato turns) for turn
        recognition.

        NOTE(review): excerpt is truncated — rubatoTurn durations and the
        actual test conditions continue past this fragment.
        '''
        # set up experiment
        testConditions = []

        n1 = note.Note('F#')
        n1Enharmonic = note.Note('G-')
        noteInTurnNotBase = note.Note('G')
        noteNotInTurn = note.Note('A')

        # even turn: four notes G-F#-E-F#, each 1/4 of the base duration
        evenTurn = [note.Note('G'), note.Note('F#'), note.Note('E'), note.Note('F#')]
        for n in evenTurn:
            n.duration.quarterLength = n1.duration.quarterLength / len(evenTurn)

        # delayed turn: first note takes twice its even share, the remaining
        # three split the rest equally
        delayedTurn = [note.Note('G'), note.Note('F#'), note.Note('E'), note.Note('F#')]
        delayedTurn[0].duration.quarterLength = 2 * n1.duration.quarterLength / len(delayedTurn)
        for i in range(1, len(delayedTurn)):
            smallerDuration = n1.duration.quarterLength / (2 * len(delayedTurn))
            delayedTurn[i].duration.quarterLength = smallerDuration

        rubatoTurn = [note.Note('G'), note.Note('F#'), note.Note('E'), note.Note('F#')]
        # durations all different, add up to 1
github cuthbertLab / music21 / music21 / mei / base.py View on Github external
- att.note.anl (all)

    **Contained Elements not Implemented:**

    - MEI.critapp: app
    - MEI.edittrans: (all)
    - MEI.lyrics: verse
    - MEI.shared: syl
    '''
    # dispatch table: MEI child tag -> handler function
    tagToFunction = {'{http://www.music-encoding.org/ns/mei}dot': dotFromElement,
                     '{http://www.music-encoding.org/ns/mei}artic': articFromElement,
                     '{http://www.music-encoding.org/ns/mei}accid': accidFromElement}

    # pitch and duration... these are what we can set in the constructor
    theNote = note.Note(safePitch(elem.get('pname', ''),
                                  _accidentalFromAttr(elem.get('accid')),
                                  elem.get('oct', '')),
                        duration=makeDuration(_qlDurationFromAttr(elem.get('dur')),
                                              int(elem.get('dots', 0))))

    # iterate all immediate children
    dotElements = 0  # count the number of  elements
    for subElement in _processEmbeddedElements(elem.findall('*'), tagToFunction, elem.tag, slurBundle):
        # handlers return ints for dots, Articulation objects for artic, and
        # strings for accid (presumably accidental names — see handlers)
        if isinstance(subElement, six.integer_types):
            dotElements += subElement
        elif isinstance(subElement, articulations.Articulation):
            theNote.articulations.append(subElement)
        elif isinstance(subElement, six.string_types):
            theNote.pitch.accidental = pitch.Accidental(subElement)

    # adjust for @accid.ges if present
github cuthbertLab / music21 / obsolete / noteStream_old.py View on Github external
def sortAscendingAllChords(self):
        '''
        Run chord.sortAscending on every chord in the stream.

        Placeholder — not yet implemented.
        '''
        pass

    def splitIntoNoteStreams(self, fillWithRests = False, copyNotes = False):
        pass
    


class StreamException(Exception):
    '''Raised for errors encountered while processing a Stream.'''
    pass

# NOTE(review): legacy Python 2 demo script (uses the `print` statement);
# truncated at the end of this excerpt.
if (__name__ == "__main__"):
    # build four named notes and one eighth rest
    (note1,note2,note3,note4) = (note.Note (), note.Note (), note.Note(), note.Note())
    note1.name = "C"; note2.name = "D"; note3.name = "E-"; note4.name = "F#"
    rest1 = note.Rest()
    rest1.duration.type = "eighth"
    note1.duration.type = "whole"; note2.duration.type = "half"
    note3.duration.type = "quarter"; note4.duration.type = "eighth"
    stream1 = Stream ([note1, note2, note3, rest1, note4])
    # whole + half + quarter + eighth + eighth = 8 quarter lengths
    assert stream1.totalDuration == 8
    # dot every element, then check the Lilypond rendering
    for tN in stream1:
        tN.duration.dots = 1
    a = stream1.lily.value
    assert common.basicallyEqual(a, r'''\override Staff.StaffSymbol #'color = #(x11-color 'LightSlateGray) \clef "treble" c'1. d'2. ees'4. r8. fis'8. ''')
    ts1 = meter.TimeSignature("2/4")
    stream1.applyTimeSignature(ts1)
    stream1.setNoteTimeInfo(True)
    print stream1.intervalOverRestsList
    noteFollowingRest = stream1.noteFollowingNote(rest1, True)
github cuthbertLab / music21 / music21 / base.py View on Github external
def testPickupMeauresBuilt(self):
        '''
        Check measure duration, offset, and beat for a partially-filled
        4/4 measure before anacrusis padding is applied.

        NOTE(review): excerpt is truncated — the padAsAnacrusis() portion
        continues past this fragment.
        '''
        import music21
        from music21 import stream, meter, note
    
        s = stream.Score()
    
        # one quarter note in a 4/4 measure
        m1 = stream.Measure()
        m1.timeSignature = meter.TimeSignature('4/4')
        n1 = note.Note('d2')
        n1.quarterLength = 1.0
        m1.append(n1)
        # barDuration is based only on TS
        self.assertEqual(m1.barDuration.quarterLength, 4.0)
        # duration shows the highest offset in the bar
        self.assertEqual(m1.duration.quarterLength, 1.0)
        # presently, the offset of the added note is zero
        self.assertEqual(n1.getOffsetBySite(m1), 0.0)
        # the _getMeasureOffset method is called by all methods that evaluate
        # beat position; this takes padding into account
        self.assertEqual(n1._getMeasureOffset(), 0.0)
        self.assertEqual(n1.beat, 1.0)
        
        # the Measure.padAsAnacrusis() method looks at the barDuration and, 
        # if the Measure is incomplete, assumes it's an anacrusis and adds
        # the appropriate padding
github cuthbertLab / music21 / music21 / braille / basic.py View on Github external
>>> chordWithAccidentals = chord.Chord(['C4', 'E-4', 'F#4'], quarterLength=4.0)
    >>> chordWithAccidentals.pitches[0].accidental = 'natural'
    >>> print(basic.chordToBraille(chordWithAccidentals, descending=True))
    ⠩⠐⠿⠣⠌⠡⠼
    '''
    music21Chord._brailleEnglish = []
    # order pitches for transcription; descending reverses the sorted list
    allPitches = sorted(music21Chord.pitches)
    direction = 'Descending'
    if descending:
        allPitches.reverse()
    else:
        direction = 'Ascending'

    chordTrans = []
    # the first (base) pitch is transcribed as a full note; the remaining
    # pitches are handled as intervals below
    basePitch = allPitches[0]
    initNote = note.Note(basePitch, quarterLength=music21Chord.quarterLength)
    brailleNote = noteToBraille(music21Note=initNote, showOctave=showOctave)
    if brailleNote == symbols['basic_exception']:  # pragma: no cover
        environRules.warn('Chord {0} cannot be transcribed to braille.'.format(music21Chord))
        music21Chord._brailleEnglish.append('{0} None'.format(music21Chord))
        return symbols['basic_exception']
    chordTrans.append(brailleNote)
    music21Chord._brailleEnglish.append('{0} Chord:\n{1}'.format(
        direction, '\n'.join(initNote._brailleEnglish)))

    # transcribe the remaining pitches; a KeyError from the accidental
    # lookup is reported rather than propagated (excerpt truncated here)
    for currentPitchIndex in range(1, len(allPitches)):
        currentPitch = allPitches[currentPitchIndex]
        try:
            handlePitchWithAccidental(currentPitch, chordTrans, music21Chord._brailleEnglish)
        except KeyError:
            environRules.warn(
                'Accidental {0} of chord {1} cannot be transcribed to braille.'.format(