How to use the pitch_and_intensity module in praatio

To help you get started, we’ve selected a few pitch_and_intensity examples based on popular ways it is used in public projects.

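All of the examples below share the same core call: pitch_and_intensity.extractPI runs Praat over a wav file, writes the raw results to a text file (reused on later runs when forceRegenerate=False), and returns a list of (time, pitch, intensity) tuples. A minimal sketch, where the wav file name and the path to the Praat executable are placeholders:

from praatio import pitch_and_intensity

praatEXE = "/usr/bin/praat"  # placeholder: path to your Praat executable
piData = pitch_and_intensity.extractPI("mywav.wav", "mywav_pi.txt",
                                       praatEXE, 75, 450)  # pitch range in Hz
for time, pitchVal, intensityVal in piData:
    print(time, pitchVal, intensityVal)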

github timmahrt / ProMo / examples / pitch_morph_to_pitch_contour.py
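# Imports and setup assumed for this excerpt (the promo module paths are our
# best guess at ProMo's layout; root and praatEXE are placeholders)
from os.path import join
from praatio import dataio, pitch_and_intensity
from promo.morph_utils import interpolation, utils

root = "files"               # placeholder: folder containing the input files
praatEXE = "/usr/bin/praat"  # placeholder: path to your Praat executable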
minPitch = 50
maxPitch = 350
stepList = utils.generateStepList(3)

fromName = "mary1"
fromWavFN = fromName + ".wav"
fromPitchFN = fromName + ".txt"
fromTGFN = join(root, fromName + ".TextGrid")

toName = "mary1_stylized"
toPitchFN = toName + ".PitchTier"

# Prepare the data for morphing
# 1st load it into memory
fromPitchList = pitch_and_intensity.extractPI(join(root, fromWavFN),
                                              join(root, fromPitchFN),
                                              praatEXE, minPitch,
                                              maxPitch, forceRegenerate=False)
fromPitchList = [(time, pitch) for time, pitch, _ in fromPitchList]

# Load in the target pitch contour
pitchTier = dataio.open2DPointObject(join(root, toPitchFN))
toPitchList = [(time, pitch) for time, pitch in pitchTier.pointList]

# The target contour doesn't contain enough sample points, so interpolate
# over the provided samples
# (this step can be skipped if there are enough sample points--a warning
# will be issued if there are any potential problems)
toPitchList = interpolation.quadraticInterpolation(toPitchList, 4, 1000, 0)
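# A rough stand-in for this resampling step, using numpy's linear
# interpolation instead of ProMo's quadratic routine (illustration only):
# import numpy as np
# denseTimes = np.linspace(toPitchList[0][0], toPitchList[-1][0], 1000)
# densePitch = np.interp(denseTimes,
#                        [t for t, _ in toPitchList],
#                        [p for _, p in toPitchList])
# toPitchList = list(zip(denseTimes, densePitch))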

# 3rd select which sections to align.
github timmahrt / ProMo / examples / modify_pitch_accent_example.py
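# Imports and setup assumed for this excerpt; root, rootOutputPath, and
# praatEXE are placeholders
import os
from os.path import join
from praatio import audioio, dataio, pitch_and_intensity, tgio

root = "files"                   # placeholder: input folder
rootOutputPath = "files/output"  # placeholder: output folder
praatEXE = "/usr/bin/praat"      # placeholder: path to your Praat executable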
####################################

tgFN = "mary1.TextGrid"
wavFN = "mary1.wav"
pitchIntensityFN = "mary1.txt"
originalPitchFN = "mary1.pitch"
outputWavFN = "mary1_accented.wav"
outputPitchFN = "mary1_accented.pitch"
minPitch = 75
maxPitch = 450

if not os.path.exists(rootOutputPath):
    os.mkdir(rootOutputPath)

# 1st - get pitch
piList = pitch_and_intensity.extractPI(join(root, wavFN),
                                       join(rootOutputPath, pitchIntensityFN),
                                       praatEXE, minPitch, maxPitch)
pitchList = [(timeV, pitchV) for timeV, pitchV, _ in piList]

dur = audioio.WavQueryObj(join(root, wavFN)).getDuration()
pointObj = dataio.PointObject2D(pitchList, dataio.PITCH, 0, dur)
pointObj.save(join(rootOutputPath, originalPitchFN))


# 2nd - get region to manipulate.  Let's make the subject more emphatic!
tg = tgio.openTextgrid(join(root, tgFN))
tier = tg.tierDict["words"]
start, stop, _ = tier.entryList[0]  # Getting info for the first word

targetPitchList = [(timeV, pitchV) for timeV, pitchV in pitchList
                   if timeV >= start and timeV <= stop]
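
# A hand-rolled illustration (not ProMo's API): raise the selected region's
# pitch samples by 20% to make the word more emphatic
emphasizedPitchList = [(timeV, pitchV * 1.2)
                       for timeV, pitchV in targetPitchList]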
github timmahrt / pyAcoustics / examples / split_audio_on_silence.py
specified by /numSegmentsToExtract/.  Otherwise,
                            all non-silent segments are kept.
    generateWavs - if False, no wavefiles are extracted, but you can look at
                    the generated textgrids to see which wavefiles would have
                    been extracted
    '''
    utils.makeDir(tgPath)
    utils.makeDir(pitchPath)
    utils.makeDir(subwavPath)
    
    name = os.path.splitext(fn)[0]
    
    piSamplingRate = 100  # Samples per second
    sampleStep = 1 / float(piSamplingRate)
    outputFN = os.path.splitext(fn)[0] + ".txt"
    motherPIList = pitch_and_intensity.extractPI(join(inputPath, fn),
                                                 join(pitchPath, outputFN),
                                                 praatEXE,
                                                 minPitch, maxPitch,
                                                 sampleStep=sampleStep,
                                                 forceRegenerate=False)

    # entry = (time, pitchVal, intVal)
    motherPIList = [float(entry[2]) for entry in motherPIList]
    
    # We need the intensity threshold to distinguish silence from speech/noise
    # Naively, we can extract this by getting the nth percent most intense
    # sound in the file naive_vad.getIntensityPercentile()
    # (but then, how do we determine the percent?)
    # Alternatively, we could consider the set of intensity values to be
    # bimodal -- silent values vs non-silent.  The best threshold is the one
    # that minimizes the overlap between the two distributions, obtained via
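
    # A minimal stand-in for choosing that threshold: take a fixed percentile
    # of the intensity values (the 10th percentile is a placeholder choice,
    # not pyAcoustics' method)
    import numpy as np
    silenceThreshold = np.percentile(motherPIList, 10)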
github timmahrt / pyAcoustics / examples / split_audio_on_tone.py
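# Imports assumed for this excerpt (the pyacoustics module paths are our best
# guess at pyAcoustics' layout)
import os
from os.path import join
from praatio import pitch_and_intensity, tgio
from pyacoustics.signals import audio_scripts
from pyacoustics.speech_detection import split_on_tone
from pyacoustics.utilities import utils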
def audiosplitOnTone(inputPath, fn, pitchPath, tgPath, subwavPath,
                     minPitch, maxPitch, toneFrequency, minEventDuration,
                     praatEXE, praatScriptPath, forceRegen,
                     generateWavs=False):
    
    utils.makeDir(pitchPath)
    utils.makeDir(tgPath)
    utils.makeDir(subwavPath)
    
    name = os.path.splitext(fn)[0]
    piSamplingRate = 100  # Samples per second

    # Extract pitch and find patterns in the file
    outputFN = os.path.splitext(fn)[0] + ".txt"
    sampleStep = 1 / float(piSamplingRate)
    motherPIList = pitch_and_intensity.extractPI(join(inputPath, fn),
                                                 join(pitchPath, outputFN),
                                                 praatEXE,
                                                 minPitch, maxPitch,
                                                 sampleStep=sampleStep,
                                                 forceRegenerate=forceRegen)
    # entry = (time, pitchVal, intVal)
    pitchList = [float(entry[1]) for entry in motherPIList]
    timeDict = split_on_tone.splitFileOnTone(pitchList,
                                             piSamplingRate,
                                             toneFrequency,
                                             minEventDuration)

    # Output result as textgrid
    duration = audio_scripts.getSoundFileDuration(join(inputPath, fn))
    tg = tgio.Textgrid()
    for key in ['beep', 'speech', 'silence']:
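        # (sketch of the elided loop body, assuming timeDict maps each key to
        #  a list of (start, stop) pairs)
        entryList = [(start, stop, key) for start, stop in timeDict[key]]
        tier = tgio.IntervalTier(key, entryList, 0, duration)
        tg.addTier(tier)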
github timmahrt / ProMo / examples / pitch_morph_example.py
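# Imports assumed for this excerpt; root, praatEXE, minPitch, and maxPitch
# are defined as in the earlier examples
import os
from os.path import join
from praatio import pitch_and_intensity
from promo import f0_morph
from promo.morph_utils import utils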
stepList = utils.generateStepList(3)

fromName = "mary1"
toName = "mary2"
fromWavFN = fromName + ".wav"
toWavFN = toName + ".wav"

fromPitchFN = fromName + ".txt"
toPitchFN = toName + ".txt"

fromTGFN = join(root, os.path.splitext(fromWavFN)[0] + ".TextGrid")
toTGFN = join(root, os.path.splitext(toWavFN)[0] + ".TextGrid")

# Prepare the data for morphing
# 1ST load it into memory
fromPitch = pitch_and_intensity.extractPI(join(root, fromWavFN),
                                          join(root, fromPitchFN),
                                          praatEXE, minPitch,
                                          maxPitch, forceRegenerate=False)
toPitch = pitch_and_intensity.extractPI(join(root, toWavFN),
                                        join(root, toPitchFN),
                                        praatEXE, minPitch,
                                        maxPitch, forceRegenerate=False)

# 2ND remove intensity values
fromPitch = [(time, pitch) for time, pitch, _ in fromPitch]
toPitch = [(time, pitch) for time, pitch, _ in toPitch]

# 3RD select which sections to align.
# We'll use textgrids for this purpose.
tierName = "words"
fromPitch = f0_morph.getPitchForIntervals(fromPitch, fromTGFN, tierName)
toPitch = f0_morph.getPitchForIntervals(toPitch, toTGFN, tierName)

# (call completed for illustration; the remaining arguments follow the
# extractPI pattern above)
pitchTier = pitch_and_intensity.extractPitchTier(join(root, fromWavFN),
                                                 join(root, "mary1.PitchTier"),
                                                 praatEXE, minPitch, maxPitch,
                                                 forceRegenerate=False)
github timmahrt / praatIO / examples / get_pitch_and_formants.py
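# Imports and setup assumed for this excerpt; wavPath and tgPath are
# placeholder input folders
import os
from os.path import join
from praatio import pitch_and_intensity, praat_scripts, tgio
from praatio.utilities import utils

wavPath = join(".", "files")  # placeholder: folder with the input wav files
tgPath = join(".", "files")   # placeholder: folder with the textgrids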
rootOutputFolder = os.path.abspath(join(".", "files", "pitch_extraction"))
pitchPath = join(rootOutputFolder, "pitch")
formantsPath = join(rootOutputFolder, "formants")
pitchMeasuresPath = join(rootOutputFolder, "pitch_measures")
rmsIntensityPath = join(rootOutputFolder, "rms_intensity")


praatEXE = r"C:\Praat.exe"
#praatEXE = "/Applications/Praat.app/Contents/MacOS/Praat"
utils.makeDir(rootOutputFolder)
utils.makeDir(pitchPath)
utils.makeDir(pitchMeasuresPath)
utils.makeDir(rmsIntensityPath)
utils.makeDir(formantsPath)

bobbyPitchData = pitch_and_intensity.extractPI(join(wavPath, "bobby.wav"),
                                               join(pitchPath, "bobby.txt"),
                                               praatEXE, 50, 350,
                                               forceRegenerate=False)

# Here are two examples of the new functionality of extracting pitch
# from only labeled intervals in a textgrid.  However, the example files
# I have provided are too short and praat will not process them.

# Extracts each labeled interval as a separate wave file, extracts the
# pitch track from each of those, and then aggregates the result.
# pitch_and_intensity.extractPI(join(wavPath, "bobby.wav"),
#                               join(pitchPath, "bobby_segments.txt"),
#                               praatEXE, 50, 350,
#                               forceRegenerate=True,
#                               tgFN=join(wavPath, "bobby_words.TextGrid"),
#                               tierName="word")
github timmahrt / praatIO / examples / get_pitch_and_formants.py
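# (continues the excerpt above; maryPitchData and maryFilteredPitchData are
# assumed to come from extractPI calls on mary.wav, elided here, the filtered
# variant using a median-filter window)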
# Capture the measures returned by generatePIMeasures
intensityMeasures = pitch_and_intensity.generatePIMeasures(
    maryPitchData, join(tgPath, "mary.TextGrid"), "word", doPitch=False,
    medianFilterWindowSize=9)
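# Save them; one row of summary values per interval is assumed here
with open(join(rmsIntensityPath, "mary_intensity.csv"), "w") as fd:
    for row in intensityMeasures:
        fd.write(",".join(str(val) for val in row) + "\n")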

tg = tgio.openTextgrid(join(tgPath, "bobby_words.TextGrid"))
tg = pitch_and_intensity.detectPitchErrors(bobbyPitchData, 0.75, tg)[1]
tg.save(join(rootOutputFolder, "bobby_errors.TextGrid"))

tg = tgio.openTextgrid(join(tgPath, "mary.TextGrid"))
tg = pitch_and_intensity.detectPitchErrors(maryPitchData, 0.75, tg)[1]
tg.save(join(rootOutputFolder, "mary_errors.TextGrid"))

tg = tgio.openTextgrid(join(tgPath, "mary.TextGrid"))
tg = pitch_and_intensity.detectPitchErrors(maryFilteredPitchData, 0.75, tg)[1]
tg.save(join(rootOutputFolder, "mary_filtered_errors.TextGrid"))

formantData = praat_scripts.getFormants(praatEXE,
                                        join(wavPath, "bobby.wav"),
                                        join(formantsPath, "bobby.txt"),
                                        5500)
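
# The rows returned by getFormants are assumed to be (time, F1, F2, F3)
# measurements; a quick way to inspect the first few:
for row in formantData[:5]:
    print(row)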