Commit adf30187 authored by Steve Tjoa

tempo estimation

parent ea8d709a
@@ -243,6 +243,7 @@ div#notebook {
<li><a href="signal_representations.html">Signal Representations</a> (<a href="signal_representations.ipynb">ipynb</a>)</li>
<li><a href="onset_detection.html">Onset Detection</a> (<a href="onset_detection.ipynb">ipynb</a>)</li>
<li><a href="beat_tracking.html">Beat Tracking</a> (<a href="beat_tracking.ipynb">ipynb</a>)</li>
<li><a href="tempo_estimation.html">Tempo Estimation</a> (<a href="tempo_estimation.ipynb">ipynb</a>)</li>
<li><a href="feature_sonification.html">Exercise: Understanding Audio Features through Sonification</a> (<a href="feature_sonification.ipynb">ipynb</a>)</li>
</ol>
@@ -254,7 +255,7 @@ div#notebook {
</div>
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<h2 id="Chapter-2:-Spectral-Feature-Extraction-and-Classification">Chapter 2: Spectral Feature Extraction and Classification<a class="anchor-link" href="#Chapter-2:-Spectral-Feature-Extraction-and-Classification">&#182;</a></h2>
<h2 id="Chapter-2:-Feature-Extraction">Chapter 2: Feature Extraction<a class="anchor-link" href="#Chapter-2:-Feature-Extraction">&#182;</a></h2>
</div>
</div>
</div>
@@ -264,9 +265,9 @@ div#notebook {
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<ol>
<li><a href="basic_feature_extraction.html">Basic Feature Extraction</a> (<a href="basic_feature_extraction.ipynb">ipynb</a>)</li>
<li><a href="spectral_features.html">Spectral Features</a> (<a href="spectral_features.ipynb">ipynb</a>)</li>
<li><a href="mfcc.html">Mel-Frequency Cepstral Coefficients</a> (<a href="mfcc.ipynb">ipynb</a>)</li>
<li><a href="knn_instrument_classification.html">K-Nearest Neighbor Instrument Classification</a> (<a href="knn_instrument_classification.ipynb">ipynb</a>)</li>
</ol>
</div>
@@ -287,6 +288,9 @@ div#notebook {
<div class="inner_cell">
<div class="text_cell_render border-box-sizing rendered_html">
<ol>
<li><a href="basic_supervised_classification.html">Basic Supervised Classification</a> (<a href="basic_supervised_classification.ipynb">ipynb</a>)</li>
<li><a href="knn.html">K-Nearest Neighbor Classification</a> (<a href="knn.ipynb">ipynb</a>)</li>
<li><a href="knn_instrument_classification.html">Exercise: K-Nearest Neighbor Instrument Classification</a> (<a href="knn_instrument_classification.ipynb">ipynb</a>)</li>
<li><a href="notebooks/kmeans.ipynb">K-Means Clustering</a></li>
<li><a href="exercises/kmeans_instrument_classification.ipynb">Exercise: Unsupervised Instrument Classification using K-Means</a></li>
</ol>
@@ -358,6 +362,7 @@ div#notebook {
<li><a href="segmentation.html">Segmentation</a> (<a href="segmentation.ipynb">ipynb</a>)</li>
<li><a href="beat_tracking_essentia.html">Beat Tracking in Essentia</a> (<a href="beat_tracking_essentia.ipynb">ipynb</a>)</li>
<li><a href="spectral_features_essentia.html">Spectral Features in Essentia</a> (<a href="spectral_features_essentia.ipynb">ipynb</a>)</li>
<li><a href="feature_extraction_essentia.html">Feature Extraction in Essentia</a> (<a href="feature_extraction_essentia.ipynb">ipynb</a>)</li>
</ol>
</div>
@@ -379,8 +384,6 @@ div#notebook {
<div class="text_cell_render border-box-sizing rendered_html">
<ol>
<li><a href="notebooks/tonal.ipynb">Tonal Descriptors: Pitch and Chroma</a></li>
<li><a href="notebooks/feature_extraction.ipynb">Feature Extraction</a></li>
<li><a href="notebooks/tempo_estimation.ipynb">Tempo Estimation</a></li>
</ol>
</div>
......
@@ -46,6 +46,7 @@
"1. [Signal Representations](signal_representations.html) ([ipynb](signal_representations.ipynb))\n",
"1. [Onset Detection](onset_detection.html) ([ipynb](onset_detection.ipynb))\n",
"1. [Beat Tracking](beat_tracking.html) ([ipynb](beat_tracking.ipynb))\n",
"1. [Tempo Estimation](tempo_estimation.html) ([ipynb](tempo_estimation.ipynb))\n",
"1. [Exercise: Understanding Audio Features through Sonification](feature_sonification.html) ([ipynb](feature_sonification.ipynb))"
]
},
@@ -53,16 +54,16 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"## Chapter 2: Spectral Feature Extraction and Classification"
"## Chapter 2: Feature Extraction"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"1. [Basic Feature Extraction](basic_feature_extraction.html) ([ipynb](basic_feature_extraction.ipynb))\n",
"1. [Spectral Features](spectral_features.html) ([ipynb](spectral_features.ipynb))\n",
"1. [Mel-Frequency Cepstral Coefficients](mfcc.html) ([ipynb](mfcc.ipynb))\n",
"1. [K-Nearest Neighbor Instrument Classification](knn_instrument_classification.html) ([ipynb](knn_instrument_classification.ipynb))"
"1. [Mel-Frequency Cepstral Coefficients](mfcc.html) ([ipynb](mfcc.ipynb))"
]
},
{
@@ -76,6 +77,9 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"1. [Basic Supervised Classification](basic_supervised_classification.html) ([ipynb](basic_supervised_classification.ipynb))\n",
"1. [K-Nearest Neighbor Classification](knn.html) ([ipynb](knn.ipynb))\n",
"1. [Exercise: K-Nearest Neighbor Instrument Classification](knn_instrument_classification.html) ([ipynb](knn_instrument_classification.ipynb))\n",
"1. [K-Means Clustering](notebooks/kmeans.ipynb)\n",
"1. [Exercise: Unsupervised Instrument Classification using K-Means](exercises/kmeans_instrument_classification.ipynb)"
]
@@ -125,7 +129,8 @@
"source": [
"1. [Segmentation](segmentation.html) ([ipynb](segmentation.ipynb))\n",
"1. [Beat Tracking in Essentia](beat_tracking_essentia.html) ([ipynb](beat_tracking_essentia.ipynb))\n",
"1. [Spectral Features in Essentia](spectral_features_essentia.html) ([ipynb](spectral_features_essentia.ipynb))"
"1. [Spectral Features in Essentia](spectral_features_essentia.html) ([ipynb](spectral_features_essentia.ipynb))\n",
"1. [Feature Extraction in Essentia](feature_extraction_essentia.html) ([ipynb](feature_extraction_essentia.ipynb))"
]
},
{
@@ -140,8 +145,8 @@
"metadata": {},
"source": [
"1. [Tonal Descriptors: Pitch and Chroma](notebooks/tonal.ipynb)\n",
"1. [Feature Extraction](notebooks/feature_extraction.ipynb)\n",
"1. [Tempo Estimation](notebooks/tempo_estimation.ipynb)\n"
"\n",
"\n"
]
}
],
......
{
"metadata": {
"name": "",
"signature": "sha256:97a0df8e94e03f0ee8b35bdcfeecb7fe032f56066a42d95137d5a30c286d8790"
},
"nbformat": 3,
"nbformat_minor": 0,
"worksheets": [
{
"cells": [
{
"cell_type": "heading",
"level": 1,
"metadata": {},
"source": [
"Tempo Estimation"
]
},
{
"cell_type": "heading",
"level": 2,
"metadata": {},
"source": [
"`librosa.beat.estimate_tempo`"
]
},
{
"cell_type": "heading",
"level": 2,
"metadata": {},
"source": [
"`essentia.standard.Tempo*`"
]
},
{
"cell_type": "code",
"collapsed": false,
"input": [],
"language": "python",
"metadata": {},
"outputs": []
}
],
"metadata": {}
}
]
}
\ No newline at end of file
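The new tempo_estimation.ipynb is added with headings for `librosa.beat.estimate_tempo` and `essentia.standard.Tempo*` but an empty code cell. A minimal sketch of what the librosa cell might contain, assuming a librosa release that still ships `librosa.beat.estimate_tempo` (later versions replaced it with `librosa.beat.tempo`); the audio path is illustrative, not part of the commit:

import librosa

# Load an audio file (the path is illustrative).
y, sr = librosa.load('audio/simple_loop.wav')

# Tempo is estimated from an onset strength envelope, not from the raw signal.
hop_length = 512
onset_envelope = librosa.onset.onset_strength(y, sr=sr, hop_length=hop_length)

# Estimate a single global tempo in beats per minute.
tempo = librosa.beat.estimate_tempo(onset_envelope, sr=sr, hop_length=hop_length)
print('Estimated tempo: %.2f BPM' % tempo)

For the Essentia heading, the lower-level `Tempo*` algorithms (TempoTap and related) expect onset or novelty features as input; as a quick end-to-end comparison, the higher-level `RhythmExtractor2013` returns a BPM estimate directly. Again a sketch, not the notebook's actual contents:

import essentia.standard as es

# Load mono audio and run the multifeature beat/tempo tracker.
audio = es.MonoLoader(filename='audio/simple_loop.wav')()
bpm, ticks, confidence, estimates, intervals = es.RhythmExtractor2013(method='multifeature')(audio)
print('Estimated tempo: %.2f BPM' % bpm)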
from core import *
import errno
import os
import os.path
import urllib


def download_drum_samples(path='drum_samples'):
    """Download ten kick drum samples and ten snare drum samples.

    `path`: output directory containing the twenty drum samples

    Returns:
        `kick_filepaths`: list of kick drum filepaths
        `snare_filepaths`: list of snare drum filepaths
    """
    # Create the output directory; if it already exists, report it and continue.
    try:
        os.makedirs(path)
    except OSError as exception:
        if exception.errno != errno.EEXIST:
            raise
        else:
            print "Directory %s already exists." % path

    # Download kick_01.wav ... kick_10.wav and snare_01.wav ... snare_10.wav
    # into the output directory.
    for drum_type in ['kick', 'snare']:
        for i in range(1, 11):
            filename = '%s_%02d.wav' % (drum_type, i)
            urllib.urlretrieve('http://audio.musicinformationretrieval.com/drum_samples/%s' % filename,
                               filename=os.path.join(path, filename))

    kick_filepaths = [os.path.join(path, 'kick_%02d.wav' % i) for i in range(1, 11)]
    snare_filepaths = [os.path.join(path, 'snare_%02d.wav' % i) for i in range(1, 11)]
    return kick_filepaths, snare_filepaths
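For reference, a short usage sketch of the helper above, assuming the function has been imported into the current namespace:

# Fetch the twenty samples into ./drum_samples (created if missing)
# and collect the resulting file paths.
kick_filepaths, snare_filepaths = download_drum_samples()

print(kick_filepaths[0])     # drum_samples/kick_01.wav
print(len(snare_filepaths))  # 10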