Notes#
Show imports
# Standard library
import os
from collections import Counter, defaultdict

# Third-party
from git import Repo
import dimcat as dc
import ms3
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go

# Local helpers (duplicates of get_repo_name/resolve_dir removed)
from utils import (
    CADENCE_COLORS,
    CORPUS_COLOR_SCALE,
    STD_LAYOUT,
    chronological_corpus_order,
    color_background,
    get_corpus_display_name,
    get_repo_name,
    print_heading,
    resolve_dir,
    value_count_df,
)
Show source
# Notebook settings: corpus location and whether to restrict to annotated pieces.
CORPUS_PATH = os.path.abspath(os.path.join('..', '..'))
# Any of 'true'/'1'/'t' (case-insensitive) in the env var enables the flag.
annotated_flag = os.getenv("ANNOTATED_ONLY", "True")
ANNOTATED_ONLY = annotated_flag.lower() in ('true', '1', 't')
print_heading("Notebook settings")
print(f"CORPUS_PATH: {CORPUS_PATH!r}")
print(f"ANNOTATED_ONLY: {ANNOTATED_ONLY}")
CORPUS_PATH = resolve_dir(CORPUS_PATH)
Notebook settings
-----------------
CORPUS_PATH: '/home/runner/work/workflow_deployment/debussy_childrens_corner'
ANNOTATED_ONLY: False
Show source
# Report the data commit and library versions for reproducibility.
repo = Repo(CORPUS_PATH)
short_sha = repo.commit().hexsha[:7]
print_heading("Data and software versions")
print(f"Data repo '{get_repo_name(repo)}' @ {short_sha}")
print(f"dimcat version {dc.__version__}")
print(f"ms3 version {ms3.__version__}")
Data and software versions
--------------------------
Data repo 'debussy_childrens_corner' @ ea31f19
dimcat version 0.3.0
ms3 version 2.5.2
# Load the corpus into a dimcat Dataset. parse_tsv=False defers TSV parsing;
# the notes tables are parsed later via get_all_parsed(..., force=True).
dataset = dc.Dataset()
dataset.load(directory=CORPUS_PATH, parse_tsv=False)
[default|all]
All corpora
-----------
View: This view is called 'default'. It
- excludes pieces that are not contained in the metadata,
- filters out file extensions requiring conversion (such as .xml), and
- excludes review files and folders.
has active scores measures notes
metadata view detected detected parsed detected parsed
corpus
debussy_childrens_corner yes default 6 6 6 6 6
N = 6 annotated pieces, 12 parsed dataframes.
Metadata#
# Concatenate all metadata.tsv files and show the first piece per corpus
# (first 20 metadata columns only).
all_metadata = dataset.data.metadata()
n_scores = dataset.data.count_pieces()
print(f"Concatenated 'metadata.tsv' files cover {len(all_metadata)} of the {n_scores} scores.")
all_metadata.reset_index(level=1).groupby(level=0).nth(0).iloc[:, :20]
Concatenated 'metadata.tsv' files cover 6 of the 6 scores.
piece | TimeSig | KeySig | last_mc | last_mn | length_qb | last_mc_unfolded | last_mn_unfolded | length_qb_unfolded | all_notes_qb | n_onsets | n_onset_positions | guitar_chord_count | form_label_count | label_count | composed_start | composed_end | composer | workTitle | movementNumber | |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
corpus | ||||||||||||||||||||
debussy_childrens_corner | l113-01_childrens_doctor | {1: '4/4'} | {1: 0, 33: -2, 37: -4, 45: 0} | 76 | 76 | 304.0 | 76 | 76 | 304.0 | 707.25 | 1259 | 1033 | 0 | 0 | 0 | 1906 | 1908 | Claude Debussy | NaN |
Compute chronological order
# Order corpora chronologically and assign each one a color from the shared scale.
chronological_order = chronological_corpus_order(all_metadata)
corpus_colors = {corpus: color for corpus, color in zip(chronological_order, CORPUS_COLOR_SCALE)}
chronological_order
['debussy_childrens_corner']
# Parse and concatenate the notes tables of all pieces into one flat DataFrame.
all_notes = dataset.data.get_all_parsed('notes', force=True, flat=True)
n_files = len(all_notes.groupby(level=[0, 1]))
print(f"{len(all_notes.index)} notes over {n_files} files.")
all_notes.head()
6575 notes over 6 files.
mc | mn | quarterbeats | duration_qb | mc_onset | mn_onset | timesig | staff | voice | duration | gracenote | nominal_duration | scalar | tied | tpc | midi | name | octave | chord_id | |||
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
corpus | piece | i | |||||||||||||||||||
debussy_childrens_corner | l113-01_childrens_doctor | 0 | 1 | 1 | 0 | 0.00 | 0 | 0 | 4/4 | 2 | 1 | 0 | acciaccatura | 1/8 | 1 | <NA> | 0 | 36 | C2 | 2 | 15 |
1 | 1 | 1 | 0 | 4.00 | 0 | 0 | 4/4 | 2 | 1 | 1 | <NA> | 1 | 1 | 1 | 0 | 48 | C3 | 3 | 16 | ||
2 | 1 | 1 | 1/4 | 0.25 | 1/16 | 1/16 | 4/4 | 1 | 1 | 1/16 | <NA> | 1/16 | 1 | <NA> | 1 | 55 | G3 | 3 | 0 | ||
3 | 1 | 1 | 1/2 | 0.25 | 1/8 | 1/8 | 4/4 | 1 | 1 | 1/16 | <NA> | 1/16 | 1 | <NA> | 0 | 60 | C4 | 4 | 1 | ||
4 | 1 | 1 | 3/4 | 0.25 | 3/16 | 3/16 | 4/4 | 1 | 1 | 1/16 | <NA> | 1/16 | 1 | <NA> | 2 | 62 | D4 | 4 | 2 |
def weight_notes(nl, group_col='midi', precise=True):
    """Expand a note list into pitch values repeated proportionally to duration.

    Parameters
    ----------
    nl : pandas.DataFrame
        Note list with a ``duration_qb`` column and the column named by
        ``group_col`` (e.g. 'midi' or 'tpc').
    group_col : str
        Column whose values are repeated according to their summed durations.
    precise : bool
        If False, weights are (almost) halved before rounding, roughly halving
        the output size at the cost of precision.

    Returns
    -------
    pandas.Series
        Flat series of ``group_col`` values, each repeated ``round(weight)`` times.
    """
    summed_durations = nl.groupby(group_col).duration_qb.sum()
    # Normalize so that the shortest positive summed duration counts as 1 occurrence.
    shortest_duration = summed_durations[summed_durations > 0].min()
    summed_durations /= shortest_duration
    if not precise:
        # This simple trick reduces compute time but also precision:
        # dividing by just under 2 leaves the smallest weight slightly above 0.5,
        # so it still rounds up to 1 in repeat_notes_according_to_weights().
        summed_durations /= 1.9999999
    return repeat_notes_according_to_weights(summed_durations)


def repeat_notes_according_to_weights(weights):
    """Repeat each index value of ``weights`` round(weight) times.

    Returns an empty int Series when the weights cannot be cast to integers
    (e.g. they contain NaN because all durations were zero).
    """
    try:
        counts = weights.round().astype(int)
    except (TypeError, ValueError):  # narrow: only casting failures are expected
        return pd.Series(dtype=int)
    # Vectorized equivalent of extending a list with [pitch] * count per entry.
    return pd.Series(counts.index.repeat(counts))
Ambitus#
# Map corpus ids to display names/colors and build the duration-weighted MIDI sample.
corpus_names = {corpus: get_corpus_display_name(corpus) for corpus in chronological_order}
chronological_corpus_names = list(corpus_names.values())
corpus_name_colors = {corpus_names[corpus]: color for corpus, color in corpus_colors.items()}
all_notes['corpus_name'] = all_notes.index.get_level_values(0).map(corpus_names)
grouped_notes = all_notes.groupby('corpus_name')
per_corpus_weighted = [weight_notes(nl, 'midi', precise=False) for _, nl in grouped_notes]
weighted_midi = pd.concat(per_corpus_weighted, keys=grouped_notes.groups.keys()).reset_index(level=0)
weighted_midi.columns = ['dataset', 'midi']
weighted_midi
dataset | midi | |
---|---|---|
0 | Debussy Childrens Corner | 22 |
1 | Debussy Childrens Corner | 22 |
2 | Debussy Childrens Corner | 24 |
3 | Debussy Childrens Corner | 24 |
4 | Debussy Childrens Corner | 27 |
... | ... | ... |
9073 | Debussy Childrens Corner | 94 |
9074 | Debussy Childrens Corner | 94 |
9075 | Debussy Childrens Corner | 94 |
9076 | Debussy Childrens Corner | 94 |
9077 | Debussy Childrens Corner | 94 |
9078 rows × 2 columns
# Y axis labels every octave at the C pitches (MIDI 12 = C0 ... 96 = C7).
yaxis = dict(
    tickmode='array',
    tickvals=[12, 24, 36, 48, 60, 72, 84, 96],
    ticktext=["C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7"],
    gridcolor='lightgrey',
)
fig = px.violin(
    weighted_midi,
    x='dataset',
    y='midi',
    color='dataset',
    box=True,
    labels=dict(dataset='', midi='distribution of pitches by duration'),
    category_orders=dict(dataset=chronological_corpus_names),
    color_discrete_map=corpus_name_colors,
    width=1000,
    height=600,
)
fig.update_traces(spanmode='hard')  # do not extend beyond outliers
fig.update_layout(yaxis=yaxis, showlegend=False, **STD_LAYOUT)
fig.show()
Tonal Pitch Classes (TPC)#
# Duration-weighted sample of tonal pitch classes, per corpus.
per_corpus_tpc = [weight_notes(nl, 'tpc') for _, nl in grouped_notes]
weighted_tpc = pd.concat(per_corpus_tpc, keys=grouped_notes.groups.keys()).reset_index(level=0)
weighted_tpc.columns = ['dataset', 'tpc']
weighted_tpc
dataset | tpc | |
---|---|---|
0 | Debussy Childrens Corner | -8 |
1 | Debussy Childrens Corner | -8 |
2 | Debussy Childrens Corner | -8 |
3 | Debussy Childrens Corner | -7 |
4 | Debussy Childrens Corner | -7 |
... | ... | ... |
808 | Debussy Childrens Corner | 11 |
809 | Debussy Childrens Corner | 11 |
810 | Debussy Childrens Corner | 11 |
811 | Debussy Childrens Corner | 12 |
812 | Debussy Childrens Corner | 13 |
813 rows × 2 columns
As violin plot#
# Y axis labels along the line of fifths (0 = C, positive = sharp side).
yaxis = dict(
    tickmode='array',
    tickvals=[-12, -9, -6, -3, 0, 3, 6, 9, 12, 15, 18],
    ticktext=["Dbb", "Bbb", "Gb", "Eb", "C", "A", "F#", "D#", "B#", "G##", "E##"],
    gridcolor='lightgrey',
    zerolinecolor='lightgrey',
    zeroline=True,
)
fig = px.violin(
    weighted_tpc,
    x='dataset',
    y='tpc',
    color='dataset',
    box=True,
    labels=dict(dataset='', tpc='distribution of tonal pitch classes by duration'),
    category_orders=dict(dataset=chronological_corpus_names),
    color_discrete_map=corpus_name_colors,
    width=1000,
    height=600,
)
fig.update_traces(spanmode='hard')  # do not extend beyond outliers
fig.update_layout(yaxis=yaxis, showlegend=False, **STD_LAYOUT)
fig.show()
As bar plots#
# Total duration per tonal pitch class, as one bar chart across all corpora.
bar_data = all_notes.groupby('tpc').duration_qb.sum().reset_index()
x_values = list(range(bar_data.tpc.min(), bar_data.tpc.max() + 1))
x_names = ms3.fifths2name(x_values)
fig = px.bar(
    bar_data,
    x='tpc',
    y='duration_qb',
    labels=dict(tpc='Named pitch class', duration_qb='Duration in quarter notes'),
    color_discrete_sequence=CORPUS_COLOR_SCALE,
    width=1000,
    height=300,
)
fig.update_layout(**STD_LAYOUT)
fig.update_yaxes(gridcolor='lightgrey')
fig.update_xaxes(
    gridcolor='lightgrey',
    zerolinecolor='grey',
    tickmode='array',
    tickvals=x_values,
    ticktext=x_names,
    dtick=1,
    ticks='outside',
    tickcolor='black',
    minor=dict(dtick=6, gridcolor='grey', showgrid=True),
)
fig.show()
# Same durations per tonal pitch class, but broken down by corpus.
scatter_data = all_notes.groupby(['corpus_name', 'tpc']).duration_qb.sum().reset_index()
fig = px.bar(
    scatter_data,
    x='tpc',
    y='duration_qb',
    color='corpus_name',
    labels=dict(duration_qb='duration', tpc='named pitch class'),
    category_orders=dict(dataset=chronological_corpus_names),
    color_discrete_map=corpus_name_colors,
    width=1000,
    height=500,
)
fig.update_layout(**STD_LAYOUT)
fig.update_yaxes(gridcolor='lightgrey')
fig.update_xaxes(
    gridcolor='lightgrey',
    zerolinecolor='grey',
    tickmode='array',
    tickvals=x_values,
    ticktext=x_names,
    dtick=1,
    ticks='outside',
    tickcolor='black',
    minor=dict(dtick=6, gridcolor='grey', showgrid=True),
)
fig.show()
As scatter plots#
# One faceted line+marker panel per corpus over the line of fifths.
fig = px.scatter(
    scatter_data,
    x='tpc',
    y='duration_qb',
    color='corpus_name',
    labels=dict(duration_qb='duration', tpc='named pitch class'),
    category_orders=dict(dataset=chronological_corpus_names),
    color_discrete_map=corpus_name_colors,
    facet_col='corpus_name',
    facet_col_wrap=3,
    facet_col_spacing=0.03,
    width=1000,
    height=1000,
)
fig.update_traces(mode='lines+markers')
# Strip facet titles down to the value part ("corpus_name=X" -> "X").
fig.for_each_annotation(lambda annotation: annotation.update(text=annotation.text.split("=")[-1]))
fig.update_layout(**STD_LAYOUT, showlegend=False)
fig.update_xaxes(
    gridcolor='lightgrey',
    zerolinecolor='lightgrey',
    tickmode='array',
    tickvals=[-12, -6, 0, 6, 12, 18],
    ticktext=["Dbb", "Gb", "C", "F#", "B#", "E##"],
    visible=True,
)
fig.update_yaxes(gridcolor='lightgrey', zeroline=False, matches=None, showticklabels=True)
fig.show()
# Fifths -1..5 (F, C, G, D, A, E, B) are the pitch classes without accidental.
natural_mask = bar_data.tpc.between(-1, 5)
no_accidental = bar_data[natural_mask].duration_qb.sum()
with_accidental = bar_data[~natural_mask].duration_qb.sum()
entire = no_accidental + with_accidental
f"Fraction of note duration without accidental of the entire durations: {no_accidental} / {entire} = {no_accidental / entire}"
'Fraction of note duration without accidental of the entire durations: 2452.9166666666665 / 4536.25 = 0.5407366583999265'
Notes and staves#
# Tabulate counts and percentages of notes per staff.
print("Distribution of notes over staves:")
value_count_df(all_notes.staff)
Distribution of notes over staves:
counts | % | |
---|---|---|
staff | ||
1 | 3379 | 0.513916 |
2 | 3196 | 0.486084 |