Notes#
import os
from collections import defaultdict, Counter
from git import Repo
import dimcat as dc
import ms3
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from utils import STD_LAYOUT, CADENCE_COLORS, CORPUS_COLOR_SCALE, chronological_corpus_order, color_background, get_corpus_display_name, get_repo_name, print_heading, resolve_dir, value_count_df
CORPUS_PATH = os.path.abspath(os.path.join('..', '..'))
ANNOTATED_ONLY = os.getenv("ANNOTATED_ONLY", "True").lower() in ('true', '1', 't')
print_heading("Notebook settings")
print(f"CORPUS_PATH: {CORPUS_PATH!r}")
print(f"ANNOTATED_ONLY: {ANNOTATED_ONLY}")
CORPUS_PATH = resolve_dir(CORPUS_PATH)
Notebook settings
-----------------
CORPUS_PATH: '/home/runner/work/workflow_deployment/debussy_deux_arabesques'
ANNOTATED_ONLY: False
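For reference, the boolean parsing above treats only the (case-insensitive) spellings 'true', '1' and 't' as True; any other value of the environment variable, e.g. 'False' or '0', yields False. A standalone illustration of that rule:
for value in ("True", "1", "t", "False", "0", "yes"):
    print(f"{value!r:>7} -> {value.lower() in ('true', '1', 't')}")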
repo = Repo(CORPUS_PATH)
print_heading("Data and software versions")
print(f"Data repo '{get_repo_name(repo)}' @ {repo.commit().hexsha[:7]}")
print(f"dimcat version {dc.__version__}")
print(f"ms3 version {ms3.__version__}")
Data and software versions
--------------------------
Data repo 'debussy_deux_arabesques' @ 672f746
dimcat version 0.3.0
ms3 version 2.5.2
dataset = dc.Dataset()
dataset.load(directory=CORPUS_PATH, parse_tsv=False)
[default|all]
All corpora
-----------
View: This view is called 'default'. It
- excludes pieces that are not contained in the metadata,
- filters out file extensions requiring conversion (such as .xml), and
- excludes review files and folders.
| corpus | has metadata | active view | scores detected | measures detected | measures parsed | notes detected | notes parsed |
|---|---|---|---|---|---|---|---|
| debussy_deux_arabesques | yes | default | 2 | 2 | 2 | 2 | 2 |
N = 2 annotated pieces, 4 parsed dataframes.
Metadata#
all_metadata = dataset.data.metadata()
print(f"Concatenated 'metadata.tsv' files cover {len(all_metadata)} of the {dataset.data.count_pieces()} scores.")
all_metadata.reset_index(level=1).groupby(level=0).nth(0).iloc[:,:20]
Concatenated 'metadata.tsv' files cover 2 of the 2 scores.
| corpus | piece | TimeSig | KeySig | last_mc | last_mn | length_qb | last_mc_unfolded | last_mn_unfolded | length_qb_unfolded | all_notes_qb | n_onsets | n_onset_positions | guitar_chord_count | form_label_count | label_count | composed_start | composed_end | composer | workTitle | movementNumber |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| debussy_deux_arabesques | l066-01_arabesques_premiere | {1: '4/4', 94: '2/4', 95: '4/4'} | {1: 4, 39: 3, 71: 4} | 107 | 107 | 426.0 | 107 | 107 | 426.0 | 1207.83 | 1484 | 1018 | 0 | 0 | 0 | 1888 | 1888 | Claude Debussy | Premiere Arabesque | |
Compute chronological order
chronological_order = chronological_corpus_order(all_metadata)
corpus_colors = dict(zip(chronological_order, CORPUS_COLOR_SCALE))
chronological_order
['debussy_deux_arabesques']
all_notes = dataset.data.get_all_parsed('notes', force=True, flat=True)
print(f"{len(all_notes.index)} notes over {len(all_notes.groupby(level=[0,1]))} files.")
all_notes.head()
3424 notes over 2 files.
| corpus | piece | i | mc | mn | quarterbeats | duration_qb | mc_onset | mn_onset | timesig | staff | voice | duration | nominal_duration | scalar | tied | tpc | midi | name | octave | chord_id | gracenote |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| debussy_deux_arabesques | l066-01_arabesques_premiere | 0 | 1 | 1 | 0 | 0.333333 | 0 | 0 | 4/4 | 2 | 1 | 1/12 | 1/8 | 2/3 | <NA> | 7 | 61 | C#4 | 4 | 6 | <NA> |
| | | 1 | 1 | 1 | 1/3 | 0.333333 | 1/12 | 1/12 | 4/4 | 2 | 1 | 1/12 | 1/8 | 2/3 | <NA> | 4 | 64 | E4 | 4 | 7 | <NA> |
| | | 2 | 1 | 1 | 2/3 | 0.333333 | 1/6 | 1/6 | 4/4 | 2 | 1 | 1/12 | 1/8 | 2/3 | <NA> | 3 | 69 | A4 | 4 | 8 | <NA> |
| | | 3 | 1 | 1 | 1 | 0.333333 | 1/4 | 1/4 | 4/4 | 1 | 1 | 1/12 | 1/8 | 2/3 | <NA> | 7 | 73 | C#5 | 5 | 0 | <NA> |
| | | 4 | 1 | 1 | 4/3 | 0.333333 | 1/3 | 1/3 | 4/4 | 1 | 1 | 1/12 | 1/8 | 2/3 | <NA> | 4 | 76 | E5 | 5 | 1 | <NA> |
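The tpc column encodes the tonal pitch class as a number of fifths above C (C=0, G=1, D=2, …, F=-1), while midi fixes the octave; together they determine the name column. As a quick illustrative check with ms3.fifths2name (also used further below), the first note above has tpc=7 and midi=61, i.e. C# in octave 4:
print(ms3.fifths2name(7))  # 7 fifths above C -> expected 'C#'; with MIDI 61 this is the C#4 shown above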
def weight_notes(nl, group_col='midi', precise=True):
summed_durations = nl.groupby(group_col).duration_qb.sum()
shortest_duration = summed_durations[summed_durations > 0].min()
summed_durations /= shortest_duration # normalize such that the shortest duration results in 1 occurrence
if not precise:
# This simple trick reduces compute time but also precision:
# The rationale is to have the smallest value be slightly larger than 0.5 because
# if it was exactly 0.5 it would be rounded down by repeat_notes_according_to_weights()
summed_durations /= 1.9999999
return repeat_notes_according_to_weights(summed_durations)
def repeat_notes_according_to_weights(weights):
try:
counts = weights.round().astype(int)
except Exception:
return pd.Series(dtype=int)
counts_reflecting_weights = []
for pitch, count in counts.items():
counts_reflecting_weights.extend([pitch]*count)
return pd.Series(counts_reflecting_weights)
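To make the duration weighting concrete, here is a minimal sketch with hypothetical toy data (not part of the corpus): three pitches whose summed durations are 1.0, 0.5 and 2.0 quarter beats. With precise=True every pitch is repeated in proportion to its summed duration (the shortest one exactly once); with precise=False the weights are roughly halved before rounding, trading precision for fewer repetitions.
toy_notes = pd.DataFrame({
    'midi':        [60, 60, 64, 67],
    'duration_qb': [0.5, 0.5, 0.5, 2.0],
})
print(weight_notes(toy_notes, 'midi').tolist())                 # [60, 60, 64, 67, 67, 67, 67]
print(weight_notes(toy_notes, 'midi', precise=False).tolist())  # [60, 64, 67, 67]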
Ambitus#
corpus_names = {corp: get_corpus_display_name(corp) for corp in chronological_order}
chronological_corpus_names = list(corpus_names.values())
corpus_name_colors = {corpus_names[corp]: color for corp, color in corpus_colors.items()}
all_notes['corpus_name'] = all_notes.index.get_level_values(0).map(corpus_names)
grouped_notes = all_notes.groupby('corpus_name')
weighted_midi = pd.concat(
    [weight_notes(nl, 'midi', precise=False) for _, nl in grouped_notes],
    keys=grouped_notes.groups.keys(),
).reset_index(level=0)
weighted_midi.columns = ['dataset', 'midi']
weighted_midi
| | dataset | midi |
|---|---|---|
| 0 | Debussy Deux Arabesques | 28 |
| 1 | Debussy Deux Arabesques | 29 |
| 2 | Debussy Deux Arabesques | 29 |
| 3 | Debussy Deux Arabesques | 29 |
| 4 | Debussy Deux Arabesques | 29 |
| ... | ... | ... |
| 2433 | Debussy Deux Arabesques | 92 |
| 2434 | Debussy Deux Arabesques | 92 |
| 2435 | Debussy Deux Arabesques | 92 |
| 2436 | Debussy Deux Arabesques | 92 |
| 2437 | Debussy Deux Arabesques | 93 |
2438 rows × 2 columns
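To read off the ambitus from this distribution, a small hypothetical helper (not part of utils, only a sketch) converts MIDI numbers to note names, using the same C4 = 60 convention as the tick labels of the plot below:
# Hypothetical helper: map a MIDI number to a note name (C4 = 60).
PITCH_CLASSES = ['C', 'C#', 'D', 'Eb', 'E', 'F', 'F#', 'G', 'Ab', 'A', 'Bb', 'B']
def midi_to_name(m: int) -> str:
    return f"{PITCH_CLASSES[m % 12]}{m // 12 - 1}"
print(midi_to_name(weighted_midi.midi.min()), '-', midi_to_name(weighted_midi.midi.max()))
# with the extremes above (28 and 93): E1 - A6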
yaxis = dict(
    tickmode='array',
    tickvals=[12, 24, 36, 48, 60, 72, 84, 96],
    ticktext=["C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7"],
    gridcolor='lightgrey',
)
fig = px.violin(weighted_midi,
x='dataset',
y='midi',
color='dataset',
box=True,
labels=dict(
dataset='',
midi='distribution of pitches by duration'
),
category_orders=dict(dataset=chronological_corpus_names),
color_discrete_map=corpus_name_colors,
width=1000, height=600,
)
fig.update_traces(spanmode='hard') # cut the violin at the data range instead of extending the tails
fig.update_layout(yaxis=yaxis,
**STD_LAYOUT,
showlegend=False)
fig.show()
Tonal Pitch Classes (TPC)#
weighted_tpc = pd.concat(
    [weight_notes(nl, 'tpc') for _, nl in grouped_notes],
    keys=grouped_notes.groups.keys(),
).reset_index(level=0)
weighted_tpc.columns = ['dataset', 'tpc']
weighted_tpc
| | dataset | tpc |
|---|---|---|
| 0 | Debussy Deux Arabesques | -5 |
| 1 | Debussy Deux Arabesques | -5 |
| 2 | Debussy Deux Arabesques | -5 |
| 3 | Debussy Deux Arabesques | -5 |
| 4 | Debussy Deux Arabesques | -5 |
| ... | ... | ... |
| 3641 | Debussy Deux Arabesques | 13 |
| 3642 | Debussy Deux Arabesques | 13 |
| 3643 | Debussy Deux Arabesques | 13 |
| 3644 | Debussy Deux Arabesques | 13 |
| 3645 | Debussy Deux Arabesques | 13 |
3646 rows × 2 columns
As violin plot#
yaxis = dict(
    tickmode='array',
    tickvals=[-12, -9, -6, -3, 0, 3, 6, 9, 12, 15, 18],
    ticktext=["Dbb", "Bbb", "Gb", "Eb", "C", "A", "F#", "D#", "B#", "G##", "E##"],
    gridcolor='lightgrey',
    zerolinecolor='lightgrey',
    zeroline=True,
)
fig = px.violin(weighted_tpc,
x='dataset',
y='tpc',
color='dataset',
box=True,
labels=dict(
dataset='',
tpc='distribution of tonal pitch classes by duration'
),
category_orders=dict(dataset=chronological_corpus_names),
color_discrete_map=corpus_name_colors,
width=1000,
height=600,
)
fig.update_traces(spanmode='hard') # cut the violin at the data range instead of extending the tails
fig.update_layout(yaxis=yaxis,
**STD_LAYOUT,
showlegend=False)
fig.show()
As bar plots#
bar_data = all_notes.groupby('tpc').duration_qb.sum().reset_index()
x_values = list(range(bar_data.tpc.min(), bar_data.tpc.max()+1))
x_names = ms3.fifths2name(x_values)
fig = px.bar(bar_data, x='tpc', y='duration_qb',
labels=dict(tpc='Named pitch class',
duration_qb='Duration in quarter notes'
),
color_discrete_sequence=CORPUS_COLOR_SCALE,
width=1000, height=300,
)
fig.update_layout(**STD_LAYOUT)
fig.update_yaxes(gridcolor='lightgrey')
fig.update_xaxes(gridcolor='lightgrey', zerolinecolor='grey', tickmode='array',
tickvals=x_values, ticktext = x_names, dtick=1, ticks='outside', tickcolor='black',
minor=dict(dtick=6, gridcolor='grey', showgrid=True),
)
fig.show()
scatter_data = all_notes.groupby(['corpus_name', 'tpc']).duration_qb.sum().reset_index()
fig = px.bar(scatter_data, x='tpc', y='duration_qb', color='corpus_name',
labels=dict(
duration_qb='duration',
tpc='named pitch class',
),
category_orders=dict(corpus_name=chronological_corpus_names),
color_discrete_map=corpus_name_colors,
width=1000, height=500,
)
fig.update_layout(**STD_LAYOUT)
fig.update_yaxes(gridcolor='lightgrey')
fig.update_xaxes(gridcolor='lightgrey', zerolinecolor='grey', tickmode='array',
tickvals=x_values, ticktext = x_names, dtick=1, ticks='outside', tickcolor='black',
minor=dict(dtick=6, gridcolor='grey', showgrid=True),
)
fig.show()
As scatter plots#
fig = px.scatter(scatter_data, x='tpc', y='duration_qb', color='corpus_name',
labels=dict(
duration_qb='duration',
tpc='named pitch class',
),
category_orders=dict(corpus_name=chronological_corpus_names),
color_discrete_map=corpus_name_colors,
facet_col='corpus_name', facet_col_wrap=3, facet_col_spacing=0.03,
width=1000, height=1000,
)
fig.update_traces(mode='lines+markers')
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig.update_layout(**STD_LAYOUT, showlegend=False)
fig.update_xaxes(gridcolor='lightgrey', zerolinecolor='lightgrey', tickmode='array', tickvals= [-12, -6, 0, 6, 12, 18],
ticktext = ["Dbb", "Gb", "C", "F#", "B#", "E##"], visible=True, )
fig.update_yaxes(gridcolor='lightgrey', zeroline=False, matches=None, showticklabels=True)
fig.show()
no_accidental = bar_data[bar_data.tpc.between(-1,5)].duration_qb.sum()
with_accidental = bar_data[~bar_data.tpc.between(-1,5)].duration_qb.sum()
entire = no_accidental + with_accidental
f"Fraction of the total note duration carried by notes without accidental: {no_accidental} / {entire} = {no_accidental / entire}"
'Fraction of the total note duration carried by notes without accidental: 1696.0000000000002 / 2429.8333333333335 = 0.6979902599629605'
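The selection tpc.between(-1, 5) relies on the fact that, counted in fifths above C, the values -1 through 5 are exactly the seven naturals. A quick illustrative check (assuming ms3.fifths2name accepts a list, as in the bar-plot cell above):
print(ms3.fifths2name(list(range(-1, 6))))  # expected: the naturals F, C, G, D, A, E, B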
Notes and staves#
print("Distribution of notes over staves:")
value_count_df(all_notes.staff)
Distribution of notes over staves:
| staff | counts | % |
|---|---|---|
| 1 | 1883 | 0.549942 |
| 2 | 1541 | 0.450058 |
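For context, here is a minimal sketch of what value_count_df is assumed to do in this notebook (absolute counts per value plus their share of the total, matching the table above); the actual helper lives in utils:
def value_count_df_sketch(series):
    # assumed behaviour: counts per value and their proportion of the total
    counts = series.value_counts()
    return pd.DataFrame({'counts': counts, '%': counts / counts.sum()})
value_count_df_sketch(all_notes.staff)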