Overview#

This notebook gives a general overview of the features included in the dataset.

Hide imports
import os
from collections import defaultdict, Counter
from fractions import Fraction

from git import Repo
import dimcat as dc
import ms3
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go

from utils import CADENCE_COLORS, CORPUS_COLOR_SCALE, STD_LAYOUT, TYPE_COLORS, color_background, corpus_mean_composition_years, value_count_df, get_corpus_display_name, get_repo_name, print_heading, resolve_dir
Hide source
# Resolve the corpus root (two directories above this notebook) and read the
# ANNOTATED_ONLY flag from the environment (accepts "true"/"1"/"t",
# case-insensitive; defaults to True).
CORPUS_PATH = os.path.abspath(os.path.join('..', '..'))
ANNOTATED_ONLY = os.getenv("ANNOTATED_ONLY", "True").lower() in {'true', '1', 't'}
print_heading("Notebook settings")
print(f"CORPUS_PATH: {CORPUS_PATH!r}")
print(f"ANNOTATED_ONLY: {ANNOTATED_ONLY}")
CORPUS_PATH = resolve_dir(CORPUS_PATH)
Notebook settings
-----------------

CORPUS_PATH: '/home/runner/work/workflow_deployment/debussy_piano'
ANNOTATED_ONLY: False
Hide source
# Record provenance: data repository name and commit, plus the versions of
# the two main software dependencies, so results can be traced back exactly.
repo = Repo(CORPUS_PATH)
print_heading("Data and software versions")
short_sha = repo.commit().hexsha[:7]
print(f"Data repo '{get_repo_name(repo)}' @ {short_sha}")
print(f"dimcat version {dc.__version__}")
print(f"ms3 version {ms3.__version__}")
Data and software versions
--------------------------

Data repo 'debussy_piano' @ fd3b785
dimcat version 0.3.0
ms3 version 2.5.2
# Create an empty dimcat Dataset and load the corpus from CORPUS_PATH.
# parse_tsv=False defers TSV parsing; facets are requested on demand below.
dataset = dc.Dataset()
dataset.load(directory=CORPUS_PATH, parse_tsv=False)
[default|all]
All corpora
-----------
View: This view is called 'default'. It 
	- excludes pieces that are not contained in the metadata,
	- filters out file extensions requiring conversion (such as .xml), and
	- excludes review files and folders.

                                has   active   scores measures           notes        expanded       
                           metadata     view detected detected parsed detected parsed detected parsed
corpus                                                                                               
debussy_childrens_corner        yes  default        6        6      6        6      6        0      0
debussy_deux_arabesques         yes  default        2        2      2        2      2        0      0
debussy_estampes                yes  default        3        3      3        3      3        0      0
debussy_etudes                  yes  default       12       12     12       12     12        0      0
debussy_images                  yes  default        9        9      9        9      9        0      0
debussy_other_piano_pieces      yes  default       19       19     19       19     19        0      0
debussy_pour_le_piano           yes  default        3        3      3        3      3        0      0
debussy_preludes                yes  default       24       24     24       24     24        0      0
debussy_suite_bergamasque       yes  default        4        4      4        4      4        4      4
publication_data_and_code        no  default        0        0      0        0      0        0      0

28/860 files are excluded from this view.

28 files have been excluded based on their subdir.
N = 82 annotated pieces, 168 parsed dataframes.
Hide data loading
# Fetch the metadata table plus the notes and measures facets, then derive
# a chronological ordering of the corpora by mean composition year together
# with stable colors and human-readable display names for plotting.
all_metadata = dataset.data.metadata()
assert len(all_metadata) > 0, "No pieces selected for analysis."
print(f"Metadata covers {len(all_metadata)} of the {dataset.data.count_pieces()} scores.")
all_notes = dataset.get_facet('notes')
all_measures = dataset.get_facet('measures')
mean_composition_years = corpus_mean_composition_years(all_metadata)
chronological_order = mean_composition_years.index.to_list()
corpus_colors = {corp: color for corp, color in zip(chronological_order, CORPUS_COLOR_SCALE)}
corpus_names = {corp: get_corpus_display_name(corp) for corp in chronological_order}
chronological_corpus_names = [corpus_names[corp] for corp in chronological_order]
corpus_name_colors = {corpus_names[corp]: color for corp, color in corpus_colors.items()}
Metadata covers 82 of the 82 scores.

Composition dates#

This section relies on the dataset’s metadata.

# Coerce the composition-date columns to numbers (non-numeric entries become
# NaN) and report the overall range together with the pieces attaining it.
valid_composed_start = pd.to_numeric(all_metadata.composed_start, errors='coerce')
valid_composed_end = pd.to_numeric(all_metadata.composed_end, errors='coerce')
earliest, latest = valid_composed_start.min(), valid_composed_end.max()
print(f"Composition dates range from {int(earliest)} {valid_composed_start.idxmin()} "
      f"to {int(latest)} {valid_composed_end.idxmax()}.")
Composition dates range from 1880 ('debussy_other_piano_pieces', 'l009_danse') to 1917 ('debussy_other_piano_pieces', 'l000_soirs').

Mean composition years per corpus#

Hide source
# Recompute each piece's length in quarterbeats from the measure table
# (sum of actual measure durations, x4 converts whole notes to quarters),
# attach a per-piece note count, and plot pieces per corpus against the
# corpus' mean composition year.
summary = all_metadata.copy()
# Bracket assignment (not `summary.length_qb = ...`): attribute assignment
# silently creates a DataFrame attribute instead of a column if the column
# were absent, so this form is the reliable idiom.
summary["length_qb"] = all_measures.groupby(level=[0, 1]).act_dur.sum() * 4.0
summary = pd.concat([summary,
                     all_notes.groupby(level=[0, 1]).size().rename('notes'),
                    ], axis=1)
bar_data = pd.concat([mean_composition_years.rename('year'),
                      summary.groupby(level='corpus').size().rename('pieces')],
                     axis=1
                    ).reset_index()
fig = px.bar(bar_data, x='year', y='pieces', color='corpus',
             color_discrete_map=corpus_colors,
            )
fig.update_traces(width=5)  # fixed bar width (in year units); was duplicated below
fig.update_layout(**STD_LAYOUT)
fig.update_yaxes(gridcolor='lightgrey')
fig.show()

Composition years histogram#

Hide source
# Histogram of composition end dates, one decade per bin, colored by corpus
# using the human-readable corpus names.
hist_data = summary.reset_index()
hist_data["corpus"] = hist_data["corpus"].map(corpus_names)
fig = px.histogram(
    hist_data,
    x='composed_end',
    color='corpus',
    labels=dict(composed_end='decade',
                count='pieces',
               ),
    color_discrete_map=corpus_name_colors,
)
fig.update_traces(xbins=dict(size=10))  # 10-year bins
fig.update_layout(**STD_LAYOUT)
fig.update_yaxes(gridcolor='lightgrey')
fig.show()

Dimensions#

Overview#

Hide source
# Aggregate absolute counts (pieces, measures, length, notes, labels) per
# corpus, append a grand-total row, and derive per-piece and per-measure
# averages for the final overview table.
corpus_metadata = summary.groupby(level=0)
n_pieces = corpus_metadata.size().rename('pieces')
absolute_numbers = dict(
    measures = corpus_metadata.last_mn.sum(),
    length = corpus_metadata.length_qb.sum(),
    notes = corpus_metadata.notes.sum(),
    labels = corpus_metadata.label_count.sum(),
)
absolute = pd.DataFrame.from_dict(absolute_numbers)
absolute = pd.concat([n_pieces, absolute], axis=1)
# Grand-total row. n_pieces has no 'sum' entry, so the per-piece columns of
# this row come out as NaN in `relative` below.
sum_row = pd.DataFrame(absolute.sum(), columns=['sum']).T
absolute = pd.concat([absolute, sum_row])
relative = absolute.div(n_pieces, axis=0)
# BUGFIX: was `absolute.iloc[:1, 2:]`, which computed the per-measure stats
# for the first corpus only and left NaN in every other row (visible in the
# previously rendered table). Use all rows.
complete_summary = pd.concat(
    [absolute, relative, absolute.iloc[:, 2:].div(absolute.measures, axis=0)],
    axis=1, keys=['absolute', 'per piece', 'per measure'])
complete_summary = complete_summary.apply(pd.to_numeric).round(2)
complete_summary.index = complete_summary.index.map(dict(corpus_names, sum='sum'))
complete_summary
absolute per piece per measure
pieces measures length notes labels pieces measures length notes labels length notes labels
Debussy Childrens Corner 6 514.0 1672.00 6575 0.0 1.0 85.67 278.67 1095.83 0.00 3.25 12.79 0.0
Debussy Deux Arabesques 2 217.0 866.00 3424 0.0 1.0 108.50 433.00 1712.00 0.00 NaN NaN NaN
Debussy Estampes 3 391.0 1299.00 8427 0.0 1.0 130.33 433.00 2809.00 0.00 NaN NaN NaN
Debussy Etudes 12 1072.0 2829.00 25076 0.0 1.0 89.33 235.75 2089.67 0.00 NaN NaN NaN
Debussy Images 9 866.0 2819.00 20685 0.0 1.0 96.22 313.22 2298.33 0.00 NaN NaN NaN
Debussy Other Piano Pieces 19 2289.0 6764.25 34518 0.0 1.0 120.47 356.01 1816.74 0.00 NaN NaN NaN
Debussy Pour Le Piano 3 501.0 1284.12 8808 0.0 1.0 167.00 428.04 2936.00 0.00 NaN NaN NaN
Debussy Preludes 24 1811.0 4715.50 33662 0.0 1.0 75.46 196.48 1402.58 0.00 NaN NaN NaN
Debussy Suite Bergamasque 4 421.0 1616.00 8210 1013.0 1.0 105.25 404.00 2052.50 253.25 NaN NaN NaN
sum 82 8082.0 23864.88 149385 1013.0 NaN NaN NaN NaN NaN NaN NaN NaN

Measures#

# How many measure rows are there, spread over how many (corpus, fname) files?
n_files = all_measures.groupby(level=[0, 1]).ngroups
print(f"{len(all_measures.index)} measures over {n_files} files.")
all_measures.head()
8119 measures over 82 files.
mc mn quarterbeats duration_qb keysig timesig act_dur mc_offset numbering_offset dont_count barline breaks repeats next
corpus fname interval
debussy_childrens_corner l113-01_childrens_doctor [0.0, 4.0) 1 1 0 4.0 0 4/4 1 0 <NA> <NA> <NA> <NA> firstMeasure (2,)
[4.0, 8.0) 2 2 4 4.0 0 4/4 1 0 <NA> <NA> <NA> line <NA> (3,)
[8.0, 12.0) 3 3 8 4.0 0 4/4 1 0 <NA> <NA> <NA> <NA> <NA> (4,)
[12.0, 16.0) 4 4 12 4.0 0 4/4 1 0 <NA> <NA> <NA> <NA> <NA> (5,)
[16.0, 20.0) 5 5 16 4.0 0 4/4 1 0 <NA> <NA> <NA> <NA> <NA> (6,)
# Frequency of each time signature, counted per XML measure (MC) row;
# dropna=False so missing values would show up explicitly.
print("Distribution of time signatures per XML measure (MC):")
all_measures["timesig"].value_counts(dropna=False)
Distribution of time signatures per XML measure (MC):
2/4      1763
3/4      1712
4/4      1682
6/8      1079
3/8       868
2/2       307
4/8       234
3/2       162
12/16     116
9/8       111
6/4        23
12/8       17
7/4        15
5/4        13
6/16       10
5/8         2
2/8         2
4/2         1
1/2         1
9/4         1
Name: timesig, dtype: int64

Harmony labels#

All symbols, independent of the local key (the mode of which changes their semantics).

# Load the 'expanded' harmony-label facet if the dataset has one; otherwise
# fall back to an empty frame and report that no annotations are available.
try:
    all_annotations = dataset.get_facet('expanded')
except Exception:
    # Deliberately broad best-effort: a dataset without annotated pieces
    # raises here; treat that as "no annotations" instead of aborting.
    all_annotations = pd.DataFrame()
n_annotations = len(all_annotations.index)
includes_annotations = n_annotations > 0
if includes_annotations:
    display(all_annotations.head())
    # Grammar fix in the user-facing message: "tables contains" -> "tables contain".
    print(f"Concatenated annotation tables contain {all_annotations.shape[0]} rows.")
    # Rows without a chord root are treated as non-chord labels.
    no_chord = all_annotations.root.isna()
    if no_chord.sum() > 0:
        print(f"{no_chord.sum()} of them are not chords. Their values are: {all_annotations.label[no_chord].value_counts(dropna=False).to_dict()}")
    all_chords = all_annotations[~no_chord].copy()
    print(f"Dataset contains {all_chords.shape[0]} tokens and {len(all_chords.chord.unique())} types over {len(all_chords.groupby(level=[0,1]))} documents.")
    # Human-readable corpus names for later grouping/plotting.
    all_annotations['corpus_name'] = all_annotations.index.get_level_values(0).map(get_corpus_display_name)
    all_chords['corpus_name'] = all_chords.index.get_level_values(0).map(get_corpus_display_name)
else:
    # Was an f-string with no placeholders; plain string suffices.
    print("Dataset contains no annotations.")
mc mn quarterbeats quarterbeats_all_endings duration_qb mc_onset mn_onset timesig staff voice ... phraseend chord_type globalkey_is_minor localkey_is_minor chord_tones added_tones root bass_note alt_label special
corpus fname interval
debussy_suite_bergamasque l075-01_suite_prelude [0.0, 2.0) 1 1 0 0 2.0 0 0 4/4 2 1 ... { M False False (0, 4, 1) () 0 0 <NA> <NA>
[2.0, 8.0) 1 1 2 2 6.0 1/2 1/2 4/4 2 1 ... <NA> Mm7 False False (1, 5, 2, -1) (3,) 1 1 <NA> <NA>
[8.0, 10.0) 3 3 8 8 2.0 0 0 4/4 2 1 ... <NA> M False False (0, 4, 1) (2,) 0 0 <NA> <NA>
[10.0, 12.0) 3 3 10 10 2.0 1/2 1/2 4/4 2 1 ... <NA> M False False (-1, 3, 0) (2,) -1 -1 <NA> <NA>
[12.0, 14.0) 4 4 12 12 2.0 0 0 4/4 2 1 ... <NA> mm7 False False (1, 5, 2, 4) () 4 1 <NA> <NA>

5 rows × 31 columns

Concatenated annotation tables contains 1013 rows.
Dataset contains 1013 tokens and 291 types over 4 documents.