Overview#

This notebook gives a general overview of the features included in the dataset.

Hide imports
%load_ext autoreload
%autoreload 2
import os

import dimcat as dc
import pandas as pd
import plotly.express as px
from dimcat import filters, plotting
from IPython.display import display

import utils
# Directory into which all figures/tables produced by this notebook are written.
RESULTS_PATH = os.path.abspath(os.path.join(utils.OUTPUT_FOLDER, "overview"))
os.makedirs(RESULTS_PATH, exist_ok=True)


def make_output_path(
    filename: str,
    extension=None,
    path=RESULTS_PATH,
) -> str:
    """Resolve an output file path for `filename` under `path`.

    Thin wrapper around ``utils.make_output_path`` that defaults the target
    directory to this notebook's RESULTS_PATH; `extension` is forwarded as-is.
    """
    return utils.make_output_path(filename=filename, extension=extension, path=path)


def save_figure_as(
    fig, filename, formats=("png", "pdf"), directory=RESULTS_PATH, **kwargs
):
    """Write `fig` to `directory`, once per entry in `formats`.

    When `formats` is None, a single image is written and the format is left
    for ``plotting.write_image`` to decide. Extra kwargs are forwarded.
    """
    if formats is None:
        plotting.write_image(fig, filename, directory, **kwargs)
        return
    for image_format in formats:
        plotting.write_image(fig, filename, directory, format=image_format, **kwargs)

Loading data

# Load the corpus at a pinned release so the notebook stays reproducible.
D = utils.get_dataset("bach_chorales", corpus_release="v2.0")
package = D.inputs.get_package()
# NOTE(review): reaches into the private `_package` attribute to read the
# frictionless custom metadata; the rendered output below shows git_tag
# resolves to None here — confirm the datapackage carries a "git_tag" entry.
package_info = package._package.custom
git_tag = package_info.get("git_tag")
utils.print_heading("Data and software versions")
print("Johann Sebastian Bach – The Chorales version v2.0")
print(f"Datapackage '{package.package_name}' @ {git_tag}")
print(f"dimcat version {dc.__version__}\n")
D
Data and software versions
--------------------------

Johann Sebastian Bach – The Chorales version v2.0
Datapackage 'bach_chorales' @ None
dimcat version 3.4.0
Dataset
=======
{'inputs': {'basepath': None,
            'packages': {'bach_chorales': ["'bach_chorales.measures' (MuseScoreFacetName.MuseScoreMeasures)",
                                           "'bach_chorales.notes' (MuseScoreFacetName.MuseScoreNotes)",
                                           "'bach_chorales.metadata' (FeatureName.Metadata)"]}},
 'outputs': {'basepath': None, 'packages': {}},
 'pipeline': []}
# Keep only pieces that carry harmony labels. In this corpus the metadata's
# `label_count` column arrives as strings, which makes dimcat's internal
# `label_count > 0` comparison raise TypeError (see the traceback this cell
# produced). Fall back to filtering the metadata table ourselves in that case.
try:
    filtered_D = filters.HasHarmonyLabelsFilter(keep_values=[True]).process(D)
    all_metadata = filtered_D.get_metadata()
except TypeError:
    # NOTE(review): fallback keeps the unfiltered Dataset but restricts the
    # metadata frame; downstream cells only consume `all_metadata` — confirm
    # nothing else relies on `filtered_D` being piece-filtered.
    filtered_D = D
    metadata_df = D.get_metadata().df
    label_counts = pd.to_numeric(metadata_df["label_count"], errors="coerce").fillna(0)
    all_metadata = metadata_df[label_counts > 0]
assert len(all_metadata) > 0, "No pieces selected for analysis."
all_metadata
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
Cell In[4], line 1
----> 1 filtered_D = filters.HasHarmonyLabelsFilter(keep_values=[True]).process(D)
      2 all_metadata = filtered_D.get_metadata()
      3 assert len(all_metadata) > 0, "No pieces selected for analysis."

File ~/.local/lib/python3.12/site-packages/dimcat/steps/base.py:222, in PipelineStep.process(self, *data)
    218         data = single_obj
    219     else:
    220         # a single object was given which is neither a list nor a tuple, this is the
    221         # case where not to return a list
--> 222         return self.process_data(single_obj)
    223 return [self.process_data(d) for d in data]

File ~/.local/lib/python3.12/site-packages/dimcat/steps/base.py:245, in PipelineStep.process_data(self, data)
    234 """
    235 Perform a transformation on an input Data object. This should never alter the
    236 Data or its properties in place, instead returning a copy or view of the input.
   (...)    242     A copy of the input Data, potentially transformed or enhanced in some way defined by this PipelineStep.
    243 """
    244 if isinstance(data, Dataset):
--> 245     return self.process_dataset(data)
    246 if isinstance(data, DimcatResource):
    247     return self.process_resource(data)

File ~/.local/lib/python3.12/site-packages/dimcat/steps/base.py:260, in PipelineStep.process_dataset(self, dataset)
    258 """Apply this PipelineStep to a :class:`Dataset` and return a copy containing the output(s)."""
    259 self.check_dataset(dataset)
--> 260 return self._process_dataset(dataset)

File ~/.local/lib/python3.12/site-packages/dimcat/steps/base.py:494, in ResourceTransformation._process_dataset(self, dataset)
    492 """Apply this PipelineStep to a :class:`Dataset` and return a copy containing the output(s)."""
    493 new_dataset = self._make_new_dataset(dataset)
--> 494 self.fit_to_dataset(new_dataset)
    495 new_dataset._pipeline.add_step(self)
    496 feature_specs = self.get_feature_specs()

File ~/.local/lib/python3.12/site-packages/dimcat/steps/groupers/annotations.py:103, in HasHarmonyLabelsGrouper.fit_to_dataset(self, dataset)
    101 def fit_to_dataset(self, dataset: Dataset) -> None:
    102     metadata = dataset.get_metadata(raw=True)
--> 103     has_labels = metadata.df["label_count"] > 0
    104     grouping = has_labels.groupby(has_labels, sort=True).groups
    105     group_index = DimcatIndex.from_grouping(
    106         grouping, ("has_harmony_labels", "corpus", "piece")
    107     )

File ~/.local/lib/python3.12/site-packages/pandas/core/ops/common.py:76, in _unpack_zerodim_and_defer.<locals>.new_method(self, other)
     72             return NotImplemented
     74 other = item_from_zerodim(other)
---> 76 return method(self, other)

File ~/.local/lib/python3.12/site-packages/pandas/core/arraylike.py:56, in OpsMixin.__gt__(self, other)
     54 @unpack_zerodim_and_defer("__gt__")
     55 def __gt__(self, other):
---> 56     return self._cmp_method(other, operator.gt)

File ~/.local/lib/python3.12/site-packages/pandas/core/series.py:5803, in Series._cmp_method(self, other, op)
   5800 lvalues = self._values
   5801 rvalues = extract_array(other, extract_numpy=True, extract_range=True)
-> 5803 res_values = ops.comparison_op(lvalues, rvalues, op)
   5805 return self._construct_result(res_values, name=res_name)

File ~/.local/lib/python3.12/site-packages/pandas/core/ops/array_ops.py:332, in comparison_op(left, right, op)
    323         raise ValueError(
    324             "Lengths must match to compare", lvalues.shape, rvalues.shape
    325         )
    327 if should_extension_dispatch(lvalues, rvalues) or (
    328     (isinstance(rvalues, (Timedelta, BaseOffset, Timestamp)) or right is NaT)
    329     and lvalues.dtype != object
    330 ):
    331     # Call the method on lvalues
--> 332     res_values = op(lvalues, rvalues)
    334 elif is_scalar(rvalues) and isna(rvalues):  # TODO: but not pd.NA?
    335     # numpy does not like comparisons vs None
    336     if op is operator.ne:

File ~/.local/lib/python3.12/site-packages/pandas/core/ops/common.py:76, in _unpack_zerodim_and_defer.<locals>.new_method(self, other)
     72             return NotImplemented
     74 other = item_from_zerodim(other)
---> 76 return method(self, other)

File ~/.local/lib/python3.12/site-packages/pandas/core/arraylike.py:56, in OpsMixin.__gt__(self, other)
     54 @unpack_zerodim_and_defer("__gt__")
     55 def __gt__(self, other):
---> 56     return self._cmp_method(other, operator.gt)

File ~/.local/lib/python3.12/site-packages/pandas/core/arrays/string_.py:581, in StringArray._cmp_method(self, other, op)
    578 else:
    579     # logical
    580     result = np.zeros(len(self._ndarray), dtype="bool")
--> 581     result[valid] = op(self._ndarray[valid], other)
    582     return BooleanArray(result, mask)

TypeError: '>' not supported between instances of 'str' and 'int'
# Order corpora chronologically by mean composition year and derive a stable
# color mapping so every figure in this notebook uses consistent corpus colors.
mean_composition_years = utils.corpus_mean_composition_years(all_metadata)
chronological_order = mean_composition_years.index.to_list()
corpus_colors = dict(zip(chronological_order, utils.CORPUS_COLOR_SCALE))
corpus_names = {
    corp: utils.get_corpus_display_name(corp) for corp in chronological_order
}
chronological_corpus_names = list(corpus_names.values())
# Same colors, keyed by display name, for figures that plot display names.
corpus_name_colors = {
    corpus_names[corp]: color for corp, color in corpus_colors.items()
}
mean_composition_years

Composition dates#

This section relies on the dataset’s metadata.

# Coerce composition dates to numbers; non-numeric entries become NaN.
valid_composed_start = pd.to_numeric(all_metadata.composed_start, errors="coerce")
valid_composed_end = pd.to_numeric(all_metadata.composed_end, errors="coerce")
# Guard against columns where nothing parsed: int(NaN) would raise ValueError.
if valid_composed_start.notna().any() and valid_composed_end.notna().any():
    # idxmin/idxmax yield the (corpus, piece) index of the extreme dates.
    print(
        f"Composition dates range from {int(valid_composed_start.min())} {valid_composed_start.idxmin()} "
        f"to {int(valid_composed_end.max())} {valid_composed_end.idxmax()}."
    )
else:
    print("No numeric composition dates found in the metadata.")

Mean composition years per corpus#

def make_summary(metadata_df):
    """Return a copy of `metadata_df` restricted to annotated pieces.

    A piece counts as annotated when its ``label_count`` is greater than 0.
    In this corpus ``label_count`` can arrive as strings (the harmony-label
    filter raised ``TypeError: '>' not supported between 'str' and 'int'``
    for exactly this reason), so the column is coerced to numeric first;
    unparseable values are treated as 0, i.e. not annotated.
    """
    label_counts = pd.to_numeric(metadata_df.label_count, errors="coerce").fillna(0)
    piece_is_annotated = label_counts > 0
    return metadata_df[piece_is_annotated].copy()
Hide source
# Bar chart: one bar per corpus at its mean composition year; bar height is
# the number of annotated pieces in that corpus.
summary = make_summary(all_metadata)
bar_data = pd.concat(
    [
        mean_composition_years.rename("year"),
        summary.groupby(level="corpus").size().rename("pieces"),
    ],
    axis=1,
).reset_index()

N = len(summary)
fig = px.bar(
    bar_data,
    x="year",
    y="pieces",
    color="corpus",
    color_discrete_map=corpus_colors,
    title=f"Temporal coverage of the {N} annotated pieces in the Distant Listening Corpus",
)
# Bars span 5 years on the x-axis so single-corpus bars remain visible.
# (The original cell called update_traces(width=5) twice; once is enough.)
fig.update_traces(width=5)
fig.update_layout(**utils.STD_LAYOUT)
save_figure_as(fig, "pieces_timeline_bars")
fig.show()
summary

Composition years histogram#

Hide source
# Histogram of composition end years, binned per decade and colored by corpus.
hist_data = summary.reset_index()
# Replace corpus ids with display names so the legend matches corpus_name_colors.
hist_data.corpus = hist_data.corpus.map(corpus_names)
fig = px.histogram(
    hist_data,
    x="composed_end",
    color="corpus",
    labels=dict(
        composed_end="decade",
        count="pieces",
    ),
    color_discrete_map=corpus_name_colors,
    title=f"Temporal coverage of the {N} annotated pieces in the Distant Listening Corpus",
)
fig.update_traces(xbins=dict(size=10))  # one bin per decade
fig.update_layout(**utils.STD_LAYOUT)
fig.update_legends(font=dict(size=16))
save_figure_as(fig, "pieces_timeline_histogram", height=1250)
fig.show()

Dimensions#

Overview#

def make_overview_table(groupby, group_name="pieces"):
    """Summarize a metadata groupby as a table of absolute counts per group.

    Columns, in order: group size (named `group_name`), total measures
    (``last_mn``), total length in quarterbeats (``length_qb``), total note
    onsets (``n_onsets``), and total harmony labels (``label_count``).
    A final row labelled "sum" holds the column totals.
    """
    group_sizes = groupby.size().rename(group_name)
    totals = pd.DataFrame(
        {
            "measures": groupby.last_mn.sum(),
            "length": groupby.length_qb.sum(),
            "notes": groupby.n_onsets.sum(),
            "labels": groupby.label_count.sum(),
        }
    )
    table = pd.concat([group_sizes, totals], axis=1)
    grand_totals = table.sum().to_frame("sum").T
    return pd.concat([table, grand_totals])


# Per-work overview of the annotated pieces (grouped on the workTitle column).
absolute = make_overview_table(summary.groupby("workTitle"))
# print(absolute.astype(int).to_markdown())
absolute.astype(int)
def summarize_dataset(D):
    """Per-corpus overview table of the annotated pieces in Dataset `D`."""
    metadata = D.get_metadata()
    annotated = make_summary(metadata)
    # Level 0 of the piece index is the corpus identifier.
    return make_overview_table(annotated.groupby(level=0))


# Markdown-rendered per-corpus totals for the full (unfiltered) dataset.
corpus_summary = summarize_dataset(D)
print(corpus_summary.astype(int).to_markdown())

Measures#

# Load the measures tables of all pieces and show dimcat's default analysis.
all_measures = D.get_feature("measures")
print(
    f"{len(all_measures.index)} measures over {len(all_measures.groupby(level=[0,1]))} files."
)
# NOTE(review): .head() only displays if it is the last expression of its own
# cell; in this flattened export it is immediately followed by another call.
all_measures.head()
all_measures.get_default_analysis().plot_grouped()

Harmony labels#

All symbols are shown independently of the local key, whose mode changes their semantics.

# Harmony labels: load them if the dataset provides any; otherwise fall back
# to an empty table so the rest of this section degrades gracefully.
try:
    all_annotations = D.get_feature("harmonylabels").df
except Exception:
    # Best-effort: datasets without harmony annotations raise here; treat as "none".
    all_annotations = pd.DataFrame()
n_annotations = len(all_annotations.index)
includes_annotations = n_annotations > 0
if includes_annotations:
    display(all_annotations.head())
    print(f"Concatenated annotation tables contains {all_annotations.shape[0]} rows.")
    # Labels with no root are non-chord tokens — presumably phrase/cadence
    # markers; the printed value_counts below shows the actual values.
    no_chord = all_annotations.root.isna()
    if no_chord.sum() > 0:
        print(
            f"{no_chord.sum()} of them are not chords. Their values are:"
            f" {all_annotations.label[no_chord].value_counts(dropna=False).to_dict()}"
        )
    all_chords = all_annotations[~no_chord].copy()
    print(
        f"Dataset contains {all_chords.shape[0]} tokens and {len(all_chords.chord.unique())} types over "
        f"{len(all_chords.groupby(level=[0,1]))} documents."
    )
    # Map the corpus id (index level 0) to a human-readable display name.
    all_annotations["corpus_name"] = all_annotations.index.get_level_values(0).map(
        utils.get_corpus_display_name
    )
    all_chords["corpus_name"] = all_chords.index.get_level_values(0).map(
        utils.get_corpus_display_name
    )
else:
    print("Dataset contains no annotations.")