Annotations#
Show imports
%load_ext autoreload
%autoreload 2
import os
import dimcat as dc
import ms3
import plotly.express as px
from dimcat import groupers, plotting
import utils
Show source
# All figures and tables produced by this notebook are written to
# <utils.OUTPUT_FOLDER>/overview; create the directory up front.
RESULTS_PATH = os.path.abspath(os.path.join(utils.OUTPUT_FOLDER, "overview"))
os.makedirs(RESULTS_PATH, exist_ok=True)
def make_output_path(filename: str, extension=None, path=RESULTS_PATH) -> str:
    """Return the full output path for ``filename``.

    Thin convenience wrapper around ``utils.make_output_path`` that defaults
    ``path`` to this notebook's RESULTS_PATH.
    """
    return utils.make_output_path(filename=filename, extension=extension, path=path)
def save_figure_as(
    fig, filename, formats=("png", "pdf"), directory=RESULTS_PATH, **kwargs
):
    """Write ``fig`` to ``directory`` once for every requested image format.

    Args:
        fig: Plotly figure to serialize.
        filename: Output filename (extension is derived from the format).
        formats: Iterable of format strings, a single format string, or
            None to let ``plotting.write_image`` use its default format.
        directory: Target directory; defaults to this notebook's RESULTS_PATH.
        **kwargs: Forwarded to ``plotting.write_image``.
    """
    if formats is None:
        # No explicit formats: defer to write_image's own default.
        plotting.write_image(fig, filename, directory, **kwargs)
        return
    if isinstance(formats, str):
        # A bare string would otherwise be iterated character by character,
        # producing bogus one-letter "formats" like "p", "n", "g".
        formats = (formats,)
    for fmt in formats:
        plotting.write_image(fig, filename, directory, format=fmt, **kwargs)
Loading data
Show source
# Download (if necessary) and load the ABC corpus release as a dimcat Dataset.
D = utils.get_dataset("ABC", corpus_release="v2.6")
package = D.inputs.get_package()
# Custom metadata stored on the underlying (frictionless) package descriptor;
# presumably includes the git tag of the data release — see get() below.
package_info = package._package.custom
git_tag = package_info.get("git_tag")
utils.print_heading("Data and software versions")
print("The Annotated Beethoven Corpus (ABC) version v2.6")
print(f"Datapackage '{package.package_name}' @ {git_tag}")
print(f"dimcat version {dc.__version__}\n")
# Display the Dataset object as the notebook cell's output.
D
---------------------------------------------------------------------------
PackageInconsistentlySerializedError Traceback (most recent call last)
Cell In[3], line 1
----> 1 D = utils.get_dataset("ABC", corpus_release="v2.6")
2 package = D.inputs.get_package()
3 package_info = package._package.custom
File ~/work/workflow_deployment/ABC/tmp_corpus_docs/notebooks/utils.py:3619, in get_dataset(corpus_name, target_dir, corpus_release)
3617 download_if_missing(zip_name, zip_path)
3618 download_if_missing(json_name, json_path)
-> 3619 return dc.Dataset.from_package(json_path)
File ~/.local/lib/python3.12/site-packages/dimcat/data/datasets/base.py:107, in Dataset.from_package(cls, package)
105 """Instantiate from a PackageSpecs by loading it into the inputs catalog."""
106 dataset = cls()
--> 107 dataset.load_package(package=package)
108 return dataset
File ~/.local/lib/python3.12/site-packages/dimcat/data/datasets/base.py:429, in Dataset.load_package(self, package, package_name, **options)
416 """Loads a package into the inputs catalog.
417
418 Args:
(...) 426
427 """
428 if isinstance(package, (str, Path)):
--> 429 package = DimcatPackage.from_descriptor_path(package, **options)
430 elif isinstance(package, dict):
431 package = DimcatPackage.from_descriptor(package, **options)
File ~/.local/lib/python3.12/site-packages/dimcat/data/packages/base.py:301, in Package.from_descriptor_path(cls, descriptor_path, basepath, auto_validate)
297 basepath, descriptor_filename = reconcile_base_and_file(
298 basepath, descriptor_path
299 )
300 fl_package = fl.Package.from_descriptor(descriptor_path)
--> 301 return cls.from_descriptor(
302 fl_package,
303 descriptor_filename=descriptor_filename,
304 auto_validate=auto_validate,
305 basepath=basepath,
306 )
File ~/.local/lib/python3.12/site-packages/dimcat/data/packages/base.py:268, in Package.from_descriptor(cls, descriptor, descriptor_filename, auto_validate, basepath)
258 ResourceConstructor = Resource
259 resources = [
260 ResourceConstructor.from_descriptor(
261 descriptor=resource,
(...) 266 for resource in fl_package.resources
267 ]
--> 268 return Constructor(
269 package_name=package_name,
270 resources=resources,
271 descriptor_filename=descriptor_filename,
272 basepath=basepath,
273 auto_validate=auto_validate,
274 metadata=fl_package.custom,
275 )
File ~/.local/lib/python3.12/site-packages/dimcat/data/packages/dc.py:60, in DimcatPackage.__init__(self, package_name, resources, basepath, descriptor_filename, auto_validate, metadata)
31 def __init__(
32 self,
33 package_name: str,
(...) 38 metadata: Optional[dict] = None,
39 ) -> None:
40 """
41
42 Args:
(...) 58 Custom metadata to be maintained in the package descriptor.
59 """
---> 60 super().__init__(
61 package_name=package_name,
62 resources=resources,
63 basepath=basepath,
64 descriptor_filename=descriptor_filename,
65 auto_validate=auto_validate,
66 metadata=metadata,
67 )
File ~/.local/lib/python3.12/site-packages/dimcat/data/packages/base.py:593, in Package.__init__(self, package_name, resources, basepath, descriptor_filename, auto_validate, metadata)
590 self.descriptor_filename = descriptor_filename
592 if resources is not None:
--> 593 self.extend(resources)
595 if auto_validate:
596 self.validate(raise_exception=True)
File ~/.local/lib/python3.12/site-packages/dimcat/data/packages/base.py:1017, in Package.extend(self, resources)
1015 return
1016 for n_added, resource in enumerate(resources, 1):
-> 1017 self._add_resource(
1018 resource,
1019 )
1020 self.logger.info(
1021 f"Package {self.package_name!r} was extended with {n_added} resources to a total "
1022 f"of {self.n_resources}."
1023 )
1024 status_after = self.status
File ~/.local/lib/python3.12/site-packages/dimcat/data/packages/base.py:938, in Package._add_resource(self, resource, mode)
936 self._resources.append(resource)
937 self._package.add_resource(resource.resource)
--> 938 self._update_status()
939 return resource
File ~/.local/lib/python3.12/site-packages/dimcat/data/packages/base.py:1508, in Package._update_status(self)
1507 def _update_status(self):
-> 1508 self._status = self._get_status()
File ~/.local/lib/python3.12/site-packages/dimcat/data/packages/base.py:1215, in Package._get_status(self)
1213 if not self.is_aligned:
1214 return PackageStatus.MISALIGNED
-> 1215 if not self.is_partially_serialized:
1216 return PackageStatus.ALIGNED
1217 if self.is_fully_serialized:
File ~/.local/lib/python3.12/site-packages/dimcat/data/packages/base.py:750, in Package.is_partially_serialized(self)
748 else:
749 existing, missing = self.normpath, self.get_descriptor_path()
--> 750 raise PackageInconsistentlySerializedError(self.package_name, existing, missing)
PackageInconsistentlySerializedError: The package 'abc' has been serialized in an inconsistent way, expected ZIP and descriptor, found only '/home/runner/work/workflow_deployment/ABC/tmp_corpus_docs/notebooks/ABC.datapackage.json' but not {'basepath': ~/work/workflow_deployment/ABC/tmp_corpus_docs/notebooks, 'filepath': 'abc.zip'}.
# Keep only the pieces that actually carry harmony labels.
filtered_D = D.apply_step("HasHarmonyLabelsFilter")
all_metadata = filtered_D.get_metadata()
assert len(all_metadata) > 0, "No pieces selected for analysis."
# Corpus names as ordered by the metadata object (chronological per its name).
chronological_corpus_names = all_metadata.get_corpus_names()
DCML harmony labels#
Show source
# All DCML annotation labels (harmony labels plus any other label types).
all_annotations = filtered_D.get_feature("DcmlAnnotations")
# Boolean mask over pieces: True where at least one label is present.
is_annotated_mask = all_metadata.label_count > 0
is_annotated_index = all_metadata.index[is_annotated_mask]
# Note events restricted to the annotated pieces only.
annotated_notes = filtered_D.get_feature("notes").subselect(is_annotated_index)
print(f"The annotated pieces have {len(annotated_notes)} notes.")
all_chords = filtered_D.get_feature("harmonylabels")
print(
    f"{len(all_annotations)} annotations, of which {len(all_chords)} are harmony labels."
)
Harmony labels#
Unigrams#
For computing unigram statistics, the tokens need to be grouped by their occurrence within a major or a minor key,
because this changes their meaning. To that end, the annotated corpus needs to be sliced into contiguous localkey
segments which are then grouped into a major group (`is_minor=False`) and a minor group (`is_minor=True`).
# Total duration (in quarter notes) of each (root, chord_type) pair, with
# roots restricted to -5..6 on the line of fifths — one octave of distinct
# scale degrees (TODO confirm intended range).
root_durations = (
    all_chords[all_chords.root.between(-5, 6)]
    .groupby(["root", "chord_type"])
    .duration_qb.sum()
)
# sort by stacked bar length:
# root_durations = root_durations.sort_values(key=lambda S: S.index.get_level_values(0).map(S.groupby(level=0).sum()),
# ascending=False)
bar_data = root_durations.reset_index()
# Map the numeric fifths value to an interval name for the x-axis
# (ms3.fifths2iv — presumably fifths -> interval string; verify).
bar_data.root = bar_data.root.map(ms3.fifths2iv)
fig = px.bar(
    bar_data,
    x="root",
    y="duration_qb",
    color="chord_type",
    title="Distribution of chord types over chord roots",
    labels=dict(
        root="Chord root expressed as interval above the local (or secondary) tonic",
        duration_qb="duration in quarter notes",
        chord_type="chord type",
    ),
)
fig.update_layout(**utils.STD_LAYOUT)
save_figure_as(fig, "chord_type_distribution_over_scale_degrees_absolute_stacked_bars")
fig.show()
# Work on a copy with only the columns needed to compute chord roots
# relative to the local (or secondary) tonic.
relative_roots = all_chords[
    ["numeral", "duration_qb", "relativeroot", "localkey_is_minor", "chord_type"]
].copy()
# Resolve relative-key expressions against the local key's mode
# (ms3.resolve_relative_keys — presumably flattens chains like "V/V"; verify).
relative_roots["relativeroot_resolved"] = ms3.transform(
    relative_roots, ms3.resolve_relative_keys, ["relativeroot", "localkey_is_minor"]
)
# Where a relative root exists, the effective mode is that of the resolved
# relative key: lowercase spelling is taken to indicate minor.
has_rel = relative_roots.relativeroot_resolved.notna()
relative_roots.loc[has_rel, "localkey_is_minor"] = relative_roots.loc[
    has_rel, "relativeroot_resolved"
].str.islower()
# Chord root expressed in fifths relative to the (effective) tonic.
relative_roots["root"] = ms3.transform(
    relative_roots, ms3.roman_numeral2fifths, ["numeral", "localkey_is_minor"]
)
# Lump chord types occurring fewer than 500 times into an "other" category.
chord_type_frequency = all_chords.chord_type.value_counts()
replace_rare = ms3.map_dict(
    {t: "other" for t in chord_type_frequency[chord_type_frequency < 500].index}
)
relative_roots["type_reduced"] = relative_roots.chord_type.map(replace_rare)
# is_special = relative_roots.chord_type.isin(('It', 'Ger', 'Fr'))
# relative_roots.loc[is_special, 'root'] = -4
# Total duration per (root, reduced type), longest first.
root_durations = (
    relative_roots.groupby(["root", "type_reduced"])
    .duration_qb.sum()
    .sort_values(ascending=False)
)
bar_data = root_durations.reset_index()
# Replace fifths by interval names for display.
bar_data.root = bar_data.root.map(ms3.fifths2iv)
# Order the x-axis categories by total duration, descending.
root_order = (
    bar_data.groupby("root")
    .duration_qb.sum()
    .sort_values(ascending=False)
    .index.to_list()
)
fig = px.bar(
    bar_data,
    x="root",
    y="duration_qb",
    color="type_reduced",
    barmode="group",
    log_y=True,  # durations span several orders of magnitude
    color_discrete_map=utils.TYPE_COLORS,
    category_orders=dict(
        root=root_order,
        type_reduced=relative_roots.type_reduced.value_counts().index.to_list(),
    ),
    labels=dict(
        root="intervallic difference between chord root to the local or secondary tonic",
        duration_qb="duration in quarter notes",
        type_reduced="chord type",
    ),
    width=1000,
    height=400,
)
fig.update_layout(
    **utils.STD_LAYOUT,
    legend=dict(
        orientation="h",
        xanchor="right",
        x=1,
        y=1,
    ),
)
save_figure_as(fig, "chord_type_distribution_over_scale_degrees_absolute_grouped_bars")
fig.show()
# Number of distinct (root, type_reduced) combinations after reduction.
print(
    f"Reduced to {len(set(bar_data.iloc[:,:2].itertuples(index=False, name=None)))} types. "
    f"Paper cites the sum of types in major and types in minor (see below), treating them as distinct."
)
# Interval names starting with "a"/"d" are taken to denote augmented/diminished
# intervals (ms3.fifths2iv naming convention — TODO confirm).
dim_or_aug = bar_data[
    bar_data.root.str.startswith("a") | bar_data.root.str.startswith("d")
].duration_qb.sum()
complete = bar_data.duration_qb.sum()
print(
    f"On diminished or augmented scale degrees: {dim_or_aug} / {complete} = {dim_or_aug / complete}"
)
# Group the harmony labels by mode (major vs. minor) of their local key.
chords_by_mode = groupers.ModeGrouper().process(all_chords)
# Display chord tokens as scale degrees rather than Roman numerals.
chords_by_mode.format = "scale_degree"
Whole dataset#
# Default analysis of the mode-grouped chords (presumably proportions).
unigram_proportions = chords_by_mode.get_default_analysis()
unigram_proportions.make_ranking_table()
# NOTE(review): the result of this step is not stored; unclear from here
# whether apply_step mutates chords_by_mode in place — confirm.
chords_by_mode.apply_step("Counter")
chords_by_mode.format = "scale_degree"  # re-set to the same value as above
chords_by_mode.get_default_analysis().make_ranking_table()
unigram_proportions.plot_grouped()