{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 93,
   "id": "1b7b9f62-156d-4ac5-876d-0305a25d99e4",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "389"
      ]
     },
     "execution_count": 93,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from itertools import chain, combinations, permutations, product\n",
    "from math import prod, log\n",
    "from copy import deepcopy\n",
    "\n",
    "# modified from the original version to support variable chord sizes\n",
    "def grow_chords(chord, root, min_chord_size, max_chord_size):\n",
    "    # this could use the transpose_pitch function\n",
    "    branches = [branch for alt in [-1, 1] for d in range(1, len(root)) if (branch:=(*(r:=root)[:d], r[d] + alt, *r[(d + 1):])) not in chord]\n",
    "    subsets = chain.from_iterable(combinations(branches, r) for r in range(1, max_chord_size - len(chord) + 1))\n",
    "    for subset in subsets:\n",
    "        extended_chord = chord + subset\n",
    "        if(len(extended_chord) < max_chord_size):\n",
    "            for branch in subset:\n",
    "                yield from grow_chords(extended_chord, branch, min_chord_size, max_chord_size)\n",
    "        if(len(extended_chord) >= min_chord_size):\n",
    "            yield tuple(sorted(extended_chord))\n",
    "\n",
    "def chords(chord, root, min_chord_size, max_chord_size):\n",
    "    # this will filter out the 4x dups of paths that are loops, there might be a faster way to test this\n",
    "    return set(grow_chords(chord, root, min_chord_size, max_chord_size))\n",
    "\n",
    "root = (0, 0, 0, 0)\n",
    "chord = (root,)\n",
    "chord_set = chords(chord, root, 3, 4)\n",
    "len(chord_set)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 133,
   "id": "806f6f69-1e0b-4d34-aac9-695c8531cdb1",
   "metadata": {},
   "outputs": [],
   "source": [
    "from itertools import chain, combinations, permutations, product\n",
    "from math import prod, log\n",
    "from copy import deepcopy\n",
    "import networkx as nx\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "def hs_array_to_fr(hs_array):\n",
    "    return prod([pow(dims[d], hs_array[d]) for d in range(len(dims))])\n",
    "\n",
    "def hs_array_to_cents(hs_array):\n",
    "    return (1200 * log(hs_array_to_fr(hs_array), 2))\n",
    "\n",
    "def expand_pitch(hs_array):\n",
    "    expanded_pitch = list(hs_array)\n",
    "    frequency_ratio = hs_array_to_fr(hs_array)\n",
    "    if frequency_ratio < 1:\n",
    "        while frequency_ratio < 1:\n",
    "            frequency_ratio *= 2\n",
    "            expanded_pitch[0] += 1\n",
    "    elif frequency_ratio >= 2:\n",
    "        while frequency_ratio >= 2:\n",
    "            frequency_ratio *= 1/2\n",
    "            expanded_pitch[0] += -1\n",
    "    return tuple(expanded_pitch)\n",
    "\n",
    "def expand_chord(chord):\n",
    "    return tuple(expand_pitch(p) for p in chord)\n",
    "\n",
    "def collapse_pitch(hs_array):\n",
    "    collapsed_pitch = list(hs_array)\n",
    "    collapsed_pitch[0] = 0\n",
    "    return tuple(collapsed_pitch)\n",
    "\n",
    "def collapse_chord(chord):\n",
    "    return tuple(collapse_pitch(p) for p in chord)\n",
    "\n",
    "def transpose_pitch(pitch, trans):\n",
    "    return tuple(map(lambda x,y:x+y, pitch, trans))\n",
    "\n",
    "def transpose_chord(chord, trans):\n",
    "    return tuple(transpose_pitch(p, trans) for p in chord)\n",
    "\n",
    "def cent_difference(hs_array1, hs_array2):\n",
    "    return hs_array_to_cents(hs_array2) - hs_array_to_cents(hs_array1)\n",
    "\n",
    "def pitch_difference(hs_array1, hs_array2):\n",
    "    return transpose_pitch(hs_array1, [p * -1 for p in hs_array2])\n",
    "\n",
    "# modified from the original version to support variable chord sizes\n",
    "def grow_chords(chord, root, min_chord_size, max_chord_size):\n",
    "    # this could use the transpose_pitch function\n",
    "    branches = [branch for alt in [-1, 1] for d in range(1, len(root)) if (branch:=(*(r:=root)[:d], r[d] + alt, *r[(d + 1):])) not in chord]\n",
    "    subsets = chain.from_iterable(combinations(branches, r) for r in range(1, max_chord_size - len(chord) + 1))\n",
    "    for subset in subsets:\n",
    "        extended_chord = chord + subset\n",
    "        if(len(extended_chord) < max_chord_size):\n",
    "            for branch in subset:\n",
    "                yield from grow_chords(extended_chord, branch, min_chord_size, max_chord_size)\n",
    "        if(len(extended_chord) >= min_chord_size):\n",
    "            yield tuple(sorted(extended_chord))\n",
    "\n",
    "def chords(chord, root, min_chord_size, max_chord_size):\n",
    "    # this will filter out the 4x dups of paths that are loops, there might be a faster way to test this\n",
    "    return set(grow_chords(chord, root, min_chord_size, max_chord_size))\n",
    "\n",
    "# this is very slow; I have an idea in mind that may be faster: simply grow the chords to max_chord_size + max_symdiff\n",
    "# technically at that point you have generated both chords and can get the second chord from the first\n",
    "def edges(chords, min_symdiff, max_symdiff, max_chord_size):\n",
    "    def reverse_dict(dict):\n",
    "        rev_dict = deepcopy(dict)\n",
    "        rev_trans = tuple(t * -1 for t in rev_dict['transposition'])\n",
    "        rev_dict['transposition'] = rev_trans\n",
    "        rev_dict['movements'] = {\n",
    "            value['destination']:{\n",
    "                'destination':key, \n",
    "                'cent_difference':value['cent_difference']\n",
    "            } for key, value in rev_dict['movements'].items()}\n",
    "        return rev_dict\n",
    "\n",
    "    def is_directly_tunable(intersection, diff):\n",
    "        return max([len(collapse_pitch(pitch_difference(d, set(list(intersection)[0])))) for d in diff]) == 1\n",
    "\n",
    "    def edge_data(chords):\n",
    "        [expanded_base, expanded_comp] = [set(expand_chord(chord)) for chord in chords]\n",
    "        edges = []\n",
    "        transpositions = set(pitch_difference(pair[0], pair[1]) for pair in set(product(expanded_base, expanded_comp)))\n",
    "        for trans in transpositions:\n",
    "            rev_trans = tuple(t * -1 for t in trans)\n",
    "            expanded_comp_transposed = set(transpose_chord(expanded_comp, trans))\n",
    "            intersection = expanded_base & expanded_comp_transposed\n",
    "            [diff1, diff2] = [list(chord - intersection) for chord in [expanded_base, expanded_comp_transposed]]\n",
    "            base_map = {val: {'destination':transpose_pitch(val, rev_trans), 'cent_difference': 0} for val in intersection}\n",
    "            symdiff_len = (len(diff1) + len(diff2))\n",
    "            if (min_symdiff <= symdiff_len <= max_symdiff):\n",
    "                edge_dict = {\n",
    "                    'transposition': trans, \n",
    "                    'symmetric_difference': symdiff_len, \n",
    "                    'is_directly_tunable': is_directly_tunable(intersection, diff2)\n",
    "                }\n",
    "                maps = []\n",
    "                diff1 += [None] * (max_chord_size - len(diff1) - len(intersection))\n",
    "                perms = [list(perm) + [None] * (max_chord_size - len(perm) - len(intersection)) for perm in set(permutations(diff2))]\n",
    "                for p in perms:\n",
    "                    appended_map = {\n",
    "                        diff1[index]:\n",
    "                        {\n",
    "                            'destination': transpose_pitch(val, rev_trans) if val != None else None, \n",
    "                            'cent_difference': cent_difference(diff1[index], val) if None not in [diff1[index], val] else None\n",
    "                        } for index, val in enumerate(p)}\n",
    "                    # copy edge_dict per permutation so parallel edges do not share one mutable 'movements' map\n",
    "                    forward_dict = edge_dict | {'movements': base_map | appended_map}\n",
    "                    edges.append((tuple(expanded_base), tuple(expanded_comp), forward_dict))\n",
    "                    edges.append((tuple(expanded_comp), tuple(expanded_base), reverse_dict(forward_dict)))\n",
    "        return edges if edges != [] else None\n",
    "    \n",
    "    return list(chain(*[e for c in combinations(chords, 2) if (e := edge_data(c)) is not None]))\n",
    "\n",
    "def graph_from_edges(edges):\n",
    "    g = nx.MultiDiGraph()\n",
    "    g.add_edges_from(edges)\n",
    "    return g\n",
    "\n",
    "def generate_graph(chord_set, min_symdiff, max_symdiff, max_chord_size):\n",
    "    #chord_set = chords(pitch_set, min_chord_size, max_chord_size)\n",
    "    edge_set = edges(chord_set, min_symdiff, max_symdiff, max_chord_size)\n",
    "    res_graph = graph_from_edges(edge_set)\n",
    "    return res_graph\n",
    "\n",
    "def display_graph(graph):\n",
    "    show_graph = nx.Graph(graph)\n",
    "    pos = nx.spring_layout(show_graph)\n",
    "    plt.figure(1, figsize=(12,12))\n",
    "    nx.draw(show_graph, pos, node_size=5, width=0.1)\n",
    "    plt.show()\n",
    "    #plt.savefig('compact_sets.png', dpi=150)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 145,
   "id": "3b220e4f-af29-4226-b60d-30078da05663",
   "metadata": {},
   "outputs": [],
   "source": [
    "dims = (2, 3, 5, 7)\n",
    "root = (0, 0, 0, 0)\n",
    "chord = (root,)\n",
    "chord_set = chords(chord, root, 4, 4)\n",
    "#edges(chord_set, 2, 2, 3)\n",
    "graph = generate_graph(chord_set, 2, 2, 4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 144,
   "id": "472e3033-cf7f-43da-9396-df6c6ee426b8",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "344"
      ]
     },
     "execution_count": 144,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(graph.nodes)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}