{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 89,
   "id": "806f6f69-1e0b-4d34-aac9-695c8531cdb1",
   "metadata": {},
   "outputs": [],
   "source": [
    "from itertools import chain, combinations, permutations, product\n",
    "from math import prod, log\n",
    "from copy import deepcopy\n",
    "import networkx as nx\n",
    "import matplotlib.pyplot as plt  # needed by display_graph below\n",
    "from fractions import Fraction\n",
    "import json\n",
    "from operator import add\n",
    "\n",
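    "# Pitches are represented as harmonic-space arrays: exponent vectors over the\n",
    "# primes in the global `dims` tuple (defined in a later cell as (2, 3, 5, 7, 11)).\n",
    "# For example, (-1, 1, 0, 0, 0) denotes 2**-1 * 3**1 = 3/2, so\n",
    "# hs_array_to_fr((-1, 1, 0, 0, 0)) == 1.5 and hs_array_to_cents((-1, 1, 0, 0, 0)) is ~702.\n",
    "\n",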
    "def hs_array_to_fr(hs_array):\n",
    "    return prod([pow(dims[d], hs_array[d]) for d in range(len(dims))])\n",
    "\n",
    "def hs_array_to_cents(hs_array):\n",
    "    return (1200 * log(hs_array_to_fr(hs_array), 2))\n",
    "\n",
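    "# expand_pitch adjusts the octave exponent (index 0) until the frequency ratio\n",
    "# falls in [1, 2); collapse_pitch zeroes it, giving the octave-equivalence class.\n",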
    "def expand_pitch(hs_array):\n",
    "    expanded_pitch = list(hs_array)\n",
    "    frequency_ratio = hs_array_to_fr(hs_array)\n",
    "    if frequency_ratio < 1:\n",
    "        while frequency_ratio < 1:\n",
    "            frequency_ratio *= 2\n",
    "            expanded_pitch[0] += 1\n",
    "    elif frequency_ratio >= 2:\n",
    "        while frequency_ratio >= 2:\n",
    "            frequency_ratio *= 1/2\n",
    "            expanded_pitch[0] += -1\n",
    "    return tuple(expanded_pitch)\n",
    "\n",
    "def expand_chord(chord):\n",
    "    return tuple(expand_pitch(p) for p in chord)\n",
    "\n",
    "def collapse_pitch(hs_array):\n",
    "    collapsed_pitch = list(hs_array)\n",
    "    collapsed_pitch[0] = 0\n",
    "    return tuple(collapsed_pitch)\n",
    "\n",
    "def collapse_chord(chord):\n",
    "    return tuple(collapse_pitch(p) for p in chord)\n",
    "\n",
    "def transpose_pitch(pitch, trans):\n",
    "    return tuple(map(add, pitch, trans))\n",
    "\n",
    "def transpose_chord(chord, trans):\n",
    "    return tuple(transpose_pitch(p, trans) for p in chord)\n",
    "\n",
    "def cent_difference(hs_array1, hs_array2):\n",
    "    return hs_array_to_cents(hs_array2) - hs_array_to_cents(hs_array1)\n",
    "\n",
    "def pitch_difference(hs_array1, hs_array2):\n",
    "    return transpose_pitch(hs_array1, [p * -1 for p in hs_array2])\n",
    "\n",
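    "# grow_chords builds chords by branching out from a root pitch: each branch\n",
    "# alters exactly one non-octave prime exponent by +/-1, so every generated\n",
    "# chord is a connected set of pitches under single-prime steps.\n",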
    "# modified to handle different chord sizes (cf. the original version)\n",
    "def grow_chords(chord, root, min_chord_size, max_chord_size):\n",
    "    # this could use the transpose_pitch function\n",
    "    branches = [branch for alt in [-1, 1] for d in range(1, len(root)) if (branch:=(*(r:=root)[:d], r[d] + alt, *r[(d + 1):])) not in chord]\n",
    "    subsets = chain.from_iterable(combinations(branches, r) for r in range(1, max_chord_size - len(chord) + 1))\n",
    "    for subset in subsets:\n",
    "        extended_chord = chord + subset\n",
    "        if(len(extended_chord) < max_chord_size):\n",
    "            for branch in subset:\n",
    "                yield from grow_chords(extended_chord, branch, min_chord_size, max_chord_size)\n",
    "        if(len(extended_chord) >= min_chord_size):\n",
    "            yield tuple(sorted(extended_chord, key=hs_array_to_fr))\n",
    "\n",
    "def chords(chord, root, min_chord_size, max_chord_size):\n",
    "    # this will filter out the 4x dups of paths that are loops, there might be a faster way to test this\n",
    "    return set(grow_chords(chord, root, min_chord_size, max_chord_size))\n",
    "\n",
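    "# edges() yields a graph edge in each direction for every pair of chords that\n",
    "# can be related by a transposition whose symmetric difference stays within\n",
    "# [min_symdiff, max_symdiff]; the edge data records the transposition and a\n",
    "# 'movements' map giving each pitch's destination and cent difference.\n",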
    "# this is very slow, I have an idea in mind that may be faster by simply growing the chords to max_chord_size + max_symdiff\n",
    "# technically at that point you have generated both chords and can get the second chord from the first\n",
    "def edges(chords, min_symdiff, max_symdiff, max_chord_size):\n",
    "\n",
    "    def reverse_movements(movements):\n",
    "        return {value['destination']:{'destination':key, 'cent_difference':value['cent_difference'] * -1} for key, value in movements.items()}\n",
    "\n",
    "    def is_directly_tunable(intersection, diff):\n",
    "        # this only works for now when the intersection is one element - need to fix that\n",
    "        return max([sum(abs(p) for p in collapse_pitch(pitch_difference(d, list(intersection)[0]))) for d in diff]) == 1\n",
    "\n",
    "    for combination in combinations(chords, 2):\n",
    "        [expanded_base, expanded_comp] = [expand_chord(chord) for chord in combination]\n",
    "        edges = []\n",
    "        transpositions = set(pitch_difference(pair[0], pair[1]) for pair in set(product(expanded_base, expanded_comp)))\n",
    "        for trans in transpositions:\n",
    "            expanded_comp_transposed = transpose_chord(expanded_comp, trans)\n",
    "            intersection = set(expanded_base) & set(expanded_comp_transposed)\n",
    "            symdiff_len = sum([len(chord) - len(intersection) for chord in [expanded_base, expanded_comp_transposed]])\n",
    "            if (min_symdiff <= symdiff_len <= max_symdiff):\n",
    "                rev_trans = tuple(t * -1 for t in trans)\n",
    "                [diff1, diff2] = [list(set(chord) - intersection) for chord in [expanded_base, expanded_comp_transposed]]\n",
    "                base_map = {val: {'destination':transpose_pitch(val, rev_trans), 'cent_difference': 0} for val in intersection}\n",
    "                base_map_rev = reverse_movements(base_map)\n",
    "                maps = []\n",
    "                diff1 += [None] * (max_chord_size - len(diff1) - len(intersection))\n",
    "                perms = [list(perm) + [None] * (max_chord_size - len(perm) - len(intersection)) for perm in set(permutations(diff2))]\n",
    "                for p in perms:\n",
    "                    appended_map = {\n",
    "                        diff1[index]:\n",
    "                        {\n",
    "                            'destination': transpose_pitch(val, rev_trans) if val is not None else None,\n",
    "                            'cent_difference': cent_difference(diff1[index], val) if None not in [diff1[index], val] else None\n",
    "                        } for index, val in enumerate(p)}\n",
    "                    yield (tuple(expanded_base), tuple(expanded_comp), {\n",
    "                        'transposition': trans,\n",
    "                        'symmetric_difference': symdiff_len,\n",
    "                        'is_directly_tunable': is_directly_tunable(intersection, diff2),\n",
    "                        'movements': base_map | appended_map\n",
    "                    },)\n",
    "                    yield (tuple(expanded_comp), tuple(expanded_base), {\n",
    "                        'transposition': rev_trans,\n",
    "                        'symmetric_difference': symdiff_len,\n",
    "                        'is_directly_tunable': is_directly_tunable(intersection, diff1),\n",
    "                        'movements': base_map_rev | reverse_movements(appended_map)\n",
    "                    },)\n",
    "\n",
    "def graph_from_edges(edges):\n",
    "    g = nx.MultiDiGraph()\n",
    "    g.add_edges_from(edges)\n",
    "    return g\n",
    "\n",
    "def generate_graph(chord_set, min_symdiff, max_symdiff, max_chord_size):\n",
    "    #chord_set = chords(pitch_set, min_chord_size, max_chord_size)\n",
    "    edge_set = edges(chord_set, min_symdiff, max_symdiff, max_chord_size)\n",
    "    res_graph = graph_from_edges(edge_set)\n",
    "    return res_graph\n",
    "\n",
    "def display_graph(graph):\n",
    "    show_graph = nx.Graph(graph)\n",
    "    # compute the layout first (nx.draw_spring draws immediately and returns None)\n",
    "    pos = nx.spring_layout(show_graph)\n",
    "    plt.figure(1, figsize=(12,12))\n",
    "    nx.draw(show_graph, pos, node_size=5, width=0.1)\n",
    "    plt.show()\n",
    "    #plt.savefig('compact_sets.png', dpi=150)\n",
    "\n",
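    "# path_to_chords walks an edge path, accumulating each edge's transposition\n",
    "# into current_root and following every pitch through the 'movements' map,\n",
    "# returning the sequence of concrete (transposed) chords.\n",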
    "def path_to_chords(path, start_root):\n",
    "    current_root = start_root\n",
    "    start_chord = tuple(sorted(path[0][0], key=hs_array_to_fr))\n",
    "    chords = ((start_chord, start_chord,),)\n",
    "    for edge in path:\n",
    "        trans = edge[2]['transposition']\n",
    "        movements = edge[2]['movements']\n",
    "        current_root = transpose_pitch(current_root, trans)\n",
    "        current_ref_chord = chords[-1][0]\n",
    "        next_ref_chord = tuple(movements[pitch]['destination'] for pitch in current_ref_chord)\n",
    "        next_transposed_chord = tuple(transpose_pitch(pitch, current_root) for pitch in next_ref_chord)\n",
    "        chords += ((next_ref_chord, next_transposed_chord,),)\n",
    "    return tuple(chord[1] for chord in chords)\n",
    "\n",
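    "# write_chord_sequence dumps the chord sequence to seq.txt as JSON; the string\n",
    "# replacements below just put one chord per line for readability.\n",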
    "def write_chord_sequence(path):\n",
    "    file = open(\"seq.txt\", \"w+\")\n",
    "    content = json.dumps(path)\n",
    "    content = content.replace(\"[[[\", \"[\\n\\t[[\")\n",
    "    content = content.replace(\", [[\", \",\\n\\t[[\")\n",
    "    content = content.replace(\"]]]\", \"]]\\n]\")\n",
    "    file.write(content)\n",
    "    file.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 214,
   "id": "4e3ef738-7f64-47c3-9129-0450fd031375",
   "metadata": {},
   "outputs": [],
   "source": [
    "dims = (2, 3, 5, 7, 11)\n",
    "root = (0, 0, 0, 0, 0)\n",
    "chord = (root,)\n",
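    "# chords of exactly 3 pitches; with the symmetric difference fixed at 4,\n",
    "# connected chords share exactly one common tone\n",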
    "chord_set = chords(chord, root, 3, 3)\n",
    "graph = generate_graph(chord_set, 4, 4, 3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 215,
   "id": "aea5215c-8551-4685-b761-11c2dc74cf22",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "260"
      ]
     },
     "execution_count": 215,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from random import choice, choices\n",
    "\n",
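    "# stochastic_hamiltonian performs a weighted random walk over the chord graph:\n",
    "# each weighting function below scores the outgoing edges, the scores are\n",
    "# multiplied together, and the next edge is drawn with random.choices.\n",
    "# Visited chords are removed from check_graph, so the walk ends once every\n",
    "# chord has been visited (approximating a Hamiltonian path) or the step cap is hit.\n",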
    "# This is for the static version\n",
    "def stochastic_hamiltonian(graph):\n",
    "\n",
    "    def movement_size_weights(edges):\n",
    "\n",
    "        def max_cent_diff(edge):\n",
    "            res = max([abs(v) for val in edge[2]['movements'].values() if (v:=val['cent_difference']) is not None])\n",
    "            return res\n",
    "\n",
    "        def min_cent_diff(edge):\n",
    "            res = [abs(v) for val in edge[2]['movements'].values() if (v:=val['cent_difference']) is not None]\n",
    "            res.remove(0)\n",
    "            return min(res)\n",
    "\n",
    "        for e in edges:\n",
    "            yield 1000 if ((max_cent_diff(e) < 200) and (min_cent_diff(e) > 50)) else 1\n",
    "\n",
    "    def hamiltonian_weights(edges):\n",
    "        for e in edges:\n",
    "            yield 10 if e[1] not in [path_edge[0] for path_edge in path] else 1 / graph.nodes[e[1]]['count']\n",
    "\n",
    "    def contrary_motion_weights(edges):\n",
    "\n",
    "        def is_contrary(edge):\n",
    "            cent_diffs = [v for val in edge[2]['movements'].values() if (v:=val['cent_difference']) is not None]\n",
    "            cent_diffs.sort()\n",
    "            return (cent_diffs[0] < 0) and (cent_diffs[1] == 0) and (cent_diffs[2] > 0)\n",
    "\n",
    "        for e in edges:\n",
    "            yield 10 if is_contrary(e) else 1\n",
    "\n",
    "    def is_directly_tunable_weights(edges):\n",
    "        for e in edges:\n",
    "            yield 10 if e[2]['is_directly_tunable'] else 0\n",
    "\n",
    "    def voice_crossing_weights(edges):\n",
    "\n",
    "        def has_voice_crossing(edge):\n",
    "            source = list(edge[0])\n",
    "            ordered_source = sorted(source, key=hs_array_to_fr)\n",
    "            source_order = [ordered_source.index(p) for p in source]\n",
    "            destination = [transpose_pitch(edge[2]['movements'][p]['destination'], edge[2]['transposition']) for p in source]\n",
    "            ordered_destination = sorted(destination, key=hs_array_to_fr)\n",
    "            destination_order = [ordered_destination.index(p) for p in destination]\n",
    "            return source_order != destination_order\n",
    "\n",
    "        for e in edges:\n",
    "            yield 10 if not has_voice_crossing(e) else 0\n",
    "\n",
    "    def is_bass_rooted(chord):\n",
    "        return max([sum(abs(p) for p in collapse_pitch(pitch_difference(chord[0], p))) for p in chord[1:]]) == 1\n",
    "\n",
    "    check_graph = graph.copy()\n",
    "    next_node = choice([node for node in graph.nodes() if is_bass_rooted(node)])\n",
    "    check_graph.remove_node(next_node)\n",
    "    for node in graph.nodes(data=True):\n",
    "        node[1]['count'] = 1\n",
    "    path = []\n",
    "    while (nx.number_of_nodes(check_graph) > 0) and (len(path) < 5000):\n",
    "        out_edges = list(graph.out_edges(next_node, data=True))\n",
    "        #print([l for l in zip(movement_size_weights(out_edges), hamiltonian_weights(out_edges))])\n",
    "        factors = [\n",
    "            movement_size_weights(out_edges),\n",
    "            hamiltonian_weights(out_edges),\n",
    "            contrary_motion_weights(out_edges),\n",
    "            is_directly_tunable_weights(out_edges),\n",
    "            voice_crossing_weights(out_edges)\n",
    "        ]\n",
    "        weights = [prod(a) for a in zip(*factors)]\n",
    "        edge = choices(out_edges, weights=weights)[0]\n",
    "        #edge = random.choice(out_edges)\n",
    "        next_node = edge[1]\n",
    "        # count the visit so hamiltonian_weights discourages returning here\n",
    "        graph.nodes[next_node]['count'] += 1\n",
    "        path.append(edge)\n",
    "        if next_node in check_graph.nodes:\n",
    "            check_graph.remove_node(next_node)\n",
    "    return path\n",
    "\n",
    "path = stochastic_hamiltonian(graph)\n",
    "#for edge in path:\n",
    "#    print(edge)\n",
    "write_chord_sequence(path_to_chords(path, root))\n",
    "len(path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "ac9e15be-5495-405c-9ce4-ae40d97c7814",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[]"
      ]
     },
     "execution_count": 25,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
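    "# a chord is 'super compact' if every pair of its pitches is at most one\n",
    "# non-octave prime step apart after octave reduction; list such chords in the graph\n",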
    "def is_super_compact(chord):\n",
    "    return max([sum(abs(p) for p in collapse_pitch(pitch_difference(c[0], c[1]))) for c in combinations(chord, 2)]) == 1\n",
    "\n",
    "[node for node in graph.nodes() if is_super_compact(node)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "7f2d356f-6377-46cf-bbb1-32111be90f4f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The line_profiler extension is already loaded. To reload it, use:\n",
      "  %reload_ext line_profiler\n"
     ]
    }
   ],
   "source": [
    "%load_ext line_profiler"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 134,
   "id": "7f141bf5-fdcb-4c01-a10b-3e86d3d1a7b4",
   "metadata": {},
   "outputs": [],
   "source": [
    "chord_set = chords(chord, root, 3, 3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 136,
   "id": "88850b8c-a743-44d0-b863-7cd9066690d9",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Timer unit: 1e-09 s\n",
       "\n",
       "Total time: 0.112228 s\n",
       "File: /tmp/ipykernel_515812/2679284550.py\n",
       "Function: edge_data at line 74\n",
       "\n",
       "Line #      Hits         Time  Per Hit   % Time  Line Contents\n",
       "==============================================================\n",
       "    74                                           def edge_data(chords, min_symdiff, max_symdiff, max_chord_size):\n",
       "    75       990   29603044.0  29902.1     26.4      [expanded_base, expanded_comp] = [expand_chord(chord) for chord in chords]\n",
       "    76       990     229527.0    231.8      0.2      edges = []\n",
       "    77       990   23648371.0  23887.2     21.1      transpositions = set(pitch_difference(pair[0], pair[1]) for pair in set(product(expanded_base, expanded_comp)))\n",
       "    78      9193    2315267.0    251.9      2.1      for trans in transpositions:\n",
       "    79      8203   33386775.0   4070.1     29.7          expanded_comp_transposed = transpose_chord(expanded_comp, trans)\n",
       "    80      8203    8393773.0   1023.3      7.5          intersection = set(expanded_base) & set(expanded_comp_transposed)\n",
       "    81      8203   11812057.0   1440.0     10.5          symdiff_len = sum([len(chord) - len(intersection) for chord in [expanded_base, expanded_comp_transposed]])\n",
       "    82      8203    2530596.0    308.5      2.3          if (min_symdiff <= symdiff_len <= max_symdiff):\n",
       "    83                                                       rev_trans = tuple(t * -1 for t in trans)\n",
       "    84                                                       [diff1, diff2] = [list(set(chord) - intersection) for chord in [expanded_base, expanded_comp_transposed]]\n",
       "    85                                                       base_map = {val: {'destination':transpose_pitch(val, rev_trans), 'cent_difference': 0} for val in intersection}\n",
       "    86                                                       base_map_rev = reverse_movements(base_map)\n",
       "    87                                                       tunability = is_directly_tunable(intersection, diff2)\n",
       "    88                                                       maps = []\n",
       "    89                                                       diff1 += [None] * (max_chord_size - len(diff1) - len(intersection))\n",
       "    90                                                       perms = [list(perm) + [None] * (max_chord_size - len(perm) - len(intersection)) for perm in set(permutations(diff2))]\n",
       "    91                                                       for p in perms:\n",
       "    92                                                           appended_map = {\n",
       "    93                                                               diff1[index]:\n",
       "    94                                                               {\n",
       "    95                                                                   'destination': transpose_pitch(val, rev_trans) if val != None else None,\n",
       "    96                                                                   'cent_difference': cent_difference(diff1[index], val) if None not in [diff1[index], val] else None\n",
       "    97                                                               } for index, val in enumerate(p)}\n",
       "    98                                                           edges.append((tuple(expanded_base), tuple(expanded_comp), {\n",
       "    99                                                               'transposition': trans,\n",
       "   100                                                               'symmetric_difference': symdiff_len,\n",
       "   101                                                               'is_directly_tunable': tunability,\n",
       "   102                                                               'movements': base_map | appended_map\n",
       "   103                                                           }))\n",
       "   104                                                           edges.append((tuple(expanded_comp), tuple(expanded_base), {\n",
       "   105                                                               'transposition': rev_trans,\n",
       "   106                                                               'symmetric_difference': symdiff_len,\n",
       "   107                                                               'is_directly_tunable': tunability,\n",
       "   108                                                               'movements': base_map_rev | reverse_movements(appended_map)\n",
       "   109                                                           }))\n",
       "   110       990     308812.0    311.9      0.3      return edges if edges != [] else None"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "%lprun -f edge_data edges(chord_set, 3, 3, 4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 180,
   "id": "a76dc0f3-02e2-4739-9014-b53d3a590e3d",
   "metadata": {},
   "outputs": [],
   "source": [
    "dims = (2, 3, 5, 7, 11)\n",
    "root = (0, 0, 0, 0, 0)\n",
    "chord = (root,)\n",
    "chord_set = chords(chord, root, 3, 3)\n",
    "graph = generate_graph(chord_set, 2, 2, 3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 213,
   "id": "6e4ecb10-344b-4721-b2f4-68de91d712db",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "((4, 0, 0, 0, -1), (3, 0, -1, 0, 0), (0, 0, 0, 0, 0))\n",
      "0\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "144"
      ]
     },
     "execution_count": 213,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from random import choice, choices\n",
    "\n",
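    "# Same weighted random walk as the static version above, but only the movement\n",
    "# size, Hamiltonian, and voice-crossing weights are active, and\n",
    "# movement_size_weights favours small non-descending moves (max < 175 cents,\n",
    "# min >= 0), which gives this version its rising character.\n",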
    "# This is for the rising version / yitgadal\n",
    "def stochastic_hamiltonian(graph):\n",
    "\n",
    "    def movement_size_weights(edges):\n",
    "\n",
    "        def max_cent_diff(edge):\n",
    "            res = max([v for val in edge[2]['movements'].values() if (v:=val['cent_difference']) is not None])\n",
    "            return res\n",
    "\n",
    "        def min_cent_diff(edge):\n",
    "            res = [v for val in edge[2]['movements'].values() if (v:=val['cent_difference']) is not None]\n",
    "            res.remove(0)\n",
    "            return min(res)\n",
    "\n",
    "        for e in edges:\n",
    "            yield 1000 if ((max_cent_diff(e) < 175) and (min_cent_diff(e) >= 0)) else 1\n",
    "\n",
    "    def hamiltonian_weights(edges):\n",
    "        for e in edges:\n",
    "            yield 10 if e[1] not in [path_edge[0] for path_edge in path] else 1 / graph.nodes[e[1]]['count']\n",
    "\n",
    "    def contrary_motion_weights(edges):\n",
    "\n",
    "        def is_contrary(edge):\n",
    "            cent_diffs = [v for val in edge[2]['movements'].values() if (v:=val['cent_difference']) is not None]\n",
    "            cent_diffs.sort()\n",
    "            return (cent_diffs[0] < 0) and (cent_diffs[1] == 0) and (cent_diffs[2] > 0)\n",
    "\n",
    "        for e in edges:\n",
    "            yield 2 if is_contrary(e) else 1\n",
    "\n",
    "    def is_directly_tunable_weights(edges):\n",
    "        for e in edges:\n",
    "            yield 10 if e[2]['is_directly_tunable'] else 0\n",
    "\n",
    "    def transposition_weight(edges):\n",
    "        for e in edges:\n",
    "            yield 1000 if 0 <= hs_array_to_cents(e[2]['transposition']) < 100 else 0\n",
    "\n",
    "    def is_sustained_voice(edges, voice):\n",
    "\n",
    "        def is_sustained(edge):\n",
    "            source = list(edge[0])\n",
    "            ordered_source = sorted(source, key=hs_array_to_fr)\n",
    "            destination = [transpose_pitch(edge[2]['movements'][p]['destination'], edge[2]['transposition']) for p in source]\n",
    "            ordered_destination = sorted(destination, key=hs_array_to_fr)\n",
    "            return ordered_source[voice] == ordered_destination[voice]\n",
    "\n",
    "        for e in edges:\n",
    "            yield 10 if is_sustained(e) else 0\n",
    "\n",
    "    def voice_crossing_weights(edges):\n",
    "\n",
    "        def has_voice_crossing(edge):\n",
    "            source = list(edge[0])\n",
    "            ordered_source = sorted(source, key=hs_array_to_fr)\n",
    "            source_order = [ordered_source.index(p) for p in source]\n",
    "            destination = [transpose_pitch(edge[2]['movements'][p]['destination'], edge[2]['transposition']) for p in source]\n",
    "            ordered_destination = sorted(destination, key=hs_array_to_fr)\n",
    "            destination_order = [ordered_destination.index(p) for p in destination]\n",
    "            return source_order != destination_order\n",
    "\n",
    "        for e in edges:\n",
    "            yield 10 if not has_voice_crossing(e) else 0\n",
    "\n",
    "    def is_bass_rooted(chord):\n",
    "        return max([sum(abs(p) for p in collapse_pitch(pitch_difference(chord[0], p))) for p in chord[1:]]) == 1\n",
    "\n",
    "    check_graph = graph.copy()\n",
    "    #next_node = choice([node for node in graph.nodes() if is_bass_rooted(node)])\n",
    "    next_node = choice(list(graph.nodes()))\n",
    "    print(next_node)\n",
    "    check_graph.remove_node(next_node)\n",
    "    for node in graph.nodes(data=True):\n",
    "        node[1]['count'] = 1\n",
    "    path = []\n",
    "    while (nx.number_of_nodes(check_graph) > 0) and (len(path) < 500):\n",
    "        out_edges = list(graph.out_edges(next_node, data=True))\n",
    "        #print([l for l in zip(movement_size_weights(out_edges), hamiltonian_weights(out_edges))])\n",
    "        factors = [\n",
    "            movement_size_weights(out_edges),\n",
    "            hamiltonian_weights(out_edges),\n",
    "            #contrary_motion_weights(out_edges),\n",
    "            #is_directly_tunable_weights(out_edges),\n",
    "            voice_crossing_weights(out_edges),\n",
    "            #transposition_weight(out_edges)\n",
    "            #is_sustained_voice(out_edges, 0)\n",
    "        ]\n",
    "        weights = [prod(a) for a in zip(*factors)]\n",
    "        #print(weights)\n",
    "        edge = choices(out_edges, weights=weights)[0]\n",
    "        #print(edge)\n",
    "        #edge = random.choice(out_edges)\n",
    "        next_node = edge[1]\n",
    "        # count the visit so hamiltonian_weights discourages returning here\n",
    "        graph.nodes[next_node]['count'] += 1\n",
    "        path.append(edge)\n",
    "        if next_node in check_graph.nodes:\n",
    "            check_graph.remove_node(next_node)\n",
    "    print(len(check_graph.nodes()))\n",
    "    return path\n",
    "\n",
    "path = stochastic_hamiltonian(graph)\n",
    "#for edge in path:\n",
    "#    print(edge)\n",
    "write_chord_sequence(path_to_chords(path, root))\n",
    "len(path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 212,
   "id": "7b76d848-fe53-4b60-b414-46cfe570f78b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "((4, 0, 0, 0, -1), (2, -1, 0, 0, 0), (0, 0, 0, 0, 0))\n",
      "0\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "219"
      ]
     },
     "execution_count": 212,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "path = stochastic_hamiltonian(graph)\n",
    "#for edge in path:\n",
    "#    print(edge)\n",
    "write_chord_sequence(path_to_chords(path, root))\n",
    "len(path)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}