Commit b5c360da authored by Münker

Transfer from public branch.

# -*- coding: utf-8 -*-
"""
Script to generate AND/OR graph from moving wedge (MW) and liaisons matrix.
"""
import cProfile
import pickle
import pstats
import os
import pandas as pd
import hypernetx as hnx
import time
from copy import deepcopy
num_edges = 0
hyper = {}
hyper_str = {}
def run_experiment_csv(read_path, write_path, product_name, restrict_nonstat_size, max_nonstat_parts, save_AOG=True):
liaisons_file = read_path + product_name + "_Liaisons.csv"
mw_path_x = read_path + product_name + "_Moving wedge_x.csv"
mw_path_y = read_path + product_name + "_Moving wedge_y.csv"
mw_path_z = read_path + product_name + "_Moving wedge_z.csv"
# ID of base part
global base_part_id
base_part_id = 1
# Read files
liaison_df = pd.read_csv(liaisons_file, index_col=0)
mw_dfs = read_mw_data_csv(mw_path_x, mw_path_y, mw_path_z)
global prohibited_sa
prohibited_sa = []
# Initialise product as a list of all part indices
prod = list(range(1, len(liaison_df) + 1))
# Start recursive AND/OR graph generation
start = time.perf_counter()
and_or(prod, liaison_df, mw_dfs, restrict_nonstat_size, max_nonstat_parts)
end = time.perf_counter()
runtime = end - start
# build the hypergraph to report node and edge counts (as in run_experiment_xlsx)
H = hnx.Hypergraph(hyper_str)
node_count = len(H.nodes)
edge_count = len(H.edges)
if save_AOG:
write_file_path = os.path.join(write_path, product_name + '_AOG.pickle')
with open(write_file_path, 'wb') as f:
pickle.dump(hyper, f)
return {"exp": product_name, "runtime": runtime, "node_count": node_count, "edge_count": edge_count}
def run_experiment_xlsx(read_path, write_path, product_name, restrict_nonstat_size, max_nonstat_parts, save_AOG=True, reduced=False):
liaisons_file = os.path.join(read_path, product_name + "_Liaisons.xlsx")
mw_path = os.path.join(read_path, product_name + "_Moving wedge.xlsx")
if reduced:
liaisons_file = os.path.join(read_path, product_name + "_Liaisons_reduced.xlsx")
mw_path = os.path.join(read_path, product_name + "_Moving wedge_reduced.xlsx")
# ID of base part
global base_part_id
base_part_id = 1
# Read files
liaison_df = read_liaison_data_excel(liaisons_file)
mw_dfs = read_mw_data_excel(mw_path)
global prohibited_sa
prohibited_sa = []
# Initialise product as a list of all part indices
prod = list(range(1, len(liaison_df) + 1))
# Start recursive AND/OR graph generation
start = time.perf_counter()
and_or(prod, liaison_df, mw_dfs, restrict_nonstat_size, max_nonstat_parts)
end = time.perf_counter()
runtime = end - start
H = hnx.Hypergraph(hyper_str)
node_count = len(H.nodes)
edge_count = len(H.edges)
if save_AOG:
write_file_path = os.path.join(write_path, product_name + '_AOG.pickle')
with open(write_file_path, 'wb') as f:
pickle.dump(hyper, f)
return {"exp": product_name, "runtime": runtime, "node_count": node_count, "edge_count": edge_count}
def and_or(prod, liaison_df, mw_dfs, restrict_nonstat_size, max_nonstat_parts):
global dirs
global num_edges
global hyper, hyper_str
# convert liaison_df to liaison_numpy for speed purposes
liaison_matrix = liaison_df.to_numpy()
#global dirs
mw_x = mw_dfs['MW_x'].to_numpy()
mw_y = mw_dfs['MW_y'].to_numpy()
mw_z = mw_dfs['MW_z'].to_numpy()
mw = [mw_x, mw_y, mw_z]
#global restrict_nonstat_size, max_nonstat_parts
# List of created subassemblies
sa_list = [[p] for p in prod]
# The smallest index of SA2 in sa_list (to avoid redundant operations)
ind2_start = 0
# Level is equal to the number of parts in SAs on this level
for level in range(2, len(prod)+1):
print(f'Level {level}/{len(prod)}')
num_iter = 0
num_no_intersect = 0
num_connect = 0
num_free = 0
num_new_sa = 0
num_new_edges = 0
temp_sa_list = deepcopy(sa_list)
for ind1 in range(len(sa_list)):
for ind2 in range(ind2_start, len(sa_list)):
if ind1 < ind2:
num_iter += 1
sa1 = sa_list[ind1]
sa2 = sa_list[ind2]
l1 = len(sa1)
l2 = len(sa2)
if restrict_nonstat_size:
# check whether the size of subassembly without base part is admissible
if is_stationary(sa1):
if l2 > max_nonstat_parts:
continue
elif is_stationary(sa2):
if l1 > max_nonstat_parts:
continue
else:
if l1 + l2 > max_nonstat_parts:
continue
if l1 + l2 <= len(prod):
if not intersection(sa1, sa2):
num_no_intersect += 1
new_sa = sorted(sa1 + sa2)
# if new_sa is already in sa_list, then it was created
# on a lower level, don't add new operations
#if new_sa in sa_list:
# continue
if connected_subassemblies(sa1, sa2, liaison_matrix):
num_connect += 1
if collision_free_assembly(sa1, sa2, mw):
num_free += 1
# Check whether this new SA creates any problems for
# higher-level operations
#todo: check if this code is logically correct...
#if not sa_prevents_future_assembly(new_sa, prod, mw):
num_edges += 1
num_new_edges += 1
# Save hyperedge
hyper[num_edges] = (new_sa, sa1, sa2)
hyper_str[num_edges] = (str(new_sa), str(sa1), str(sa2))
#print(f'hyper_str: {hyper_str}')
if new_sa not in temp_sa_list:
num_new_sa += 1
temp_sa_list.append(new_sa)
ind2_start = len(sa_list)
sa_list = deepcopy(temp_sa_list)
# Beautify: sort key-value pairs by decreasing size of product subassembly
# print('Reformatting AND/OR graph')
sorted_edges = sorted(hyper.values(), key=lambda x: len(x[0]), reverse=True)
hyper = {i: sorted_edges[i-1] for i in range(1, len(sorted_edges)+1)}
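# Hypothetical illustration (not produced by the data files in this repository) of the resulting
# structure for a 3-part product: each hyperedge id maps to (parent assembly, subassembly 1, subassembly 2), e.g.
#   hyper == {1: ([1, 2, 3], [1, 2], [3]), 2: ([1, 2], [1], [2])}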
def read_mw_data_csv(mw_path_x, mw_path_y, mw_path_z):
mw_x_df = pd.read_csv(mw_path_x)
mw_y_df = pd.read_csv(mw_path_y)
mw_z_df = pd.read_csv(mw_path_z)
return {'MW_x': mw_x_df, 'MW_y': mw_y_df, 'MW_z': mw_z_df}
def read_mw_data_excel(mw_path):
mw_xl_file = pd.ExcelFile(mw_path)
mw_dfs = {sheet_name: mw_xl_file.parse(sheet_name, header=None, index_col=None).astype('int32')
for sheet_name in mw_xl_file.sheet_names}
return mw_dfs
def read_liaison_data_excel(liaison_path):
liaison_xl_file = pd.ExcelFile(liaison_path)
liaison_df = liaison_xl_file.parse('Liaison Matrix', header=None, index_col=None)
liaison_df.fillna(value=0, inplace=True)
liaison_df = liaison_df.astype('int32')
return liaison_df
# Checks whether there is at least 1 liaison between two subassemblies.
# The subassemblies themselves are assumed to be connected.
def connected_subassemblies(sa1, sa2, liaison_matrix):
'''
prt = sa1 + sa2
if is_stable(prt, liaison_df) == True:
return True
'''
for p1 in sa1:
for p2 in sa2:
if liaison_matrix[p1-1][p2-1] == 1:
return True
return False
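# Minimal sketch of the expected call (hypothetical 3x3 liaison matrix):
#   liaison_matrix = [[0, 1, 0], [1, 0, 0], [0, 0, 0]]
#   connected_subassemblies([1], [2], liaison_matrix)  ->  True  (liaison between parts 1 and 2)
#   connected_subassemblies([1], [3], liaison_matrix)  ->  False (no liaison between parts 1 and 3)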
def is_stable(prt, liaison_df):
# List of visited nodes
vis = [False for x in range(len(prt))]
# Submatrix of LG
prt_idx = [p - 1 for p in prt]
lg_df = liaison_df.iloc[prt_idx, prt_idx]
lg = lg_df.to_numpy()
# DFS to explore the graph from the first node
dfs(lg, vis, 0)
# All subassembly parts must be visited via liaisons
for i in range(len(vis)):
if vis[i] == False:
return False
return True
def dfs(lg, vis, v):
if vis[v] == True:
return
vis[v] = True
# for all neighbors u of v
for u in [i for i in range(len(vis)) if lg[v][i] == 1]:
if vis[u] == False:
dfs(lg, vis, u)
# check whether disassembly is possible by checking for
# collision-free assembly paths of one of two subsets along all axes
def collision_free_assembly(sa1, sa2, mw):
# todo: make this algorithm faster: precompute mw to numpy
l1 = len(sa1)
l2 = len(sa2)
for i in range(6):
checksum = 0
if i < 3:
mat = mw[i]
for j in sa1:
for k in sa2:
checksum = checksum + mat[j - 1][k - 1]
else:
mat = mw[i-3]
for j in sa1:
for k in sa2:
checksum = checksum + mat[k - 1][j - 1]
if checksum == l1 * l2:
return True
return False
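# Hypothetical 2-part example: the x moving-wedge entry mw_x[0][1] == 1, so the checksum for the
# first direction equals l1 * l2 and the pair is judged collision free:
#   mw_x = [[1, 1], [0, 1]]; mw_y = [[1, 0], [0, 1]]; mw_z = [[1, 0], [0, 1]]
#   collision_free_assembly([1], [2], [mw_x, mw_y, mw_z])  ->  True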
def sa_prevents_future_assembly(sa, prod, mw):
global prohibited_sa
if sa in prohibited_sa:
return True
if sa == prod:
return False
for part in prod:
if part not in sa:
appended_part = [part]
if not collision_free_assembly(sa, appended_part, mw):
#print(str(appended_part))
prohibited_sa.append(sa)
return True
return False
def product_subassembly_unused(sa, and_or_graph):
for k in and_or_graph.keys():
if and_or_graph[k][1] == sa or and_or_graph[k][2] == sa:
return False
return True
def intersection(lst1, lst2):
return list(set(lst1) & set(lst2))
def is_stationary(prt):
global base_part_id
if base_part_id in prt:
return True
else:
return False
if __name__ == '__main__':
# AND/OR hypergraph as a dictionary
num_edges = 0
hyper = {}
hyper_str = {} # for visualization
read_path = '../../data/mw_liaison_csv_generated/screening/'
write_path = '../../data/and_or_pickles/generated_screening/'
result_df = pd.DataFrame()
pr = cProfile.Profile()
pr.enable()
result = run_experiment_csv(read_path=read_path,
write_path=write_path,
product_name='pre_exp_1',
restrict_nonstat_size=False,
max_nonstat_parts=3)
pr.disable()
stats = pstats.Stats(pr)
stats.sort_stats(pstats.SortKey.TIME)
#stats.print_stats()
stats.dump_stats(filename='aog_bottom_up.prof')
# DataFrame.append was removed in pandas 2.0; concat is the current equivalent
result_df = pd.concat([result_df, pd.DataFrame([result])], ignore_index=True)
print(result_df)
'''
for i in range(1, 16):
result_df = pd.DataFrame()
result = run_experiment(read_path=read_path, write_path=write_path, product_name='exp_' + str(i), restrict_nonstat_size=False, max_nonstat_parts=3)
result_df = result_df.append(result, ignore_index=True)
with open('../out/performance_test_results/doe_2_bottom_up_results.csv', 'a') as f:
result_df.to_csv(f, header=False, index=False)
del result_df
print(i, ' done.')
'''
import networkx as nx
import pickle
import random
from matplotlib import pyplot as plt
from src.validation.evaluation_calculation import calc_number_of_possible_sequences
'''
graph = {1: ([1, 2, 3, 4, 5, 6], [1, 2, 4, 5, 6], [3]),
2: ([1, 2, 4, 5, 6], [1, 2, 5, 6], [4]),
3: ([1, 2, 4, 5, 6], [1, 4, 5], [2, 6]),
4: ([1, 2, 5, 6], [1, 5], [2, 6]),
5: ([1, 4, 5], [1, 5], [4]),
6: ([1, 4, 5], [1, 4], [5]),
7: ([1, 4], [1], [4]),
8: ([1, 5], [1], [5]),
9: ([2, 6], [2], [6])}
'''
ioc_demo_graph = {
1: (['be001', 'be002', 'be003', 'be004', 'be005', 'be006'], ['be001', 'be002', 'be003', 'be006'], ['be004', 'be005']),
2: (['be001', 'be002', 'be003', 'be004', 'be005', 'be006'], ['be001', 'be002', 'be003', 'be004', 'be005'], ['be006']),
3: (['be001', 'be002', 'be003', 'be006'], ['be001', 'be002', 'be003'], ['be006']),
4: (['be001', 'be002', 'be003', 'be004', 'be005'], ['be001', 'be002', 'be003'], ['be004', 'be005']),
5: (['be001', 'be002', 'be003'], ['be001', 'be003'], ['be002']),
6: (['be001', 'be002', 'be003'], ['be001', 'be002'], ['be003']),
7: (['be004', 'be005'], ['be004'], ['be005']),
8: (['be001', 'be003'], ['be001'], ['be003']),
9: (['be001', 'be002'], ['be001'], ['be002'])
}
def open_aog_file(product_name):
pickle_path = '../../data/and_or_pickles/' + product_name + '.pickle'
with open(pickle_path, 'rb') as input_file:
and_or_graph = pickle.load(input_file)
return and_or_graph
def get_assembly_tree_first_possible(graph):
flipped_dict = get_reduced_flipped_graph(graph)
reduced_graph = get_reduced_graph(flipped_dict, graph)
G = nx.DiGraph()
add_process_nodes(G, reduced_graph)
add_edges_to_graph(G, reduced_graph, flipped_dict)
remove_unconnected_nodes(G)
remove_duplicate_processes(G, reduced_graph)
add_start_node_with_edges(G)
return G
def get_monte_carlo_optimal_assembly_tree(graph, iterations, opt_goal='high_flex'):
#print(f'find assembly_tree with highest flexibility ({iterations} iterations)')
best_assembly_tree = get_assembly_tree_random(graph)
best_flex = calc_number_of_possible_sequences(best_assembly_tree)
for i in range(0, iterations):
assembly_tree = get_assembly_tree_random(graph).copy()
try:
flex = calc_number_of_possible_sequences(assembly_tree)
except Exception:
# todo: fix this error (find out why the number of sequences cannot be calculated for these graphs)
print('exception: could not calculate flexibility of graph')
if opt_goal == 'high_flex':
flex = 0
if opt_goal == 'low_flex':
flex = 9999999999999999
if opt_goal == 'high_flex':
if flex > best_flex:
best_assembly_tree = assembly_tree.copy()
best_flex = flex
if opt_goal == 'low_flex':
if flex < best_flex:
best_assembly_tree = assembly_tree.copy()
best_flex = flex
#print(f'Highest number of sequences: {best_flex}')
return best_assembly_tree
def get_assembly_tree_random(graph):
flipped_dict = get_random_reduced_flipped_graph(graph)
reduced_graph = get_reduced_graph(flipped_dict, graph)
G = nx.DiGraph()
add_process_nodes(G, reduced_graph)
add_edges_to_graph(G, reduced_graph, flipped_dict)
remove_unconnected_nodes(G)
remove_duplicate_processes(G, reduced_graph)
add_start_node_with_edges(G)
G.remove_edge('Start', 'Start')
return G
def get_reduced_flipped_graph(graph):
flipped_dict = {}
for key, value in graph.items():
assembly = value[0]
assembly = str(assembly)
if assembly not in flipped_dict:
flipped_dict[assembly] = key
return flipped_dict
def get_random_reduced_flipped_graph(graph):
duplicate_keys = find_duplicate_keys(graph)
picked_keys = pick_random_key(duplicate_keys)
flipped_dict = {}
for key, value in graph.items():
assembly = str(value[0])
if assembly not in flipped_dict:
if assembly in picked_keys:
picked_key = picked_keys[assembly]
flipped_dict[assembly] = picked_key
else:
flipped_dict[assembly] = key
return flipped_dict
def find_duplicate_keys(graph):
rev_multidict = {}
for key, value in graph.items():
rev_multidict.setdefault(str(value[0]), set()).add(key)
duplicate_values = [key for key, values in rev_multidict.items() if len(values) > 1]
duplicate_keys = {}
for key, value in graph.items():
if str(value[0]) in duplicate_values:
duplicate_keys.setdefault(str(value[0]), set()).add(key)
return duplicate_keys
def pick_random_key(duplicate_keys):
picked_keys = {}
for key, value in duplicate_keys.items():
picked_value = random.choice(list(value))
picked_keys[key] = picked_value
return picked_keys
def get_reduced_graph(flipped_dict, graph):
reduced_graph = {}
for key, value in flipped_dict.items():
reduced_graph[value] = graph[value]
return reduced_graph
def add_process_nodes(G, reduced_graph):
for key, value in reduced_graph.items():
G.add_node(key, assembly=value[0], sub_1=value[1], sub_2=value[2])
def add_edges_to_graph(G, reduced_graph, flipped_dict):
for key, value in reduced_graph.items():
target_node = key
part_1 = value[1]
part_2 = value[2]
if len(part_1) > 1:
process_for_part_1 = flipped_dict[str(part_1)]
start_node = process_for_part_1
G.add_edge(start_node, target_node)
if len(part_2) > 1:
process_for_part_2 = flipped_dict[str(part_2)]
start_node = process_for_part_2
G.add_edge(start_node, target_node)
def remove_unconnected_nodes(G):
G.remove_nodes_from(list(nx.isolates(G)))
# remove duplicate processes
def remove_duplicate_processes(G, reduced_graph):
final_process = get_final_process(reduced_graph)
nodes_to_remove = search_nodes_to_remove(G, final_process)
while nodes_to_remove != []:
for node in nodes_to_remove:
G.remove_node(node)
nodes_to_remove = search_nodes_to_remove(G, final_process)
def search_nodes_to_remove(G, final_process):
nodes_to_remove = []
for node in G.nodes():
if G.out_degree(node) == 0:
if node != final_process:
nodes_to_remove.append(node)
return nodes_to_remove
def get_final_process(graph_dict):
# the final process is the one whose resulting assembly contains the most parts
key = max(graph_dict, key=lambda k: len(graph_dict[k][0]))
return key
def add_start_node_with_edges(G):
G.add_node('Start')
for node in G.nodes():
if G.in_degree(node) == 0:
G.add_edge('Start', node)
def main():
product_name = 'ioc_demo'
and_or_graph = open_aog_file(product_name)
G = get_monte_carlo_optimal_assembly_tree(and_or_graph, iterations=100, opt_goal='high_flex')
nx.draw(G, with_labels=True)
plt.show()
pickle_path = '../../out/assembly_tree/' + product_name + '.pickle'
with open(pickle_path, 'wb') as handle:
pickle.dump(G, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == '__main__':
main()
import numpy as np
import pandas as pd
import itertools
from src.abd_by_occ.basic_cad_functions.part_handling import remove_fasteners, rename_shapes_with_numbers
from OCC.Core.GProp import GProp_GProps
from OCC.Extend.DataExchange import read_step_file_with_names_colors
from OCC.Core.BRepMesh import BRepMesh_IncrementalMesh
from OCC.Core.BRepExtrema import BRepExtrema_ShapeProximity,BRepExtrema_ShapeList,BRepExtrema_TriangleSet
from OCC.Extend.ShapeFactory import translate_shp
from OCC.Core.gp import gp_Pnt, gp_Vec
props = GProp_GProps()
product_name = 'centrifugal_pump'
FILEPATH = '../../examples/stepFiles/' + product_name + '.stp'
def transpose_add(df_array):
df_array = df_array + df_array.T - np.diag(np.diag(df_array))
return df_array
def proximity(shapes_object,make_csv=False, export_name= product_name + '.csv'):
names = []
for shape in shapes_object:
label, _ = shapes_object[shape]
names.append(label)
contacts = np.zeros(shape=[len(names),len(names)])
print("Making array of shape:",contacts.shape)
# itertools.combinations yields index pairs with i < j, so every pair is checked exactly once
for i, j in itertools.combinations(np.arange(len(list(shapes_object.keys()))), 2):
print(i, j)
shape_a = list(shapes_object.keys())[i]
shape_b = list(shapes_object.keys())[j]
# meshing is required before the proximity check
BRepMesh_IncrementalMesh(shape_a, 1.0)
BRepMesh_IncrementalMesh(shape_b, 1.0)
proximity = BRepExtrema_ShapeProximity(shape_a, shape_b)
proximity.Perform()
shapes_in_contact_a = []
shapes_in_contact_b = []
if proximity.IsDone():
subs1 = proximity.OverlapSubShapes1().Keys()
subs2 = proximity.OverlapSubShapes2().Keys()
for sa in subs1:
temp = translate_shp(proximity.GetSubShape1(sa), gp_Vec(0, 0, 5))
shapes_in_contact_a.append(temp)
for sa in subs2:
temp = translate_shp(proximity.GetSubShape2(sa), gp_Vec(0, 0, 5))
shapes_in_contact_b.append(temp)
if len(shapes_in_contact_a) and len(shapes_in_contact_b):
contacts[i, j] = 1
if make_csv:
print("Preparing dataframe")
contacts = transpose_add(contacts)
df_contact = pd.DataFrame(contacts,index=names,columns=names)
df_contact.to_csv('../../out/contact_matrix/' + export_name)
return contacts
if __name__ == "__main__":
shapes_names_colors = read_step_file_with_names_colors(FILEPATH)
shapes_names_colors = rename_shapes_with_numbers(shapes_names_colors)
shapes_names_colors = remove_fasteners(shapes_names_colors)
proximity(shapes_names_colors,make_csv=True)
import sys
import itertools
from itertools import groupby, pairwise  # pairwise needs Python 3.10+; otherwise define it via itertools.tee
from src.graph_utilities.graph_builder import *
myid = ID_Generator()
def create_mapping_of_names_to_nodes(assembly_tiers):
bill_of_materials = []
for tier in assembly_tiers:
for comp in tier:
bill_of_materials.append(comp)
mapping = {}
for i, comp in enumerate(bill_of_materials):
mapping[i] = comp
return mapping
def create_subset_sizes(assembly_tiers):
subset_sizes = []
for tier in assembly_tiers:
subset_sizes.append(len(tier))
return subset_sizes
def create_contact_graph(contact_matrix_df):
# from_numpy_matrix was removed in NetworkX 3.0; from_numpy_array is the current equivalent
g = nx.from_numpy_array(contact_matrix_df.values, parallel_edges=True, create_using=nx.Graph())
label_mapping = {idx: val for idx, val in enumerate(contact_matrix_df.columns)}
g = nx.relabel_nodes(g, label_mapping)
return g
def create_precedence_graph(contact_graph, assembly_tiers):
G = nx.DiGraph()
for tier in range(len(assembly_tiers)):
for part in assembly_tiers[tier]:
G.add_node(part, tier=tier)
for u,v,a in contact_graph.edges(data=True):
#print(u, G.nodes[u]['tier'], v, G.nodes[v]['tier'])
if (G.nodes[u]['tier'] - G.nodes[v]['tier'] == -1):
G.add_edge(u,v)
if (G.nodes[v]['tier'] - G.nodes[u]['tier'] == -1):
G.add_edge(v,u)
return G
def create_multilayered_graph(*subset_sizes):
extents = pairwise(itertools.accumulate((0,) + subset_sizes))
layers = [range(start, end) for start, end in extents]
G = nx.DiGraph()
for (i, layer) in enumerate(layers):
G.add_nodes_from(layer, layer=i)
for layer1, layer2 in pairwise(layers):
G.add_edges_from(itertools.product(layer1, layer2))
return G
def parts_not_connected(contact_matrix_df, u, v):
if contact_matrix_df.loc[u, v] == 0:
return True
else:
return False
# -------------------------
# Decision tree (AOG -> PG)
# -------------------------
def get_decision_tree(AOG, flipped_reduced_AOG, flipped_duplicates, simple_nodes):
decision_tree = nx.DiGraph()
decision_tree = add_nodes_with_edges(decision_tree, AOG, flipped_reduced_AOG, flipped_duplicates)
decision_tree = add_start_node_with_edges(decision_tree, flipped_duplicates)
remove_all_simple_nodes_from_end(decision_tree, simple_nodes)
return decision_tree
def add_nodes_with_edges(decision_tree, AOG, flipped_reduced_AOG, flipped_duplicates):
max_num_keys_of_AOG = len(AOG)
print('add_nodes_with_edges:')
for key, value in AOG.items():
#print(f'add_nodes_with_edges: {key}/{max_num_keys_of_AOG}', end="\r", flush=True)
sys.stdout.write('\r' + str(key) + '/' + str(max_num_keys_of_AOG)+ ' len_of_decision_tree: ' + str(len(decision_tree)))
sys.stdout.flush()
u_label = key
sub_1 = value[1]
sub_2 = value[2]
if len(sub_1) > 1:
add_simple_edge_to_decision_tree(sub_1, flipped_reduced_AOG, decision_tree, u_label)
add_decision_edges_to_decision_tree(sub_1, flipped_duplicates, decision_tree, u_label)
if len(sub_2) > 1:
add_simple_edge_to_decision_tree(sub_2, flipped_reduced_AOG, decision_tree, u_label)
add_decision_edges_to_decision_tree(sub_2, flipped_duplicates, decision_tree, u_label)
return decision_tree
def add_start_node_with_edges(decision_tree, flipped_duplicates):
start_node_id = myid.generate_id()
decision_tree.add_node(start_node_id, label='Start')
first_key = list(flipped_duplicates.keys())[0]
# get uuids of nodes with first_key
uuids_of_first_key = get_uuids_of_first_key(decision_tree, first_key, flipped_duplicates)
for node in uuids_of_first_key:
decision_tree.add_edge(start_node_id, node)
return decision_tree
def get_uuids_of_first_key(decision_tree, first_key, flipped_duplicates):
uuids_of_first_key = []
for val in flipped_duplicates[first_key]:
for node, data in decision_tree.nodes(data=True):
if data['label'] == val:
uuids_of_first_key.append(node)
return uuids_of_first_key
def remove_all_simple_nodes_from_end(decision_tree, simple_nodes):
nodes_to_remove = get_simple_nodes_to_remove(decision_tree, simple_nodes)
while nodes_to_remove != []:
for node in nodes_to_remove:
decision_tree.remove_node(node)
nodes_to_remove = get_simple_nodes_to_remove(decision_tree, simple_nodes)
def get_simple_nodes_to_remove(decision_tree, simple_nodes):
nodes_to_remove = []
for node, data in decision_tree.nodes(data=True):
if decision_tree.out_degree(node) == 0:
if data['label'] in simple_nodes:
nodes_to_remove.append(node)
return nodes_to_remove
def add_simple_edge_to_decision_tree(sub, flipped_reduced_AOG, decision_tree, u_label):
if str(sub) in list(flipped_reduced_AOG.keys()):
existing_u = append_existing_u(decision_tree, u_label)
if existing_u == []:
u = myid.generate_id()
decision_tree.add_node(u, label=u_label)
existing_u.append(u)
for u_elem in existing_u:
process_for_sub = flipped_reduced_AOG[str(sub)]
v = process_for_sub
node_id = myid.generate_id()
decision_tree.add_node(node_id, label=v)
decision_tree.add_edge(u_elem, node_id)
def add_decision_edges_to_decision_tree(sub, flipped_duplicates, decision_tree, u_label):
if str(sub) in list(flipped_duplicates.keys()):
existing_u = append_existing_u(decision_tree, u_label)
if existing_u == []:
u = myid.generate_id()
decision_tree.add_node(u, label=u_label)
existing_u.append(u)
processes_for_sub = flipped_duplicates[str(sub)]
for process in processes_for_sub:
for u_elem in existing_u:
v = process
node_id = myid.generate_id()
decision_tree.add_node(node_id, label=v)
decision_tree.add_edge(u_elem, node_id)
def append_existing_u(decision_tree, u_label):
existing_u = []
for node, data in decision_tree.nodes(data=True):
if data['label'] == u_label:
existing_u.append(node)
return existing_u
# ----------------------------------------------------------------------
# PARSE TREE (PG -> block set theory, calc number of possible sequences)
# ----------------------------------------------------------------------
def build_parse_tree(assembly_tree, sorted_node_list):
parse_tree = nx.DiGraph()
add_nodes_with_inbetween_s_nodes(assembly_tree, parse_tree, sorted_node_list)
#pos = hierarchy_pos(parse_tree, 's_INIT')
#nx.draw(parse_tree, pos, with_labels=True)
#plt.show()
remove_unnecessary_s_nodes(parse_tree)
remove_unnecessary_p_nodes(parse_tree)
return parse_tree
def add_nodes_with_inbetween_s_nodes(assembly_tree, parse_tree, sorted_node_list):
for node in sorted_node_list:
if node == 'Start':
add_nodes_after_serial_node(assembly_tree, parse_tree, node, 'INIT')
else:
parent_nodes = list(assembly_tree.predecessors(node))
if len(parent_nodes) == 1:
add_p_s_nodes_simple(parent_nodes, assembly_tree, node, parse_tree)
if len(parent_nodes) > 1:
add_p_s_nodes_with_multiple_successors(parent_nodes, assembly_tree, parse_tree, node)
#pos = hierarchy_pos(parse_tree, 's_INIT')
#nx.draw(parse_tree, pos, with_labels=True)
#plt.show()
def add_p_s_nodes_simple(parent_nodes, assembly_tree, node, parse_tree):
for parent_node in parent_nodes:
if parent_node_is_fork(assembly_tree, node) == False:
add_nodes_after_serial_node(assembly_tree, parse_tree, node, parent_node)
if parent_node_is_fork(assembly_tree, node) == True:
add_nodes_after_parallel_node(assembly_tree, parse_tree, node, parent_node)
def add_p_s_nodes_with_multiple_successors(parent_nodes, assembly_tree, parse_tree, node):
root_node = get_local_root_of_sequence(assembly_tree, parent_nodes)
# 2. Add this node to the "s_node" of the root
parse_predecessor = list(parse_tree.predecessors(root_node))[0]
# remove possible previous connections
parse_tree.remove_edge(parse_predecessor, root_node)
p_string_root_node = 'p_' + str(root_node)
if parse_tree.has_edge(parse_predecessor, p_string_root_node):
parse_tree.remove_edge(parse_predecessor, p_string_root_node)
# add edges to additional s_node
s_string_u = 's_' + str(root_node)
s_string_v = 's_' + str(node)
parse_tree.add_edge(s_string_u, root_node)
parse_tree.add_edge(s_string_u, s_string_v)
parse_tree.add_edge(s_string_v, node)
parse_tree.add_edge(s_string_u, p_string_root_node)
if parse_predecessor != s_string_u:
parse_tree.add_edge(parse_predecessor, s_string_u)
#if len(list(parse_tree.predecessors(node))) > 1:
parse_tree.add_edge(s_string_v, 'p_' + str(node))
def get_local_root_of_sequence(assembly_tree, parent_nodes):
lca = lowest_common_ancestors(assembly_tree, parent_nodes)
#print(f'lca: {lca}')
if len(lca) == 1:
root_node = lca[0]
elif len(lca) == 2:
lca_2 = lowest_common_ancestors(assembly_tree, lca)
root_node = lca_2[0]
else:
root_node = 'Start' #todo: make generic for multiple lcas
#print(f'root_node = {root_node}')
return root_node
def lowest_common_ancestors(assembly_tree, nodes):
# according to https://www.baeldung.com/cs/lowest-common-ancestor-acyclic-graph
# get ancestors of each node individually
ancestors = []
for i, node in enumerate(nodes):
visited = []
ancestors_of_node = []
dfs_get_all_ancestors(visited, ancestors_of_node, assembly_tree, node)
ancestors.append(ancestors_of_node)
# get common ancestors
common_ancestors = set(ancestors[0])
for s in ancestors[1:]:
common_ancestors.intersection_update(s)
# reduce graph to common ancestors
reduced_graph = assembly_tree.copy()
nodes_to_remove = []
for node in assembly_tree.nodes():
if node not in common_ancestors:
nodes_to_remove.append(node)
for node in nodes_to_remove:
reduced_graph.remove_node(node)
#nx.draw(reduced_graph, with_labels=True)
#plt.show()
# get leafs of reduced graph
leafs = []
for node in reduced_graph.nodes():
if reduced_graph.out_degree(node) == 0:
leafs.append(node)
if leafs == []:
leafs.append('Start')
return leafs
def dfs_get_all_ancestors(visited, ancestors_of_node, graph, node):
if node not in visited:
visited.append(node)
for predecessor in graph.predecessors(node):
ancestors_of_node.append(predecessor)
dfs_get_all_ancestors(visited, ancestors_of_node, graph, predecessor)
def get_prior_splitting_node(assembly_tree, node):
for predecessor in assembly_tree.predecessors(node):
if is_splitting_node(assembly_tree, predecessor):
return predecessor
else:
get_prior_splitting_node(assembly_tree, predecessor)
def is_splitting_node(assembly_tree, node):
if assembly_tree.out_degree(node) > 1:
return True
else:
return False
def all_equal(iterable):
g = groupby(iterable)
return next(g, True) and not next(g, False)
def remove_unnecessary_s_nodes(parse_tree):
# 1. two s_nodes after each other
handle = []
for node in parse_tree.nodes():
if 's_' in str(node) and node != 's_INIT':
parent = list(parse_tree.predecessors(node))[0]
if 's_' in str(parent):
ancestor_s_node = parent
ancestor_is_s_node = True
# 1. find root s_node
while ancestor_is_s_node:
if ancestor_s_node == 's_INIT':
break
predecessor = list(parse_tree.predecessors(ancestor_s_node))[0]
if 's_' in predecessor:
ancestor_s_node = predecessor
else:
ancestor_is_s_node = False
successors = parse_tree.successors(node)
handle.append((node, ancestor_s_node, successors))
for elem in handle:
parse_tree.remove_node(elem[0])
for successor in elem[2]:
parse_tree.add_edge(elem[1], successor)
# 2. s_node following after p_node, but s_node just has one outgoing node
handle2 = []
for node in parse_tree.nodes():
if 's_' in str(node) and node != 's_INIT':
if parse_tree.out_degree(node) == 1:
parent = list(parse_tree.predecessors(node))[0]
child = list(parse_tree.successors(node))[0]
handle2.append((parent, node, child))
for elem in handle2:
parse_tree.add_edge(elem[0], elem[2])
parse_tree.remove_node(elem[1])
def remove_unnecessary_p_nodes(parse_tree):
# sometimes p_ nodes are left with only one successor; then shift the p_ node to the previous serial node
handle = []
for node in parse_tree.nodes():
if 'p_' in str(node):
if parse_tree.out_degree(node) == 1:
parent = list(parse_tree.predecessors(node))[0]
child = list(parse_tree.successors(node))[0]
handle.append((parent, node, child))
for elem in handle:
parse_tree.add_edge(elem[0], elem[2])
parse_tree.remove_node(elem[1])
def parent_node_is_fork(assembly_tree, node):
parent_nodes = assembly_tree.predecessors(node)
parent_is_fork_node = False
for p_node in parent_nodes:
if assembly_tree.out_degree(p_node) > 1:
parent_is_fork_node = True
#print(f'parent node {p_node} is fork node.')
return parent_is_fork_node
def add_nodes_after_serial_node(assembly_tree, parse_tree, node, parent_node):
if assembly_tree.out_degree(node) == 1:
s_string_u = 's_' + str(parent_node)
s_string_v = 's_' + str(node)
parse_tree.add_edge(s_string_u, s_string_v)
parse_tree.add_edge(s_string_v, node)
if assembly_tree.out_degree(node) > 1:
s_string_u = 's_' + str(parent_node)
parse_tree.add_edge(s_string_u, node)
p_string_v = 'p_' + str(node)
parse_tree.add_edge(s_string_u, p_string_v)
if assembly_tree.out_degree(node) == 0:
s_string_u = 's_' + str(parent_node)
parse_tree.add_edge(s_string_u, node)
def add_nodes_after_parallel_node(assembly_tree, parse_tree, node, parent_node):
if assembly_tree.out_degree(node) == 1:
p_string_u = 'p_' + str(parent_node)
s_string_v = 's_' + str(node)
parse_tree.add_edge(p_string_u, s_string_v)
parse_tree.add_edge(s_string_v, node)
if assembly_tree.out_degree(node) > 1:
p_string_u = 'p_' + str(parent_node)
s_string = 's_' + str(node)
parse_tree.add_edge(p_string_u, s_string)
parse_tree.add_edge(s_string, node)
p_string_v = 'p_' + str(node)
parse_tree.add_edge(s_string, p_string_v)
if assembly_tree.out_degree(node) == 0:
p_string_u = 'p_' + str(parent_node)
parse_tree.add_edge(p_string_u, node)
import networkx as nx
import matplotlib.pyplot as plt
import random
import operator
import pandas as pd
from itertools import chain
class ID_Generator:
def __init__(self):
self.id = 0
def generate_id(self):
self.id = self.id + 1
return self.id
def sort_by_values_len(graph_dict):
dict_len = {key: len(value[0]) for key, value in graph_dict.items()}
sorted_key_list = sorted(dict_len.items(), key=operator.itemgetter(1), reverse=True)
sorted_list = [{item[0]: graph_dict[item[0]]} for item in sorted_key_list]
sorted_dict = {}
for i, entry in enumerate(sorted_list):
sorted_dict[i] = list(entry.values())[0]
return sorted_dict
def plot_decision_tree(decision_tree, color_map=[]):
labeldict = {}
for node, data in decision_tree.nodes(data=True):
try:
labeldict[node] = data['label']
except KeyError:
print('key error: node has no label')
try:
pos = hierarchy_pos(decision_tree)
except Exception:
# fall back to a circular layout if the graph is not a tree
pos = nx.circular_layout(decision_tree)
nx.draw(decision_tree, pos, labels=labeldict, node_color=color_map, with_labels=True)
plt.show()
def get_color_map(decision_tree, simple_nodes):
color_map = []
for node, data in decision_tree.nodes(data=True):
if data['label'] in simple_nodes:
color_map.append('blue')
else:
color_map.append('red')
return color_map
def hierarchy_pos(G, root=None, width=1., vert_gap=0.2, vert_loc=0, xcenter=0.5):
'''
From Joel's answer at https://stackoverflow.com/a/29597209/2966723.
Licensed under Creative Commons Attribution-Share Alike
If the graph is a tree this will return the positions to plot this in a
hierarchical layout.
G: the graph (must be a tree)
root: the root node of current branch
- if the tree is directed and this is not given,
the root will be found and used
- if the tree is directed and this is given, then
the positions will be just for the descendants of this node.
- if the tree is undirected and not given,
then a random choice will be used.
width: horizontal space allocated for this branch - avoids overlap with other branches
vert_gap: gap between levels of hierarchy
vert_loc: vertical location of root
xcenter: horizontal location of root
'''
if not nx.is_tree(G):
raise TypeError('cannot use hierarchy_pos on a graph that is not a tree')
if root is None:
if isinstance(G, nx.DiGraph):
root = next(iter(nx.topological_sort(G))) # allows back compatibility with nx version 1.11
else:
root = random.choice(list(G.nodes))
def _hierarchy_pos(G, root, width=1., vert_gap=0.2, vert_loc=0, xcenter=0.5, pos=None, parent=None):
'''
see hierarchy_pos docstring for most arguments
pos: a dict saying where all nodes go if they have been assigned
parent: parent of this branch. - only affects it if non-directed
'''
if pos is None:
pos = {root: (xcenter, vert_loc)}
else:
pos[root] = (xcenter, vert_loc)
children = list(G.neighbors(root))
if not isinstance(G, nx.DiGraph) and parent is not None:
children.remove(parent)
if len(children) != 0:
dx = width / len(children)
nextx = xcenter - width / 2 - dx / 2
for child in children:
nextx += dx
pos = _hierarchy_pos(G, child, width=dx, vert_gap=vert_gap,
vert_loc=vert_loc - vert_gap, xcenter=nextx,
pos=pos, parent=root)
return pos
return _hierarchy_pos(G, root, width, vert_gap, vert_loc, xcenter)
def init_assembly_tiers(assembly_tree_graph, initial_node='Start'):
tier_list = []
tier_list.append([initial_node])
current_tier = 0
while True:
next_tier_nodes = []
next_neighbor_nodes = get_next_neighbor_nodes(assembly_tree_graph, tier_list[current_tier])
if next_neighbor_nodes != []:
for node in next_neighbor_nodes:
if predecessors_already_visited(node, tier_list, assembly_tree_graph):
next_tier_nodes.append(node)
tier_list.append(next_tier_nodes)
current_tier += 1
else:
break
return tier_list
def init_assembly_tiers_multiple_start_nodes(assembly_tree_graph, initial_nodes):
tier_list = []
tier_list.append(initial_nodes)
current_tier = 0
while True:
next_tier_nodes = []
next_neighbor_nodes = get_next_neighbor_nodes(assembly_tree_graph, tier_list[current_tier])
if next_neighbor_nodes != []:
for node in next_neighbor_nodes:
if predecessors_already_visited(node, tier_list, assembly_tree_graph):
next_tier_nodes.append(node)
tier_list.append(next_tier_nodes)
current_tier += 1
else:
break
return tier_list
def update_at_df_with_tier_list(at_df, tier_list):
for i, tier in enumerate(tier_list):
for elem in tier:
at_df.loc[at_df.Product == elem, 'Assembly Tier'] = i
return at_df
def predecessors_already_visited(node, tier_list, G):
flatten_tier_list = list(chain.from_iterable(tier_list))
# a node is only ready for the next tier once all of its predecessors have been placed in a tier
for predecessor in G.predecessors(node):
if predecessor not in flatten_tier_list:
return False
return True
def add_tier_to_nodes(assembly_tiers, graph):
for counter, tier in enumerate(assembly_tiers):
for elem in tier:
if elem in graph.nodes():
graph.nodes[elem]['tier'] = counter
def get_next_neighbor_nodes(g, current_tier_nodes):
next_tier_nodes = []
for node in current_tier_nodes:
for neighbor in g.neighbors(node):
if neighbor not in next_tier_nodes:
if neighbor != node:
next_tier_nodes.append(neighbor)
return next_tier_nodes
def get_topological_sort_list_by_assembly_tiers(graph, initial_node='Start'):
assembly_tiers = init_assembly_tiers(graph, initial_node)
add_tier_to_nodes(assembly_tiers, graph)
sorted_node_list = sorted(graph.nodes(), key=lambda n: graph.nodes[n]['tier'])
return sorted_node_list
def get_random_sequence_from_PG(graph):
num_nodes = len(graph.nodes()) - 1
assembly_sequence = []
possible_nodes = list(graph.successors('Start'))
while len(assembly_sequence) < num_nodes:
choice = random.choice(possible_nodes)
assembly_sequence.append(choice)
possible_nodes.remove(choice)
next_successors = graph.successors(choice)
for successor in next_successors:
if all_predecessors_already_visited(graph, assembly_sequence, successor):
possible_nodes.append(successor)
return assembly_sequence
def estimate_num_sequences(atg, max_iter):
unique_sequences = []
iter_counter = 0
while (iter_counter < max_iter):
random_sequence = get_random_sequence_from_PG(atg)
if random_sequence not in unique_sequences:
unique_sequences.append(random_sequence)
iter_counter += 1
return len(unique_sequences)
def all_predecessors_already_visited(graph, visited, node):
predecessors = graph.predecessors(node)
num_predecessors = len(list(predecessors))
true_counter = 0
for n in graph.predecessors(node):
if n in visited:
true_counter += 1
if true_counter == num_predecessors:
return True
else:
return False
from typing import Dict, Any
import networkx as nx
import pickle
import random
from matplotlib import pyplot as plt
def open_pg_file(product_name):
pickle_path = '../../out/precedence_graph/' + product_name + '.pickle'
with open(pickle_path, 'rb') as input_file:
pg = pickle.load(input_file)
return pg
def get_initial_part(G):
initial_part = [n for n,d in G.in_degree() if d==0]
initial_part = initial_part[0]
return initial_part
def get_all_initial_parts(G):
initial_parts = [n for n, d in G.in_degree() if d == 0]
return initial_parts
def init_mapped_data_with_sub_2(G):
# get added parts (sub_2)
mapped_data = {}
process_id = 1
for node, data in G.nodes(data=True):
if data['layer'] != 0:
mapped_data[process_id] = {'sub_2': [node]}
process_id += 1
return mapped_data
def relabel_all_nodes(G):
# relabel all nodes with process index:
mapping = {old_label:new_label for new_label, old_label in enumerate(G.nodes())}
print('Switching names of nodes with:')
print(mapping)
G = nx.relabel_nodes(G, mapping)
mapping = {0:'Start'}
G = nx.relabel_nodes(G, mapping)
return G
def relabel_all_nodes_ignoring_start(G):
# relabel all nodes with process index:
mapping = {old_label:new_label for new_label, old_label in enumerate(G.nodes()) if old_label!='Start'}
print('Switching names of nodes with:')
print(mapping)
G = nx.relabel_nodes(G, mapping)
return G
def get_first_tier_node_info(G, mapped_data, initial_part):
for node, data in G.nodes(data=True):
if data['layer'] == 1:
sub_1 = [initial_part]
mapped_data[node]['sub_1'] = sub_1
sub_2 = mapped_data[node]['sub_2']
assembly = sub_1 + sub_2
mapped_data[node]['assembly'] = assembly
return mapped_data
def get_node_info_for_other_tiers(G, mapped_data):
for node, data in G.nodes(data=True):
if data['layer'] > 1:
print(f'node: {node, data}')
predecessors = [n for n in G.predecessors(node)]
# get parts from predecessors
sub_1 = []
for predecessor in predecessors:
print(f'predecessor: {predecessor}, mapped_data: {mapped_data[predecessor]}')
parts_to_add = mapped_data[predecessor]['assembly']
for part in parts_to_add:
if part not in sub_1:
sub_1.append(part)
mapped_data[node]['sub_1'] = sub_1
sub_2 = mapped_data[node]['sub_2']
assembly = sub_1 + sub_2
mapped_data[node]['assembly'] = assembly
return mapped_data
def add_mapped_data_to_nodes(G, mapped_data):
for key, value in mapped_data.items():
assembly = value['assembly']
sub_1 = value['sub_1']
sub_2 = value['sub_2']
G.nodes[key]['assembly'] = assembly
G.nodes[key]['sub_1'] = sub_1
G.nodes[key]['sub_2'] = sub_2
return G
def main():
product_name = 'Product2_from_Catia'
G = open_pg_file(product_name)
# Verification
for node, data in G.nodes(data=True):
print(node, data)
initial_part = get_initial_part(G)
mapped_data = init_mapped_data_with_sub_2(G)
G = relabel_all_nodes(G)
mapped_data = get_first_tier_node_info(G, mapped_data, initial_part)
mapped_data = get_node_info_for_other_tiers(G, mapped_data)
G = add_mapped_data_to_nodes(G, mapped_data)
# Verification
for node, data in G.nodes(data=True):
print(node, data)
pos = nx.multipartite_layout(G, subset_key='layer')
plt.figure(figsize=(8, 8))
nx.draw(G, pos, with_labels=True)
plt.axis("equal")
plt.show()
pickle_path = '../../out/assembly_tree/' + product_name + '_from_pg.pickle'
with open(pickle_path, 'wb') as handle:
pickle.dump(G, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == '__main__':
main()
#
#
# DEPRECATED: SCRIPT NOT WORKING, TOO COMPLEX --> refer to aog_to_assembly_tree.py
#
#
import pickle
import random
import networkx as nx
from matplotlib import pyplot as plt
def create_random_assembly_tree(process_graph):
G = nx.DiGraph()
G.add_node('S')
decision_nodes = get_decision_nodes_with_multiple_out(process_graph)
print(f'decision_nodes: {decision_nodes}')
simple_nodes = [node for node in process_graph.nodes() if (node not in decision_nodes) and (('S' or 's') not in node)]
print(f'simple_nodes: {simple_nodes}')
for u,v,a in process_graph.edges(data=True):
print(u, v, a)
if a['edge_type'] == 'real':
G.add_node(a['label'], assembly_parts=a['assembly_parts'])
if a['source_node_type'] == 'start':
G.add_edge('S', a['label'])
print(f'u_node = {u}')
print(f'v_node = {v}')
u_process = a['label']
print(f'u_process = {u_process}')
successors = list(process_graph.successors(v))
print(f'successors = {successors}')
if successors != []:
if 'r' in v:
picked_successor = random.choice(successors)
print(f'picked successor = {picked_successor}')
v_process = process_graph[v][picked_successor]['label']
print(f'v_process = {v_process}')
if u_process and v_process in G:
G.add_edge(u_process, v_process)
if 's' in v:
for successor in successors:
v_process = process_graph[v][successor]['label']
if u_process and v_process in G:
G.add_edge(u_process, v_process)
if a['edge_type'] == 'dummy':
return G
def create_optimal_assembly_tree(process_graph):
#todo: use gurobi solver from Mikhail
pass
def get_decision_nodes_with_multiple_out(process_graph):
decision_nodes = []
for n in process_graph.nodes():
successors = list(process_graph.successors(n))
if 'r' in n:
if len(successors) > 1:
decision_nodes.append(n)
return decision_nodes
def main():
# Load process graph
product_name = 'simple_example'
pickle_path = '../../out/process_graph/' + product_name + '.pickle'
with open(pickle_path, 'rb') as input_file:
process_graph = pickle.load(input_file)
assembly_tree = create_random_assembly_tree(process_graph)
nx.draw(assembly_tree, with_labels=True)
plt.show()
if __name__ == '__main__':
main()
import pickle
product_name = 'centrifugal_pump'
pickle_path = '../../data/and_or_pickles/' + product_name + '.pickle'
with open(pickle_path, 'rb') as input_file:
and_or_graph = pickle.load(input_file)
print(and_or_graph)
translation_dict = {
1: 0,
2: 1,
3: 2,
4: 3,
5: 5,
6: 6,
7: 7,
8: 18,
9: 19,
10: 20,
11: 21,
12: 22,
}
def translate_array(array, translation_dict):
t_array = []
for elem in array:
t_array.append(translation_dict[elem])
return t_array
translated_graph = {}
for key, value in and_or_graph.items():
t_assembly = translate_array(value[0], translation_dict)
t_sub_1 = translate_array(value[1], translation_dict)
t_sub_2 = translate_array(value[2], translation_dict)
t_value = (t_assembly, t_sub_1, t_sub_2)
translated_graph[key] = t_value
print(translated_graph)
pickle_path = '../../data/and_or_pickles/' + product_name + '_translated.pickle'
with open(pickle_path, 'wb') as handle:
pickle.dump(translated_graph, handle, protocol=pickle.HIGHEST_PROTOCOL)
# Tool to generate artificial data for performance analysis
import numpy as np
import pandas as pd
from networkx.generators.random_graphs import erdos_renyi_graph
from networkx import convert_matrix
def calc_liaisons_count(liaison_df):
return liaison_df.to_numpy().sum()/2
def calc_MWF(mw_df):
return mw_df.to_numpy().sum() / (len(mw_df.index)*len(mw_df.index))
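# MWF here is simply the fraction of 1-entries in the moving-wedge matrix (diagonal included),
# which mirrors how generate_mw_matrix below samples each entry as 1 with probability MWF.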
def generate_liaison_csv_file(path, product_name, n_parts, p_liaisons):
save_path = path + product_name + '_Liaisons.csv'
liaison_df = generate_liaison_df(n_parts, p_liaisons)
liaison_df.to_csv(save_path)
def generate_liaison_df(n_parts, p_liaisons):
#p = n_liaisons * 2 / (n_parts * n_parts - n_parts)
p = p_liaisons
g_liaisons = erdos_renyi_graph(n_parts, p)
# to_numpy_matrix was removed in NetworkX 3.0; to_numpy_array returns a plain ndarray
liaison_numpy = convert_matrix.to_numpy_array(g_liaisons)
liaison_numpy = liaison_numpy.astype(int)
liaison_df = pd.DataFrame(liaison_numpy)
return liaison_df
def generate_mw_csv_files(path, product_name, n_parts, MWF):
path_x = path + product_name + '_Moving wedge_x.csv'
path_y = path + product_name + '_Moving wedge_y.csv'
path_z = path + product_name + '_Moving wedge_z.csv'
mw_df_x = generate_mw_matrix(n_parts, MWF)
mw_df_y = generate_mw_matrix(n_parts, MWF)
mw_df_z = generate_mw_matrix(n_parts, MWF)
mw_df_x.to_csv(path_x)
mw_df_y.to_csv(path_y)
mw_df_z.to_csv(path_z)
def generate_mw_matrix(n_parts, MWF):
#mw_matrix = np.zeros((n_parts, n_parts))
mw_matrix = np.random.choice([0,1], size=(n_parts, n_parts), p=[1-MWF, MWF])
mw_df = pd.DataFrame(mw_matrix)
return mw_df
def generate_files(path, product_name, n_parts, p_liaisons, mwf):
generate_liaison_csv_file(path=path, product_name=product_name, n_parts=n_parts, p_liaisons=p_liaisons)
generate_mw_csv_files(path=path, product_name=product_name, n_parts=n_parts, MWF=mwf)
if __name__ == '__main__':
#DOE2 Box-Behnken
path = '../../data/mw_liaison_csv_generated/screening/'
generate_files(path=path, product_name='pre_exp_1', n_parts=17, p_liaisons=0.14, mwf=0.86)
# Script to calculate operational flexibility values
import pickle
import numpy as np
import networkx as nx
import operator as op
from functools import reduce
iso = nx.algorithms.isomorphism
from src.graph_utilities.graph_builder import *
test_graph = {1: ([1, 2, 3, 4, 5, 6], [1, 2, 4, 5, 6], [3]),
2: ([1, 2, 4, 5, 6], [1, 2, 5, 6], [4]),
3: ([1, 2, 4, 5, 6], [1, 4, 5], [2, 6]),
4: ([1, 2, 5, 6], [1, 5], [2, 6]),
5: ([1, 4, 5], [1, 5], [4]),
6: ([1, 4, 5], [1, 4], [5]),
7: ([1, 4], [1], [4]),
8: ([1, 5], [1], [5]),
9: ([2, 6], [2], [6])}
def open_aog_file(product_name):
pickle_path = '../../data/and_or_pickles/' + product_name + '.pickle'
with open(pickle_path, 'rb') as input_file:
and_or_graph = pickle.load(input_file)
return and_or_graph
def open_assembly_tree(product_name):
pickle_path = '../../out/assembly_tree/' + product_name + '.pickle'
with open(pickle_path, 'rb') as input_file:
assembly_tree = pickle.load(input_file)
return assembly_tree
# ----------------------------------------
# Functions to calc number of PGs from AOG
# ----------------------------------------
def calc_number_of_possible_PGs(AOG, plot_tree=False):
flipped_duplicates = find_duplicate_keys(AOG)
reduced_AOG = remove_duplicates(AOG, flipped_duplicates)
flipped_reduced_AOG = flip_reduced_AOG(reduced_AOG)
simple_nodes = list(flipped_reduced_AOG.values())
decision_tree = get_decision_tree(AOG, flipped_reduced_AOG, flipped_duplicates, simple_nodes)
if plot_tree==True:
color_map = get_color_map(decision_tree, simple_nodes)
plot_decision_tree(decision_tree, color_map)
num_end_points = calc_end_points(decision_tree)
return num_end_points
def estimate_number_of_possible_PGs(n_aog_edges):
# Formula determined by exponential regression analysis
y = 396.294 * np.exp(0.00268 * n_aog_edges)
return y
def remove_duplicates(AOG, flipped_duplicates):
reduced_AOG = {}
for key, val in AOG.items():
if str(val[0]) not in list(flipped_duplicates.keys()):
reduced_AOG[key] = val
return reduced_AOG
def flip_reduced_AOG(reduced_AOG):
flipped_reduced_AOG = {}
for key, val in reduced_AOG.items():
flipped_reduced_AOG[str(val[0])] = key
return flipped_reduced_AOG
def calc_end_points(decision_tree):
end_points = [node for node, out_degree in decision_tree.out_degree() if out_degree == 0]
num_end_points = len(end_points)
return num_end_points
def find_duplicate_keys(graph):
rev_multidict = {}
for key, value in graph.items():
rev_multidict.setdefault(str(value[0]), set()).add(key)
duplicate_values = [key for key, values in rev_multidict.items() if len(values) > 1]
duplicate_keys = {}
for key, value in graph.items():
if str(value[0]) in duplicate_values:
duplicate_keys.setdefault(str(value[0]), set()).add(key)
return duplicate_keys
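# Worked example using test_graph above: two assemblies can each be produced by two different operations,
#   find_duplicate_keys(test_graph) == {'[1, 2, 4, 5, 6]': {2, 3}, '[1, 4, 5]': {5, 6}}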
# -----------------------------------------------
# Functions to calc number of sequences from PG
# -----------------------------------------------
def calc_number_of_possible_sequences(assembly_tree):
# calc with slot block theory (https://www.sciencedirect.com/science/article/abs/pii/S0166361597001036?via%3Dihub)
sorted_node_list = get_topological_sort_list_by_assembly_tiers(assembly_tree)
# building parse_tree
parse_tree = build_parse_tree(assembly_tree, sorted_node_list)
# visual check
#pos = hierarchy_pos(parse_tree, 's_INIT')
#nx.draw(parse_tree, pos, with_labels=True)
#plt.show()
# calc number of sequences from parse tree
num_sequences = calc_num_sequences(parse_tree)
return num_sequences
def calc_num_sequences(parse_tree):
parse_tree.remove_node('Start')
num_sequences = calc_num_plans_of_serial_node(parse_tree, 's_INIT')
return num_sequences
def calc_num_plans_of_parallel_node(parse_tree, node):
# get all successors of parallel node
num_plans = 1
successors = list(parse_tree.successors(node))
# calc combination of first pair
n = dfs_num_of_all_successing_operations(parse_tree, successors[0])
m = dfs_num_of_all_successing_operations(parse_tree, successors[1])
n_plans_of_first_pair = total_num_combinations_of_parallel_branch(n, m)
num_plans = num_plans * n_plans_of_first_pair
if 's_' in str(successors[0]):
sub_plans = calc_num_plans_of_serial_node(parse_tree, successors[0])
num_plans = num_plans * sub_plans
if 's_' in str(successors[1]):
sub_plans = calc_num_plans_of_serial_node(parse_tree, successors[1])
num_plans = num_plans * sub_plans
visited = [successors[0], successors[1]]
# multiply intermediate result with next successors
for i in range(2, len(successors)):
n = 0
for elem in visited:
n += dfs_num_of_all_successing_operations(parse_tree, elem)
m = dfs_num_of_all_successing_operations(parse_tree, successors[i])
num_plans = num_plans * total_num_combinations_of_parallel_branch(n, m)
# multiply with num of sub_plans in case it is a sequential node
if 's_' in str(successors[i]):
sub_plans = calc_num_plans_of_serial_node(parse_tree, successors[i])
num_plans = num_plans * sub_plans
visited.append(successors[i])
return num_plans
def calc_num_plans_of_serial_node(parse_tree, node):
num_plans = 1
successors = list(parse_tree.successors(node))
# get max of calced plans
for successor in successors:
if 'p_' in str(successor):
next_level_successors = list(parse_tree.successors(successor))
if next_level_successors != []: #workaround for parallel nodes with moved nodes
plans = calc_num_plans_of_parallel_node(parse_tree, successor)
if plans > num_plans:
num_plans = plans
return num_plans
def bfs_calc_subplans(visited, graph, node):
visited.append(node)
queue = []
queue.append(node)
while queue:
s = queue.pop(0)
if 's_' in str(s):
print(s)
if 'p_' in str(s):
print(s)
for successor in graph.successors(s):
if successor not in visited:
visited.append(successor)
queue.append(successor)
def dfs_num_of_all_successing_operations(parse_tree, local_root):
visited = set()
operations = []
dfs_operations(visited, operations, parse_tree, local_root)
return len(operations)
def dfs_operations(visited, operations, graph, node):
if node not in visited:
visited.add(node)
if is_operation(node):
operations.append(node)
for successor in graph.successors(node):
dfs_operations(visited, operations, graph, successor)
def is_operation(node):
if 's_' in str(node):
return False
if 'p_' in str(node):
return False
else:
return True
def total_num_combinations_of_parallel_branch(n, m):
total = 0
lim = min(n + 1, m)
for i in range(1, lim + 1):
C1 = nCr(n + 1, i)
C2 = nCr(m - 1, i - 1)
total = total + C1 * C2
return total
def nCr(n, r):
r = min(r, n - r)
numer = reduce(op.mul, range(n, n - r, -1), 1)
denom = reduce(op.mul, range(1, r + 1), 1)
return numer // denom
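# Quick sanity check (hypothetical values): nCr(5, 2) == 10, and
# total_num_combinations_of_parallel_branch(1, 1) == nCr(2, 1) * nCr(0, 0) == 2,
# matching the two possible orderings of two independent single-operation branches.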
# -------------------------
def calc_total_number_of_sequences(AOG):
pass