-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathATTRIBUTE_MATCH_original_Gamut NO VALS.py
More file actions
274 lines (211 loc) · 13 KB
/
ATTRIBUTE_MATCH_original_Gamut NO VALS.py
File metadata and controls
274 lines (211 loc) · 13 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 16 16:39:52 2019
@author: xcxg109
"""
import pandas as pd
import numpy as np
import query_code_original_Gamut as q
import settings
import data_process_original_Gamut as process
import file_data_original_Gamut as fd
from typing import Dict
from queries_original_Gamut import gamut_attr_values, gamut_attr_query, gamut_usage_query
from queries_PIM import grainger_attr_query
import time
pd.options.mode.chained_assignment = None
def match_category(df):
    """Compare Grainger and Gamut attribute names row by row and record the
    result in a new 'Matching' column.

    Values written:
      'Match'           - names identical
      'Potential Match' - one name is a substring of the other
      'Grainger only' / 'Gamut only' - name present on only one side
      'no'              - default: both present but unrelated, or both blank

    Fix vs. original: blank checks now run BEFORE the string comparisons.
    The original compared str(NaN) == 'nan', so a missing name could
    substring-match a real name containing 'nan' (e.g. 'Resonance') and two
    missing names compared equal and were marked 'Match'.
    """
    df['Matching'] = 'no'
    for row in df.itertuples():
        grainger_blank = process.isBlank(row.Grainger_Attribute_Name)
        gamut_blank = process.isBlank(row.Gamut_Attribute_Name)
        # One-sided rows are expected from the outer merge in grainger_process.
        if grainger_blank and gamut_blank:
            continue  # nothing to compare; keep the default 'no'
        if grainger_blank:
            df.at[row.Index, 'Matching'] = 'Gamut only'
            continue
        if gamut_blank:
            df.at[row.Index, 'Matching'] = 'Grainger only'
            continue
        grainger_string = str(row.Grainger_Attribute_Name)
        gamut_string = str(row.Gamut_Attribute_Name)
        if grainger_string == gamut_string:
            df.at[row.Index, 'Matching'] = 'Match'
        elif grainger_string in gamut_string or gamut_string in grainger_string:
            df.at[row.Index, 'Matching'] = 'Potential Match'
    return df
def gamut_process(node, gamut_dict: Dict, k):
    """Pull and prep the attribute data for one gamut node and cache it.

    Returns (gamut_dict, gamut_df); gamut_df is an empty DataFrame when the
    node has no attribute data.

    Fixes vs. original:
      * no longer rebinds gamut_dict to a fresh dict, which wiped the cache
        the caller passed in and defeated the `node in gamut_dict` lookup
        upstream in grainger_process
      * gamut_df is initialised up front, so the empty-node branch no longer
        raises UnboundLocalError at the return statement
      * removed a second, redundant drop_duplicates on Gamut_Attr_ID
    """
    gamut_df = pd.DataFrame()  # returned empty when the node has no attribute data
    gamut_df_1 = q.gamut_atts(gamut_attr_query, node, 'tax_att."categoryId"') #get gamut attribute values for each gamut_l3 node
    if not gamut_df_1.empty:
        gamut_df_2 = q.gamut_atts(gamut_usage_query, node, 'tax_att."categoryId"') #get gamut attribute usage for each gamut_l3 node
        if not gamut_df_2.empty:
            # collapse usage rows to one '; '-joined string per attribute ID
            gamut_df_2 = gamut_df_2.groupby(['Gamut_Attr_ID'])['Gamut_MERCH_Usage'].apply('; '.join).reset_index()
            gamut_df = pd.merge(gamut_df_1, gamut_df_2, how = 'outer', on = 'Gamut_Attr_ID')
        else:
            gamut_df = gamut_df_1
            gamut_df['Gamut_MERCH_Usage'] = ""
        # normalise "no usage" to NaN so the sort below puts real usage first
        gamut_df.loc[gamut_df['Gamut_MERCH_Usage'] == '', 'Gamut_MERCH_Usage'] = np.nan
        #gamut attribute IDs are unique, so no need to group by pim node before getting unique
        gamut_df = gamut_df.sort_values(['Gamut_Attr_ID', 'Gamut_MERCH_Usage']).drop_duplicates(subset = 'Gamut_Attr_ID')
        gamut_df['alt_gamut_name'] = process.process_att(gamut_df['Gamut_Attribute_Name']) #prep att name for merge
        gamut_dict[node] = gamut_df #store the processed df in dict for future reference
    else:
        print('{} EMPTY DATAFRAME'.format(node))
    return gamut_dict, gamut_df
def grainger_assign_nodes (grainger_df, gamut_df):
    """Copy the gamut node identity columns onto every grainger row,
    one grainger attribute name at a time."""
    node_columns = ['Gamut_Node_ID', 'Gamut_Category_ID', 'Gamut_Category_Name',
                    'Gamut_Node_Name', 'Gamut_PIM_Path']
    # read the gamut-side values once, before the assignment loop
    node_values = {column: gamut_df[column] for column in node_columns}
    for attribute in grainger_df['Grainger_Attribute_Name'].unique():
        row_mask = grainger_df['Grainger_Attribute_Name'] == attribute
        for column in node_columns:
            grainger_df.loc[row_mask, column] = node_values[column]
    return grainger_df
def gamut_assign_nodes (grainger_df, gamut_df):
    """Copy the grainger hierarchy columns onto every gamut row,
    one gamut attribute name at a time."""
    hierarchy_columns = ['Category_ID', 'Grainger Blue Path', 'Segment_ID',
                         'Segment_Name', 'Family_ID', 'Family_Name',
                         'Category_Name']
    # take the unique value array for each grainger column up front
    hierarchy_values = {column: grainger_df[column].unique()
                        for column in hierarchy_columns}
    for attribute in gamut_df['Gamut_Attribute_Name'].unique():
        row_mask = gamut_df['Gamut_Attribute_Name'] == attribute
        for column in hierarchy_columns:
            gamut_df.loc[row_mask, column] = hierarchy_values[column]
    return gamut_df
def grainger_process(grainger_df, gamut_dict: Dict, k):
    """Join one Grainger category's attribute rows with the attribute rows of
    every Gamut node its SKUs map to, and flag name matches.

    grainger_df: attribute rows for one grainger category (key k)
    gamut_dict:  cache of already-processed gamut nodes, extended in place
    Returns (df, gamut_dict): concatenated per-node outer merges with the
    'Matching' column populated, plus the node cache.
    """
    df = pd.DataFrame()
    # Category_Name is expected to be single-valued for one category; pop()
    # takes that single value (would silently take an arbitrary one otherwise).
    cat_name = grainger_df['Category_Name'].unique()
    cat_name = list(cat_name)
    cat_name = cat_name.pop()
    print('cat name = {} {}'.format(k, cat_name))
    grainger_skus = grainger_df.drop_duplicates(subset='Grainger_SKU') #create list of unique grainger skus that feed into gamut query
    grainger_sku_count = len(grainger_skus)
    print('grainger sku count = ', grainger_sku_count)
    grainger_df = grainger_df.drop_duplicates(subset=['Category_ID', 'Grainger_Attr_ID']) #group by Category_ID and attribute name and keep unique
    grainger_df['Grainger Blue Path'] = grainger_df['Segment_Name'] + ' > ' + grainger_df['Family_Name'] + \
                                        ' > ' + grainger_df['Category_Name']
    grainger_df = grainger_df.drop(['Grainger_SKU', 'Grainger_Attribute_Value'], axis=1) #remove unneeded columns
    grainger_df['alt_grainger_name'] = process.process_att(grainger_df['Grainger_Attribute_Name']) #prep att name for merge
    gamut_skus = q.gamut_skus(grainger_skus) #get gamut sku list to determine pim nodes to pull
    if gamut_skus.empty==False:
        #create a dictionary of the unique gamut nodes that corresponde to the grainger node
        gamut_l3 = gamut_skus['Gamut_Node_ID'].unique() #create list of pim nodes to pull
        print('GAMUT L3s ', gamut_l3)
        for node in gamut_l3:
            if node in gamut_dict:
                gamut_df = gamut_dict[node]  # cache hit: node already processed
            else:
                gamut_dict, gamut_df = gamut_process(node, gamut_dict, k)
            # NOTE(review): gamut_process's empty-data branch appears to leave
            # gamut_df unbound before this check -- confirm it always binds
            # gamut_df on every path.
            if gamut_df.empty==False:
                node_name = gamut_df['Gamut_Node_Name'].unique()
                node_name = list(node_name)
                node_name = node_name.pop()
                print('node name = {} {}'.format(node, node_name))
                #add correlating grainger and gamut data to opposite dataframes
                grainger_df = grainger_assign_nodes(grainger_df, gamut_df)
                gamut_df = gamut_assign_nodes(grainger_df, gamut_df)
                skus = gamut_skus[gamut_skus['Gamut_Node_ID'] == node]
                # outer merge on the normalised attribute names plus all the
                # shared hierarchy columns copied across by the assign helpers
                temp_df = pd.merge(grainger_df, gamut_df, left_on=['alt_grainger_name', 'Category_ID', 'Gamut_Node_ID', 'Gamut_Category_ID', \
                                   'Gamut_Category_Name', 'Gamut_Node_Name', 'Gamut_PIM_Path', 'Grainger Blue Path', \
                                   'Segment_ID', 'Segment_Name', 'Family_ID', 'Family_Name', 'Category_Name'],
                                   right_on=['alt_gamut_name', 'Category_ID', 'Gamut_Node_ID', 'Gamut_Category_ID', \
                                   'Gamut_Category_Name', 'Gamut_Node_Name', 'Gamut_PIM_Path', 'Grainger Blue Path', \
                                   'Segment_ID', 'Segment_Name', 'Family_ID', 'Family_Name', 'Category_Name'], how='outer')
                temp_df = match_category(temp_df) #compare grainger and gamut atts and create column to say whether they match
                temp_df['grainger_sku_count'] = grainger_sku_count
                temp_df['gamut_sku_count'] = len(skus)
                temp_df['Grainger-Gamut Terminal Node Mapping'] = cat_name+' -- '+node_name
                temp_df['Gamut/Grainger SKU Counts'] = temp_df['gamut_sku_count'].map(str)+' / '+temp_df['grainger_sku_count'].map(str)
                df = pd.concat([df, temp_df], axis=0, sort=False) #add prepped df for this gamut node to the final df
                # NOTE(review): substring replace -- any 'no' embedded in another
                # Matching value would also be rewritten; today only the literal
                # default value 'no' contains that substring.
                df['Matching'] = df['Matching'].str.replace('no', 'Potential Match')
            else:
                print('Gamut Node {} EMPTY DATAFRAME'.format(node))
    else:
        print('No Gamut SKUs for Grainger node {}'.format(k))
    return df, gamut_dict
def attribute_process_singular(data_type, search_data):
    """Drive the attribute pull for one batch of search values.

    data_type: 'grainger_query' (category/node IDs in search_data) or
    'sku' (SKU values in search_data).  Returns the concatenated
    attribute_df across all processed categories.

    NOTE(review): reads the module-level global `search_level` (set in the
    script body) instead of taking it as a parameter -- confirm it is always
    assigned before this is called.
    """
    gamut_df = pd.DataFrame()
    grainger_df = pd.DataFrame()
    grainger_skus = pd.DataFrame()
    attribute_df = pd.DataFrame()
    gamut_dict = dict()  # gamut node cache shared across this whole run
    start_time = time.time()
    print('working...')
    if data_type == 'grainger_query':
        if search_level == 'cat.CATEGORY_ID':
            # search_data already holds category IDs; query each one directly
            for k in search_data:
                grainger_df = q.gcom.grainger_q(grainger_attr_query, search_level, k)
                if grainger_df.empty == False:
                    temp_df, gamut_dict = grainger_process(grainger_df, gamut_dict, k)
                    attribute_df = pd.concat([attribute_df, temp_df], axis=0, sort=False)
                    print ('Grainger ', k)
                else:
                    print('No attribute data')
        else:
            # higher-level node search: expand each node k to its categories first
            for k in search_data:
                print('K = ', k)
                grainger_skus = q.grainger_nodes(k, search_level)
                grainger_l3 = grainger_skus['Category_ID'].unique() #create list of pim nodes to pull
                print('grainger L3s = ', grainger_l3)
                for j in grainger_l3:
                    grainger_df = q.gcom.grainger_q(grainger_attr_query, 'cat.CATEGORY_ID', j)
                    if grainger_df.empty == False:
                        temp_df, gamut_dict = grainger_process(grainger_df, gamut_dict, j)
                        attribute_df = pd.concat([attribute_df, temp_df], axis=0, sort=False)
                        print ('Grainger ', j)
                    else:
                        print('Grainger node {} All SKUs are R4, R9, or discontinued'.format(j))
                # NOTE(review): indentation of these two timing prints was lost in
                # extraction; reconstructed as per-node and per-branch -- verify.
                print("--- {} seconds ---".format(round(time.time() - start_time, 2)))
            print("--- {} seconds ---".format(round(time.time() - start_time, 2)))
    elif data_type == 'sku':
        # quote the SKUs into a single SQL IN-list string
        sku_str = ", ".join("'" + str(i) + "'" for i in search_data)
        grainger_df = q.gcom.grainger_q(grainger_attr_query, 'item.MATERIAL_NO', sku_str)
        if grainger_df.empty == False:
            temp_df, gamut_dict = grainger_process(grainger_df, gamut_dict, sku_str)
            attribute_df = pd.concat([attribute_df, temp_df], axis=0, sort=False)
        else:
            print('All SKUs are R4, R9, or discontinued')
        print("--- {} seconds ---".format(round(time.time() - start_time, 2)))
    return attribute_df
#determine SKU or node search
# Script entry point: choose the search type, read the input IDs/SKUs from
# files, run the attribute match, and write the output workbook(s).
search_level = 'cat.CATEGORY_ID'  # default; overridden for 'grainger_query' below
data_type = fd.search_type()
# NOTE(review): only the 'grainger_query' path is handled here; a 'sku'
# data_type (supported by attribute_process_singular) never reaches it from
# this script body -- confirm that is intentional.
if data_type == 'grainger_query':
    search_level, data_process = fd.blue_search_level()
    if data_process == 'one':
        # batch mode: one output per input file found in the directory
        file_data = settings.get_files_in_directory()
        for file in file_data:
            # first row of each file is assumed to be a header -- TODO confirm
            search_data = [int(row[0]) for row in file_data[file][1:]]
            attribute_df = attribute_process_singular(data_type, search_data)
            attribute_df = attribute_df.drop(['Count'], axis=1)
            fd.attribute_match_data_out(settings.directory_name, attribute_df, search_level)
    elif data_process == "two":
        # single combined input read via fd.data_in
        search_data = fd.data_in(data_type, settings.directory_name)
        attribute_df = attribute_process_singular(data_type, search_data)
        if attribute_df.empty==False:
            fd.attribute_match_data_out(settings.directory_name, attribute_df, search_level)
        else:
            print('EMPTY DATAFRAME')