import pandas as pd
from numpy import array
import numpy as np
import math
# Load suppliers' weekly supply volumes (sheet "供应商的供货量(m³)" = "supplier supply volume, m^3").
data_origional_supply = pd.read_excel('order.xls',sheet_name="供应商的供货量(m³)")
# Row-wise maximum over the weekly columns (columns 0-1 hold id / material category).
data_max = data_origional_supply.iloc[:,2:].apply(np.max, axis=1)
# Row-wise total supply over all weeks.
data_sum_column = data_origional_supply.iloc[:,2:].apply(np.sum, axis=1)
# Append the total as a 'sum' column.
data_origional_supply['sum'] = data_sum_column
# Append the maximum as a 'max' column.
data_origional_supply['max'] = data_max
# Zero placeholders for the 24 per-cycle averages (assumes 402 supplier rows — TODO confirm).
data_add1 = np.zeros((402,24))
# Column names for the per-cycle averages.
add_column1 = ['week1','week2','week3','week4','week5','week6','week7','week8','week9','week10','week11','week12','week13','week14','week15','week16','week17','week18','week19','week20','week21','week22','week23','week24']
# Concatenate the zero-filled average columns onto the supply table.
df_add = pd.DataFrame(data=data_add1,columns=add_column1)
data_add_supply = pd.concat([data_origional_supply,df_add], axis=1)
# Column names for the per-cycle maxima.
add_column2 = ['max1','max2','max3','max4','max5','max6','max7','max8','max9','max10','max11','max12','max13','max14','max15','max16','max17','max18','max19','max20','max21','max22','max23','max24']
# Concatenate the zero-filled per-cycle maximum columns.
df_add2 = pd.DataFrame(data=data_add1,columns=add_column2)
data_add_supply2 = pd.concat([data_add_supply,df_add2], axis=1)
def sum_week(row, i, periods=10, stride=24):
    """Average a supplier's supply for one week-of-cycle across cycles.

    The raw weekly columns are laid out as ``periods`` consecutive cycles
    of ``stride`` weeks, so the samples for one week-of-cycle sit at
    positions ``i, i + stride, i + 2*stride, ...`` of ``row``.

    Args:
        row: positionally indexable sequence of weekly values (integer
            keys are used directly).
        i: position of the first sample (column offset of the target week).
        periods: number of cycles averaged over (default 10, as before).
        stride: weeks per cycle (default 24, as before).

    Returns:
        Arithmetic mean of the ``periods`` sampled values.
    """
    # Builtin sum() replaces the manual accumulator loop.
    total = sum(row[i + j * stride] for j in range(periods))
    return total / periods
def max_week(row, i, periods=10, stride=24):
    """Maximum supply for one week-of-cycle across cycles.

    Samples ``row`` at ``i + j*stride`` for each of the ``periods`` cycles
    and returns the largest value.  Uses builtin max() so all-negative
    data is handled correctly (the original seeded the running maximum
    with 0, which would mask any negative values).

    Args:
        row: positionally indexable sequence of weekly values.
        i: position of the first sample.
        periods: number of cycles (default 10, as before).
        stride: weeks per cycle (default 24, as before).

    Returns:
        The maximum of the sampled values.
    """
    return max(row[i + j * stride] for j in range(periods))
# Fill the per-cycle average ('weekN') and per-cycle maximum ('maxN')
# columns for every supplier.  Offsets 2..25 skip the two id columns;
# row[...] integer indexing assumes the Excel columns are not
# integer-labelled, so integers resolve positionally — TODO confirm.
for index, row in data_add_supply.iterrows():
    for i in range (24):
        data_add_supply2.at[index,('week{}'.format(i+1))] = sum_week(row,2+i)
        data_add_supply2.at[index,('max{}'.format(i+1))] = max_week(row,2+i)
# Drop the 240 raw weekly columns (positions 2..241), keeping id, category,
# sum, max and the derived weekN/maxN columns.
data_add_supply2.drop(data_add_supply2.iloc[:, 2:242], inplace = True, axis = 1)
# Add zero-filled columns for the overall mean of the 24 cycle averages
# and its standard deviation.
data_add2 = np.zeros((402, 2))
add_column2 = ['average','average_sd']
df_add2 = pd.DataFrame(data=data_add2,columns=add_column2)
data_drop_supply2 = pd.concat([data_add_supply2,df_add2], axis=1)
def average(row):
    """Mean of the 24 per-cycle averages held at positions 4..27 of ``row``.

    Positions 0-3 hold id, category, sum and max, so the 24 'weekN'
    cycle averages occupy positions 4 through 27.

    Args:
        row: positionally indexable sequence (integer keys used directly).

    Returns:
        Arithmetic mean of ``row[4]`` .. ``row[27]``.
    """
    # sum() over a generator replaces the manual accumulator; the local
    # result no longer shadows the function's own name.
    total = sum(row[j] for j in range(4, 28))
    return total / 24
def average_sd(row, num):
    """Population standard deviation of ``row[4..27]`` around ``num``.

    Args:
        row: positionally indexable sequence with the 24 cycle averages
            at positions 4..27.
        num: center to measure deviations from (normally ``average(row)``).

    Returns:
        sqrt of the mean squared deviation over the 24 values.
    """
    # Generator + sum() replaces the manual accumulator; the local result
    # no longer shadows the function's own name.
    sq_total = sum(np.square(row[j] - num) for j in range(4, 28))
    return np.sqrt(sq_total / 24)
# Fill the overall mean and its standard deviation.  Two separate passes
# are required: iterrows() yields copies, so within a single pass
# row['average'] would still hold the 0.0 placeholder rather than the
# value just written via .at — the sd would be computed around 0.
# (The satisfaction fill further below already uses this two-pass pattern.)
for index,row in data_drop_supply2.iterrows():
    data_drop_supply2.at[index,'average'] = average(row)
for index,row in data_drop_supply2.iterrows():
    data_drop_supply2.at[index,'average_sd'] = average_sd(row,row['average'])
# 订货量导入
# Load the company's weekly order quantities (sheet "企业的订货量(m³)" = "enterprise order volume, m^3").
data_origional_order = pd.read_excel('order.xls',sheet_name="企业的订货量(m³)")
# Append 24 zero-filled per-cycle average columns (same week1..week24 names).
data_add3 = np.zeros((402,24))
df_add3 = pd.DataFrame(data=data_add3,columns=add_column1)
data_origional_order = pd.concat([data_origional_order,df_add3], axis=1)
def sum_week2(row, column_index, periods=10, stride=24):
    """Average the order quantity for one week-of-cycle across cycles.

    Mirrors ``sum_week`` on the supply table: samples ``row`` at
    ``column_index + j*stride`` for each of the ``periods`` cycles and
    returns their mean.

    Args:
        row: positionally indexable sequence of weekly order quantities.
        column_index: position of the first sample.
        periods: number of cycles (default 10, as before).
        stride: weeks per cycle (default 24, as before).

    Returns:
        Arithmetic mean of the sampled values.
    """
    total = sum(row[column_index + j * stride] for j in range(periods))
    return total / periods
# Fill the 24 per-cycle average order columns for every supplier.
for index,row in data_origional_order.iterrows():
    for i in range (24):
        data_origional_order.at[index,('week{}'.format(i+1))] = sum_week2(row,2+i)
# Drop the id/category columns and the 240 raw weekly order columns,
# keeping only the 24 averages.
data_drop_order = data_origional_order.drop(data_origional_order.iloc[:, 0:242], inplace = False, axis = 1)
# Concatenate supply-side stats with the averaged orders for satisfaction
# computation; ignore_index=True renumbers every column 0..N, which the
# positional offsets used below rely on.
data_merge = pd.concat([data_drop_supply2,data_drop_order],axis=1,join="outer",ignore_index=True)
def satisfaction(row, offset=50):
    """Mean supply/order ratio over the 24 cycle-average weeks.

    In ``data_merge`` the supply averages sit at positions 4..27 and the
    matching order averages ``offset`` (default 50) positions further
    right.  Weeks with a zero order are skipped to avoid dividing by 0.

    Args:
        row: positionally indexable merged row.
        offset: distance from a supply column to its order column.

    Returns:
        Mean of supply/order over weeks with a non-zero order, or 0.0
        when every order is zero (the original raised ZeroDivisionError
        in that case).
    """
    total = 0
    count = 0
    for j in range(4, 28):
        order = row[j + offset]
        if order == 0:
            continue
        total += row[j] / order
        count += 1
    # Guard: no week had a non-zero order.
    if count == 0:
        return 0.0
    return total / count
def satisfaction_sd(row, num, offset=50):
    """Mean squared deviation of the supply/order ratio from ``num``.

    NOTE(review): despite the ``_sd`` name this returns the *variance*
    (no sqrt is taken), unlike ``average_sd``.  Behavior is kept as-is
    because the downstream weights were computed from it — confirm
    whether a sqrt was intended.

    Args:
        row: positionally indexable merged row (supply averages at
            positions 4..27, orders ``offset`` positions further right).
        num: center value, normally the row's satisfaction mean.
        offset: distance from a supply column to its order column.

    Returns:
        Mean of squared deviations over weeks with a non-zero order, or
        0.0 when every order is zero (the original raised
        ZeroDivisionError in that case).
    """
    total = 0
    count = 0
    for j in range(4, 28):
        order = row[j + offset]
        if order == 0:
            continue
        total += np.square(row[j] / order - num)
        count += 1
    # Guard: no week had a non-zero order.
    if count == 0:
        return 0.0
    return total / count
# Fill the satisfaction column.
for index,row in data_merge.iterrows():
    data_merge.at[index,'satisfaction'] = satisfaction(row)
# Fill its dispersion in a second pass, so each row snapshot already
# carries the 'satisfaction' value written above (iterrows yields copies).
for index,row in data_merge.iterrows():
    data_merge.at[index,'satisfaction_sd'] = satisfaction_sd(row,row['satisfaction'])
# Drop the appended order-average columns (positions 54..77 of the merge).
data_week_check = data_merge.drop(data_merge.iloc[:, 54:78], inplace = False, axis = 1)
# Drop the 24 cycle-average columns (positions 4..27).
data_week_check2 = data_week_check.drop(data_week_check.iloc[:,4:28], inplace = False, axis = 1)
# Drop the cycle-maximum columns: their labels are taken from check2,
# where they sit at positions 4..27 after the averages were removed.
# NOTE(review): data_week_check3 is never used below — verify whether it
# is dead code.
data_week_check3 = data_week_check.drop(data_week_check2.iloc[:,4:28], inplace = False, axis = 1)
# Drop both the averages and the maxima (positions 4..51).
data_week_check4 = data_week_check.drop(data_week_check.iloc[:,4:52], inplace = False, axis = 1)
# Feature-only view: drop id, category, sum and max.
data_weight1 = data_week_check4.drop(data_week_check4.iloc[:,0:4], inplace = False, axis = 1)
# Feature view keeping max: drop id, category and sum.
data_weight2 = data_week_check4.drop(data_week_check4.iloc[:,0:3], inplace = False, axis = 1)
# Re-label the surviving columns.
data_week_check4.columns = ['offer_ID','category','sum','max','average','average_sd','satisfaction','satisfaction_sd']
data_weight1.columns = ['average','average_sd','satisfaction','satisfaction_sd']
data_weight2.columns = ['max','average','average_sd','satisfaction','satisfaction_sd']
# Keep only high-quality suppliers: total supply >= 5000 and period
# maximum >= 6000.  Index.union() replaces the deprecated `|` operator on
# Index objects, which triggered the FutureWarning captured below.
low_sum_idx = data_week_check4[data_week_check4['sum']<5000].index
low_max_idx = data_week_check4[data_weight2['max']<6000].index
data_choose = data_week_check4.drop(low_sum_idx.union(low_max_idx), inplace = False, axis = 0)
/tmp/ipykernel_974/1366495966.py:6: FutureWarning: Index.__or__ operating as a set operation is deprecated, in the future this will be a logical operation matching Series.__or__. Use index.union(other) instead
data_choose = data_week_check4.drop(data_week_check4[data_week_check4['sum']<5000].index | data_week_check4[data_weight2['max']<6000].index,inplace = False, axis = 0)
data_choose
def cal_weight(x):
    """Compute per-column weights with the entropy-weight method.

    'average' and 'satisfaction' are treated as benefit indicators
    (min-max normalized); 'average_sd' and 'satisfaction_sd' as cost
    indicators (reverse min-max normalized).  Every column of the
    normalized frame then contributes an information entropy, and the
    redundancies 1 - E are normalized into weights.

    Args:
        x: DataFrame containing at least the four columns named above
            (cal_weight2 passes an extra, pre-normalized 'max' column,
            which enters the entropy step un-renormalized, as before).

    Returns:
        Single-column DataFrame with one weight per column of ``x``,
        summing to 1.

    Changes vs. the original:
      * operates on a copy, so the caller's DataFrame is no longer
        mutated in place;
      * fixes the misplaced parenthesis in the 'satisfaction_sd'
        normalization (``np.max(col - y)``) — harmless only because
        max(col - y) == max(col) - y for scalar y;
      * removes the duplicated lnf initialization and the object-dtype
        intermediate array.
    """
    x = x.copy()
    # Benefit indicators: larger is better.
    for col in ('average', 'satisfaction'):
        lo, hi = np.min(x[col]), np.max(x[col])
        x[col] = (x[col] - lo) / (hi - lo)
    # Cost indicators: smaller is better, so the scale is reversed.
    for col in ('average_sd', 'satisfaction_sd'):
        lo, hi = np.min(x[col]), np.max(x[col])
        x[col] = (hi - x[col]) / (hi - lo)
    rows = x.index.size
    cols = x.columns.size
    k = 1.0 / math.log(rows)
    data = np.asarray(x, dtype=float)
    col_sums = data.sum(axis=0)
    # Entropy contribution of each cell: -k * p * ln(p); zero cells
    # contribute 0 (the limit of p*ln(p) as p -> 0).
    lnf = np.zeros((rows, cols))
    for i in range(rows):
        for j in range(cols):
            if data[i][j] != 0:
                p = data[i][j] / col_sums[j]
                lnf[i][j] = math.log(p) * p * (-k)
    E = pd.DataFrame(lnf)
    # Redundancy (divergence) per column.
    d = 1 - E.sum(axis=0)
    # Normalize redundancies into weights.
    w = pd.DataFrame([d[j] / sum(d) for j in range(cols)])
    return w
def cal_weight2(x):
    """Entropy weights including the 'max' column.

    Min-max normalizes 'max' (benefit indicator) in place, then delegates
    to ``cal_weight`` for the remaining columns and the entropy step.

    Args:
        x: DataFrame with a 'max' column plus the four columns that
            cal_weight normalizes.

    Returns:
        The weight DataFrame from ``cal_weight`` — the original computed
        it but fell off the end and returned None (missing ``return``).
    """
    lo = np.min(x['max'])
    hi = np.max(x['max'])
    x['max'] = x['max'].apply(lambda y: (y - lo) / (hi - lo))
    return cal_weight(x)
# Entropy weights for the four-feature table (no 'max').
w = cal_weight(data_weight1) # call cal_weight
w.index = data_weight1.columns
w.columns = ['weight']
print(w)
weight
average 0.454370
average_sd 0.476776
satisfaction 0.030089
satisfaction_sd 0.038765
# Entropy weights for the five-feature table (with 'max').
# NOTE(review): this calls cal_weight, not cal_weight2, so the 'max'
# column enters the entropy computation without min-max normalization —
# verify whether cal_weight2 was intended here.
w2 = cal_weight(data_weight2) # call cal_weight
w2.index = data_weight2.columns
w2.columns = ['weight']
print(w2)
weight
max 0.349964
average 0.295357
average_sd 0.309921
satisfaction 0.019559
satisfaction_sd 0.025199
# Weight lookup for the 4-feature scoring (from w).
dict_ref = {}
dict_ref['average'] = w.iloc[0]['weight']
dict_ref['average_sd'] = w.iloc[1]['weight']
dict_ref['satisfaction'] = w.iloc[2]['weight']
dict_ref['satisfaction_sd'] = w.iloc[3]['weight']
# Weight lookup for the 5-feature scoring (from w2).
dict_ref2 = {}
# Bug fix: 'max' was read from w (the 4-feature weights, whose row 0 is
# 'average'), not from w2 where row 0 really is 'max'.
dict_ref2['max'] = w2.iloc[0]['weight']
dict_ref2['average'] = w2.iloc[1]['weight']
dict_ref2['average_sd'] = w2.iloc[2]['weight']
dict_ref2['satisfaction'] = w2.iloc[3]['weight']
dict_ref2['satisfaction_sd'] = w2.iloc[4]['weight']
def mask(weight,row):
    """Weighted composite score for one supplier row.

    Combines the columns labelled 28 and 29 of the renumbered merge with
    the satisfaction statistics, using the weights in ``weight``.
    NOTE(review): in data_week_check2 the integer labels 28/29 fall among
    the cycle-maximum columns rather than the 'average'/'average_sd'
    columns the weight keys suggest — confirm the intended columns.

    Args:
        weight: mapping with keys average, average_sd, satisfaction,
            satisfaction_sd.
        row: mapping/Series supporting row[28], row[29],
            row['satisfaction'] and row['satisfaction_sd'].

    Returns:
        The weighted sum of the four components.
    """
    parts = (
        row[28] * weight['average'],
        row[29] * weight['average_sd'],
        row['satisfaction'] * weight['satisfaction'],
        row['satisfaction_sd'] * weight['satisfaction_sd'],
    )
    return sum(parts)
# Score every supplier with the 4-feature weights and sort descending.
for index,row in data_week_check2.iterrows():
    data_week_check2.at[index,'result'] = mask(dict_ref,row)
sort_result = data_week_check2.sort_values(by="result",ascending=False,inplace=False)
sort_result.columns = ['offer_ID','category','sum','max','week1','week2','week3','week4','week5','week6','week7','week8','week9','week10','week11','week12','week13','week14','week15','week16','week17','week18','week19','week20','week21','week22','week23','week24','average','average_sd','satisfaction','satisfaction_sd','result']
# Top 50 suppliers by composite score.
head50 = sort_result.iloc[0:50,:]
# Keep only the score components for plotting.
head50_plot = head50.drop(head50.iloc[:, 0:28], inplace = False, axis = 1)
head50_plot2 = head50_plot.drop(['result'], inplace = False, axis = 1)
# Rescale each component by (hard-coded) weight times a magnification so
# all four are visible in one stacked bar — the constants mirror the
# printed entropy weights above; they will drift if the data changes.
head50_plot2['average'] *= 0.454370*2
head50_plot2['average_sd'] *=0.476776*2
head50_plot2['satisfaction'] *=0.030089*1000
head50_plot2['satisfaction_sd'] *=0.038765*1000
# Stacked horizontal bar chart of score factors for the top 50.
head50_plot2.plot.barh(stacked=True,figsize=(15,20),fontsize= 20)
# Load per-transporter loss rates (sheet "运输损耗率(%)" = "transport loss rate, %").
data_origional_trans = pd.read_excel('transform.xls',sheet_name="运输损耗率(%)")
# (notebook) count zero entries — weeks with no shipment — per transporter.
(data_origional_trans == 0).astype(int).sum(axis=1)
def trans_average(row, n_weeks=240):
    """Mean transport loss rate over weeks with a shipment.

    Positions 1..n_weeks of ``row`` hold weekly loss rates (position 0
    is the transporter id); zero entries mean "no shipment" and are
    excluded from the mean.

    Args:
        row: positionally indexable sequence.
        n_weeks: number of weekly columns (default 240, as before).

    Returns:
        Mean of the non-zero rates, or 0.0 when every week is zero (the
        original raised ZeroDivisionError in that case).
    """
    values = [row[j] for j in range(1, n_weeks + 1) if row[j] != 0]
    # Guard: transporter never shipped anything.
    if not values:
        return 0.0
    return sum(values) / len(values)
def trans_average_sd(row, num, n_weeks=240):
    """Standard deviation of the non-zero loss rates around ``num``.

    Args:
        row: positionally indexable sequence with weekly loss rates at
            positions 1..n_weeks (position 0 is the transporter id).
        num: center value, normally ``trans_average(row)``.
        n_weeks: number of weekly columns (default 240, as before).

    Returns:
        sqrt of the mean squared deviation over non-zero weeks, or 0.0
        when every week is zero (the original raised ZeroDivisionError
        in that case).
    """
    devs = [np.square(row[j] - num) for j in range(1, n_weeks + 1) if row[j] != 0]
    # Guard: transporter never shipped anything.
    if not devs:
        return 0.0
    return np.sqrt(sum(devs) / len(devs))
# Per-transporter mean loss rate, then (second pass, so the row snapshot
# carries the 'sum_arg' just written) its dispersion.
for index,row in data_origional_trans.iterrows():
    data_origional_trans.at[index,'sum_arg'] = trans_average(row)
for index,row in data_origional_trans.iterrows():
    data_origional_trans.at[index,'sum_arg_sd'] = trans_average_sd(row,row['sum_arg'])
# Keep only the id, mean and sd columns.
test = data_origional_trans.drop(data_origional_trans.iloc[:, 1:241], inplace = False, axis = 1)
# Per-week production capacity each top-50 supplier can support, obtained
# by dividing its supply by a per-material factor (A: 0.6, B: 0.66,
# C: 0.72 — presumably m^3 of raw material consumed per m^3 of product;
# confirm against the problem statement).
dictOfWords = {'week1':{},'week2':{},'week3':{},'week4':{},'week5':{},'week6':{},'week7':{},'week8':{},'week9':{},'week10':{},'week11':{},'week12':{},'week13':{},'week14':{},'week15':{},'week16':{},'week17':{},'week18':{},'week19':{},'week20':{},'week21':{},'week22':{},'week23':{},'week24':{}}
col_temp_list2 = head50['category'].tolist()
for i in range (24):
    # supplier id -> cycle-average supply for this week-of-cycle.
    area_dict = dict(zip(head50['offer_ID'], head50[add_column1[i]]))
    j = 0
    for key,value in area_dict.items():
        # col_temp_list2[j] pairs with the j-th dict entry — this relies
        # on dicts preserving head50's row order (guaranteed on 3.7+).
        if (col_temp_list2[j] == 'A'):
            dictOfWords[add_column1[i]].update({key:value/0.6})
        elif (col_temp_list2[j] == 'C'):
            dictOfWords[add_column1[i]].update({key:value/0.72})
        elif (col_temp_list2[j] == 'B'):
            dictOfWords[add_column1[i]].update({key:value/0.66})
        j += 1
    # Sort each week's suppliers by capacity, largest first.
    dictOfWords[add_column1[i]] = dict(sorted(dictOfWords[add_column1[i]].items(), key=lambda items: items[1],reverse=True))
# dictOfWords = dict(sorted(dictOfWords.items(), key=lambda items: items[1],reverse=True))
# Same weeks in reverse order; NOTE(review): dictOfWords2 is unused below.
dictOfWords2 = dict(reversed(dictOfWords.items()))
# Greedy weekly supplier selection: walk the 24 weeks, taking the
# highest-capacity suppliers each week until a transport cap is reached,
# while tracking a running inventory ('keep') against the weekly demand.
# Magic numbers (assumptions — confirm against the problem statement):
#   790 / 1200   : per-supplier capacity cutoffs (weeks < 10 vs the rest)
#   0.9 / 0.95   : presumably post-loss fractions credited to inventory /
#                  counted against transport capacity
#   48000 / 42000: weekly transport caps; 28200: weekly consumption;
#   84600 = 3 weeks of consumption (inventory ceiling).
keep = 0           # running inventory of usable capacity
week_cnt = 0       # 0-based week counter
temp_store = {}    # supplier id -> number of weeks selected
for key in dictOfWords:
    print(key)
    trans_store = 0    # volume transported so far this week
    if(week_cnt < 10):
        # NOTE(review): the inner loop rebinds 'key', shadowing the week
        # name; dictOfWords[key].items() is evaluated before the
        # rebinding, so behavior is correct but fragile.
        for key, value in dictOfWords[key].items():
            if(value >= 790):
                print(key,int(value))
                if key in temp_store:
                    temp_store[key] += 1
                else:
                    temp_store[key] = 1
                keep += int(value*0.9)
                trans_store += int(value*0.95)
                if(trans_store > 48000):
                    break
    else:
        j = 0
        for key, value in dictOfWords[key].items():
            if( value >= 1200): # col_temp_list2[j] != 'B' and
                print(key,int(value))
                # temp_store.add(key)
                if key in temp_store:
                    temp_store[key] += 1
                else:
                    temp_store[key] = 1
                keep += int(value*0.9)
                trans_store += int(value*0.95)
                if(trans_store > 42000):
                    break
            # Stop once the inventory ceiling is reached.
            if(keep > 84600):
                break
            j += 1
    # Consume one week of production demand.
    keep -= 28200
    # Abort if inventory falls below the safety margin (30% of one week)
    # during the ramp-up weeks, or goes negative afterwards.
    if(week_cnt != 0 and week_cnt < 10 and keep < 28200*0.3): # 28200*0.3
        print('error1')
        print(keep)
        break
    if(week_cnt > 10 and keep < 0): # 28200*0.3
        print('error2')
        print(keep)
        break
    week_cnt += 1
    # print('week_keep')
    print(keep)
print(keep)
print(temp_store)
print(len(temp_store))
week1
S140 28274
S139 15465
S395 6246
S229 3963
20353
week2
S108 11946
S361 3151
S229 2435
S055 1842
S330 1437
S340 1256
S308 1251
S275 1221
S329 1180
S143 1130
S131 1059
S151 906
S208 896
S352 893
S268 879
20489
week3
S139 14571
S330 2543
S361 2422
S229 2261
S086 1756
S108 1609
S308 1539
S340 1425
S329 1405
S275 1368
S131 1219
S143 1121
S074 1083
S151 1072
S352 995
S268 943
S208 925
26720
week4
S361 3277
S338 3127
S229 2188
S308 1624
S340 1619
S330 1512
S131 1298
S275 1280
S329 1270
S114 1201
S108 1151
S143 1125
S151 1036
S268 988
S208 983
S352 981
S364 936
S291 933
S074 823
23136
week5
S140 32262
S361 3351
S330 2584
S229 2435
S340 1772
S308 1739
S275 1568
S329 1541
S131 1536
S108 1486
S143 1276
41330
week6
S361 2837
S229 2636
S308 2160
S340 1789
S329 1618
S275 1570
S330 1431
S108 1365
S131 1353
S143 1295
S352 1155
S151 1148
S268 968
S074 854
33088
week7
S229 2580
S361 2556
S330 2028
S340 1406
S308 1337
S329 1286
S275 1226
S108 1210
S143 1195
S131 1136
S151 1033
S352 998
S074 909
21899
week8
S229 3333
S361 2580
S330 1839
S308 1645
S340 1416
S275 1230
S329 1225
S108 1207
S143 1151
S131 1134
S356 1129
S282 1041
S352 960
S151 870
S268 829
S074 797
13846
week9
S229 3561
S361 3013
S330 2250
S275 1505
S340 1493
S395 1493
S143 1466
S329 1408
S108 1356
S308 1353
S282 1283
S151 1209
S356 1194
S131 1189
S306 1168
S074 1061
S352 1060
S268 840
10758
week10
S395 5223
S229 3135
S361 3058
S330 1624
S308 1619
S356 1579
S282 1433
S340 1360
S329 1318
S108 1283
S275 1263
S131 1166
S143 1146
S306 1104
S352 1065
S151 1044
S268 827
8881
week11
S151 26041
S201 4675
S395 3385
S229 2676
S330 2227
S361 2201
S308 1790
S282 1478
20706
week12
S151 29537
S395 10480
S229 3623
S361 3043
34520
week13
S201 51628
52785
week14
S395 12768
S201 4253
S229 2910
S361 2711
S330 2228
S282 1865
S275 1610
S340 1454
S329 1448
S308 1351
S108 1306
S131 1301
S356 1281
57422
week15
S395 6875
S229 2665
S361 2454
S330 2106
S282 1748
S201 1641
S308 1618
S356 1551
S340 1501
S329 1470
S108 1440
S210 1436
S275 1435
S151 1211
55455
week16
S361 3680
S330 2763
S229 2736
S201 2698
S151 2084
S395 2015
S282 1795
S356 1730
S108 1639
S308 1557
S340 1469
S275 1378
S329 1308
S143 1263
52558
week17
S395 10430
S229 2896
S361 2877
S201 2558
S282 1948
S330 1872
S275 1510
S308 1480
S108 1437
S356 1420
S340 1386
S151 1280
S306 1280
S329 1276
S143 1221
S131 1203
56826
week18
S229 2911
S361 2502
S395 2461
S201 2286
S282 2040
S356 1866
S330 1769
S308 1459
S329 1368
S340 1342
S143 1321
S275 1256
S108 1216
50046
week19
S229 3966
S356 2483
S282 2238
S361 2020
S201 1915
S308 1375
S329 1363
S340 1351
S330 1292
S275 1260
S108 1231
40292
week20
S201 3748
S229 3668
S282 2820
S284 2784
S361 2175
S330 1998
S356 1898
S308 1766
S395 1541
S329 1533
S275 1478
S131 1371
S108 1224
S143 1218
38391
week21
S229 4521
S143 4201
S201 3568
S282 2873
S361 2077
S356 2065
S308 1816
S330 1698
S108 1348
S275 1293
S329 1266
S340 1228
35351
week22
S229 4238
S361 3911
S282 2751
S201 2615
S308 1519
S329 1496
S356 1494
S143 1396
S108 1293
S340 1242
26912
week23
S201 15511
S361 3829
S229 3565
S330 2606
S282 2240
S356 1748
S108 1596
S074 1448
S151 1405
S275 1385
S340 1340
S329 1286
S308 1250
34000
week24
S330 14800
S308 12395
S201 5730
S229 3508
S361 2359
S356 2216
S143 1518
S340 1456
S329 1428
46668
46668
{'S140': 2, 'S139': 2, 'S395': 11, 'S229': 23, 'S108': 19, 'S361': 22, 'S055': 1, 'S330': 20, 'S340': 19, 'S308': 21, 'S275': 18, 'S329': 20, 'S143': 16, 'S131': 12, 'S151': 14, 'S208': 3, 'S352': 8, 'S268': 7, 'S086': 1, 'S074': 7, 'S338': 1, 'S114': 1, 'S364': 1, 'S291': 1, 'S356': 14, 'S282': 14, 'S306': 3, 'S201': 13, 'S210': 1, 'S284': 1}
30
# (notebook) inspect the column labels of head50.
head50.columns
# Index the top-50 table by supplier id for label-based selection.
head50_test = head50.set_index("offer_ID", inplace=False)
# Hand-picked shortlist — the ids match the high-frequency entries of
# temp_store printed above (presumably chosen from it; confirm).
data_select = head50_test.loc[['S140','S139','S108','S330','S340','S308','S131','S395','S229','S275','S329','S143','S282','S201','S361','S151','S356']]
data_select['index'] = data_select.index
# Sub-list 1 of the shortlist.
data_select3 = data_select.loc[['S140','S139','S395','S229','S201','S361','S151']]
data_select3['index'] = data_select3.index
# Sub-list 2: the remaining shortlisted suppliers.
data_select4 = data_select.loc[['S108','S330','S340','S308','S131','S275','S329','S143','S282','S356']]
data_select4['index'] = data_select4.index
# Transporter capability scoring: rank-based scores with the last place
# at 60 and +5 per rank upwards, combined as 70% mean loss score and
# 30% loss-sd score.
trans_arg = [70,85,95,75,60,90,65,80]
trans_arg_sd = [85,90,95,65,60,75,70,80]
# Weighted combination of the two rank scores, element by element.
trans_result = [a * 0.7 + s * 0.3 for a, s in zip(trans_arg, trans_arg_sd)]
print(trans_result)
print('T3','T2','T6','T8','T1','T4','T7','T5')
[74.5, 86.5, 95.0, 72.0, 60.0, 85.5, 66.5, 80.0]
T3 T2 T6 T8 T1 T4 T7 T5
test['evaluation'] = trans_result