10271, 10273, 10289, 10301, 10303,
10313, 10321, 10331, 10333, 10337, 10343, 10357, 10369,
10391, 10399, 10427, 10429, 10433, 10453, 10457, 10459,
10463, 10477, 10487, 10499, 10501, 10513, 10529, 10531,
10559, 10567, 10589, 10597, 10601, 10607, 10613, 10627,
10631, 10639, 10651, 10657, 10663, 10667, 10687, 10691,
10709, 10711, 10723, 10729, 10733, 10739, 10753, 10771,
10781, 10789, 10799, 10831, 10837, 10847, 10853, 10859,
10861, 10867, 10883, 10889, 10891, 10903, 10909, 10937,
10939, 10949, 10957, 10973, 10979, 10987, 10993, 11003,
11027, 11047, 11057, 11059, 11069, 11071, 11083, 11087,
11093, 11113, 11117, 11119, 11131, 11149, 11159, 11161,
11171, 11173, 11177, 11197, 11213, 11239, 11243, 11251,
11257, 11261, 11273, 11279, 11287, 11299, 11311, 11317,
11321, 11329, 11351, 11353, 11369, 11383, 11393, 11399,
11411, 11423, 11437, 11443, 11447, 11467, 11471, 11483,
11489, 11491, 11497, 11503, 11519, 11527, 11549, 11551,
11579, 11587, 11593, 11597, 11617, 11621, 11633, 11657,
11677, 11681, 11689, 11699, 11701, 11717, 11719, 11731,
11743, 11777, 11779, 11783, 11789, 11801, 11807, 11813,
11821, 11827, 11831, 11833, 11839, 11863, 11867, 11887,
11897, 11903, 11909, 11923, 11927, 11933, 11939, 11941,
11953, 11959, 11969, 11971, 11981, 11987, 12007, 12011,
12037, 12041, 12043, 12049, 12071, 12073, 12097, 12101,
12107, 12109, 12113, 12119, 12143, 12149, 12157, 12161,
12163, 12197, 12203, 12211, 12227, 12239, 12241, 12251,
12253, 12263, 12269, 12277, 12281, 12289, 12301, 12323,
12329, 12343, 12347, 12373, 12377, 12379, 12391, 12401,
12409, 12413, 12421, 12433, 12437, 12451, 12457, 12473,
12479, 12487, 12491, 12497, 12503, 12511, 12517, 12527,
12539, 12541, 12547, 12553, 12569, 12577, 12583, 12589,
12601, 12611, 12613, 12619, 12637, 12641, 12647, 12653,
12659, 12671, 12689, 12697, 12703, 12713, 12721, 12739,
12743, 12757, 12763, 12781, 12791, 12799, 12809, 12821,
12823, 12829, 12841, 12853, 12889, 12893, 12899, 12907,
12911, 12917, 12919, 12923, 12941, 12953, 12959, 12967,
12973, 12979, 12983, 13001, 13003, 13007, 13009, 13033,
13037, 13043, 13049, 13063, 13093, 13099, 13103, 13109,
13121, 13127, 13147, 13151, 13159, 13163, 13171, 13177,
13183, 13187, 13217, 13219, 13229, 13241, 13249, 13259,
13267, 13291, 13297, 13309, 13313, 13327, 13331, 13337,
13339, 13367, 13381, 13397, 13399, 13411, 13417, 13421,
13441, 13451, 13457, 13463, 13469, 13477, 13487, 13499,
13513, 13523, 13537, 13553, 13567, 13577, 13591, 13597,
13613, 13619, 13627, 13633, 13649, 13669, 13679, 13681,
13687, 13691, 13693, 13697, 13709, 13711, 13721, 13723,
13729, 13751, 13757, 13759, 13763, 13781, 13789, 13799,
13807, 13829, 13831, 13841, 13859, 13873, 13877, 13879,
13883, 13901, 13903, 13907, 13913, 13921, 13931, 13933,
13963, 13967, 13997, 13999, 14009, 14011, 14029, 14033,
14051, 14057, 14071, 14081, 14083, 14087, 14107, 14143,
14149, 14153, 14159, 14173, 14177, 14197, 14207, 14221,
14243, 14249, 14251, 14281, 14293, 14303, 14321, 14323,
14327, 14341, 14347, 14369, 14387, 14389, 14401, 14407,
14411, 14419, 14423, 14431, 14437, 14447, 14449, 14461,
14479, 14489, 14503, 14519, 14533, 14537, 14543, 14549,
14551, 14557, 14561, 14563, 14591, 14593, 14621, 14627,
14629, 14633, 14639, 14653, 14657, 14669, 14683, 14699,
14713, 14717, 14723, 14731, 14737, 14741, 14747, 14753,
14759, 14767, 14771, 14779, 14783, 14797, 14813, 14821,
14827, 14831, 14843, 14851, 14867, 14869, 14879, 14887,
14891, 14897, 14923, 14929, 14939, 14947, 14951, 14957,
14969, 14983, 15013, 15017, 15031, 15053, 15061, 15073,
15077, 15083, 15091, 15101, 15107, 15121, 15131, 15137,
15139, 15149, 15161, 15173, 15187, 15193, 15199, 15217,
15227, 15233, 15241, 15259, 15263, 15269, 15271, 15277,
15287, 15289, 15299, 15307, 15313, 15319, 15329, 15331,
15349, 15359, 15361, 15373, 15377, 15383, 15391, 15401,
15413, 15427, 15439, 15443, 15451, 15461, 15467, 15473,
15493, 15497, 15511, 15527, 15541, 15551, 15559, 15569,
15581, 15583, 15601, 15607, 15619, 15629, 15641, 15643,
15647, 15649, 15661, 15667, 15671, 15679, 15683, 15727,
15731, 15733, 15737, 15739, 15749, 15761, 15767, 15773,
15787, 15791, 15797, 15803, 15809, 15817, 15823, 15859,
15877, 15881, 15887, 15889, 15901, 15907, 15913, 15919,
15923, 15937, 15959, 15971, 15973, 15991, 16001, 16007,
16033, 16057, 16061, 16063, 16067, 16069, 16073, 16087,
16091, 16097, 16103, 16111, 16127, 16139, 16141, 16183,
16187, 16189, 16193, 16217, 16223, 16229, 16231, 16249,
16253, 16267, 16273, 16301, 16319, 16333, 16339, 16349,
16361, 16363, 16369, 16381, 16411, 16417, 16421, 16427,
16433, 16447, 16451, 16453, 16477, 16481, 16487, 16493,
16519, 16529, 16547, 16553, 16561, 16567, 16573, 16603,
16607, 16619, 16631, 16633, 16649, 16651, 16657, 16661,
16673, 16691, 16693, 16699, 16703, 16729, 16741, 16747,
16759, 16763, 16787, 16811, 16823, 16829, 16831, 16843,
16871, 16879, 16883, 16889, 16901, 16903, 16921, 16927,
16931, 16937, 16943, 16963, 16979, 16981, 16987, 16993,
17011, 17021, 17027, 17029, 17033, 17041, 17047, 17053,
17077, 17093, 17099, 17107, 17117, 17123, 17137, 17159,
17167, 17183, 17189, 17191, 17203, 17207, 17209, 17231,
17239, 17257, 17291, 17293, 17299, 17317, 17321, 17327,
17333, 17341, 17351, 17359, 17377, 17383, 17387, 17389,
17393, 17401, 17417, 17419, 17431, 17443, 17449, 17467,
17471, 17477, 17483, 17489, 17491, 17497, 17509, 17519,
17539, 17551, 17569, 17573, 17579, 17581, 17597, 17599,
17609, 17623, 17627, 17657, 17659, 17669, 17681, 17683,
17707, 17713, 17729, 17737, 17747, 17749, 17761, 17783,
17789, 17791, 17807, 17827, 17837, 17839, 17851, 17863,
17881, 17891, 17903, 17909, 17911, 17921, 17923, 17929,
17939, 17957, 17959, 17971, 17977, 17981, 17987, 17989,
18013, 18041, 18043, 18047, 18049, 18059, 18061, 18077,
18089, 18097, 18119, 18121, 18127, 18131, 18133, 18143,
18149, 18169, 18181, 18191, 18199, 18211, 18217, 18223,
18229, 18233, 18251, 18253, 18257, 18269, 18287, 18289,
18301, 18307, 18311, 18313, 18329, 18341, 18353, 18367,
18371, 18379, 18397, 18401, 18413, 18427, 18433, 18439,
18443, 18451, 18457, 18461, 18481, 18493, 18503, 18517,
18521, 18523, 18539, 18541, 18553, 18583, 18587, 18593,
18617, 18637, 18661, 18671, 18679, 18691, 18701, 18713,
18719, 18731, 18743, 18749, 18757, 18773, 18787, 18793,
18797, 18803, 18839, 18859, 18869, 18899, 18911, 18913,
18917, 18919, 18947, 18959, 18973, 18979, 19001, 19009,
19013, 19031, 19037, 19051, 19069, 19073, 19079, 19081,
19087, 19121, 19139, 19141, 19157, 19163, 19181, 19183,
19207, 19211, 19213, 19219, 19231, 19237, 19249, 19259,
19267, 19273, 19289, 19301, 19309, 19319, 19333, 19373,
19379, 19381, 19387, 19391, 19403, 19417, 19421, 19423,
19427, 19429, 19433, 19441, 19447, 19457, 19463, 19469,
19471, 19477, 19483, 19489, 19501, 19507, 19531, 19541,
19543, 19553, 19559, 19571, 19577, 19583, 19597, 19603,
19609, 19661, 19681, 19687, 19697, 19699, 19709, 19717,
19727, 19739, 19751, 19753, 19759, 19763, 19777, 19793,
19801, 19813, 19819, 19841, 19843, 19853, 19861, 19867,
19889, 19891, 19913, 19919, 19927, 19937, 19949, 19961,
19963, 19973, 19979, 19991, 19993, 19997, 20011, 20021,
20023, 20029, 20047, 20051, 20063, 20071, 20089, 20101,
20107, 20113, 20117, 20123, 20129, 20143, 20147, 20149,
20161, 20173, 20177, 20183, 20201, 20219, 20231, 20233,
20249, 20261, 20269, 20287, 20297, 20323, 20327, 20333,
20341, 20347, 20353, 20357, 20359, 20369, 20389, 20393,
20399, 20407, 20411, 20431, 20441, 20443, 20477, 20479,
20483, 20507, 20509, 20521, 20533, 20543, 20549, 20551,
20563, 20593, 20599, 20611, 20627, 20639, 20641, 20663,
20681, 20693, 20707, 20717, 20719, 20731, 20743, 20747,
20749, 20753, 20759, 20771, 20773, 20789, 20807, 20809,
20849, 20857, 20873, 20879, 20887, 20897, 20899, 20903,
20921, 20929, 20939, 20947, 20959, 20963, 20981, 20983,
21001, 21011, 21013, 21017, 21019, 21023, 21031, 21059,
21061, 21067, 21089, 21101, 21107, 21121, 21139, 21143,
21149, 21157, 21163, 21169, 21179, 21187, 21191, 21193,
21211, 21221, 21227, 21247, 21269, 21277, 21283, 21313,
21317, 21319, 21323, 21341, 21347, 21377, 21379, 21383,
21391, 21397, 21401, 21407, 21419, 21433, 21467, 21481,
21487, 21491, 21493, 21499, 21503, 21517, 21521, 21523,
21529, 21557, 21559, 21563, 21569, 21577, 21587, 21589,
21599, 21601, 21611, 21613, 21617, 21647, 21649, 21661,
21673, 21683, 21701, 21713, 21727, 21737, 21739, 21751,
21757, 21767, 21773, 21787, 21799, 21803, 21817, 21821,
21839, 21841, 21851, 21859, 21863, 21871, 21881, 21893,
21911, 21929, 21937, 21943, 21961, 21977, 21991, 21997,
22003, 22013, 22027, 22031, 22037, 22039, 22051, 22063,
22067, 22073, 22079, 22091, 22093, 22109, 22111, 22123,
22129, 22133, 22147, 22153, 22157, 22159, 22171, 22189,
22193, 22229, 22247, 22259, 22271, 22273, 22277, 22279,
22283, 22291, 22303, 22307, 22343, 22349, 22367, 22369,
22381, 22391, 22397, 22409, 22433, 22441, 22447, 22453,
22469, 22481, 22483, 22501, 22511, 22531, 22541, 22543,
22549, 22567, 22571, 22573, 22613, 22619, 22621, 22637,
22639, 22643, 22651, 22669, 22679, 22691, 22697, 22699,
22709, 22717, 22721,
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_DHHSC.append({D[i], H[j], H[k], S[l], C[m]})
STRAIGHT_DHHSC.append({D[9], H[10], H[11], S[12], C[0]})
STRAIGHT_DHHSH = []
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_DHHSH.append({D[i], H[j], H[k], S[l], H[m]})
STRAIGHT_DHHSH.append({D[9], H[10], H[11], S[12], H[0]})
STRAIGHT_DHHSD = []
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_DHHSD.append({D[i], H[j], H[k], S[l], D[m]})
STRAIGHT_DHHSD.append({D[9], H[10], H[11], S[12], D[0]})
STRAIGHT_DHHCS = []
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_DHHCS.append({D[i], H[j], H[k], C[l], S[m]})
STRAIGHT_DHHCS.append({D[9], H[10], H[11], C[12], S[0]})
STRAIGHT_DHHCC = []
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_DHHCC.append({D[i], H[j], H[k], C[l], C[m]})
STRAIGHT_DHHCC.append({D[9], H[10], H[11], C[12], C[0]})
STRAIGHT_DHHCH = []
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_DHHCH.append({D[i], H[j], H[k], C[l], H[m]})
STRAIGHT_DHHCH.append({D[9], H[10], H[11], C[12], H[0]})
STRAIGHT_DHHCD = []
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_DHHCD.append({D[i], H[j], H[k], C[l], D[m]})
STRAIGHT_DHHCD.append({D[9], H[10], H[11], C[12], D[0]})
STRAIGHT_DHHHS = []
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_DHHHS.append({D[i], H[j], H[k], H[l], S[m]})
STRAIGHT_DHHHS.append({D[9], H[10], H[11], H[12], S[0]})
STRAIGHT_DHHHC = []
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_DHHHC.append({D[i], H[j], H[k], H[l], C[m]})
STRAIGHT_DHHHC.append({D[9], H[10], H[11], H[12], C[0]})
STRAIGHT_DHHHH = []
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_DHHHH.append({D[i], H[j], H[k], H[l], H[m]})
STRAIGHT_DHHHH.append({D[9], H[10], H[11], H[12], H[0]})
STRAIGHT_DHHHD = []
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_DHHHD.append({D[i], H[j], H[k], H[l], D[m]})
STRAIGHT_DHHHD.append({D[9], H[10], H[11], H[12], D[0]})
STRAIGHT_DHHDS = []
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_DHHDS.append({D[i], H[j], H[k], D[l], S[m]})
STRAIGHT_DHHDS.append({D[9], H[10], H[11], D[12], S[0]})
STRAIGHT_DHHDC = []
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_DHHDC.append({D[i], H[j], H[k], D[l], C[m]})
STRAIGHT_DHHDC.append({D[9], H[10], H[11], D[12], C[0]})
STRAIGHT_DHHDH = []
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_DHHDH.append({D[i], H[j], H[k], D[l], H[m]})
STRAIGHT_DHHDH.append({D[9], H[10], H[11], D[12], H[0]})
STRAIGHT_DHHDD = []
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_DHHDD.append({D[i], H[j], H[k], D[l], D[m]})
STRAIGHT_DHHDD.append({D[9], H[10], H[11], D[12], D[0]})
STRAIGHT_DHDSS = []
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_DHDSS.append({D[i], H[j], D[k], S[l], S[m]})
STRAIGHT_DHDSS.append({D[9], H[10], D[11], S[12], S[0]})
STRAIGHT_DHDSC = []
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_DHDSC.append({D[i], H[j], D[k], S[l], C[m]})
STRAIGHT_DHDSC.append({D[9], H[10], D[11], S[12], C[0]})
STRAIGHT_DHDSH = []
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_DHDSH.append({D[i], H[j], D[k], S[l], H[m]})
STRAIGHT_DHDSH.append({D[9], H[10], D[11], S[12], H[0]})
STRAIGHT_DHDSD = []
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_DHDSD.append({D[i], H[j], D[k], S[l], D[m]})
STRAIGHT_DHDSD.append({D[9], H[10], D[11], S[12], D[0]})
STRAIGHT_DHDCS = []
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_DHDCS.append({D[i], H[j], D[k], C[l], S[m]})
STRAIGHT_DHDCS.append({D[9], H[10], D[11], C[12], S[0]})
STRAIGHT_DHDCC = []
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_DHDCC.append({D[i], H[j], D[k], C[l], C[m]})
STRAIGHT_DHDCC.append({D[9], H[10], D[11], C[12], C[0]})
STRAIGHT_DHDCH = []
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_DHDCH.append({D[i], H[j], D[k], C[l], H[m]})
STRAIGHT_DHDCH.append({D[9], H[10], D[11], C[12], H[0]})
STRAIGHT_DHDCD = []
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_DHDCD.append({D[i], H[j], D[k], C[l], D[m]})
STRAIGHT_DHDCD.append({D[9], H[10], D[11], C[12], D[0]})
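# --- Hedged sketch (not part of the original module): the per-suit-pattern blocks
# --- above can be generated in one pass. This assumes D, H, S, C are the 13-element
# --- per-suit card lists used throughout this file, indexed by rank 0..12, exactly
# --- as in the loops above.
from itertools import product

SUITS = {'D': D, 'H': H, 'S': S, 'C': C}

def straights_for_pattern(pattern):
    """All five-card straights whose suits follow `pattern`, e.g. 'DHDCD'."""
    hands = []
    for i in range(9):  # consecutive ranks i .. i+4, as enforced by the if-chains above
        hands.append({SUITS[c][i + k] for k, c in enumerate(pattern)})
    # the wrap-around straight appended explicitly above: ranks 9, 10, 11, 12, 0
    hands.append({SUITS[c][(9 + k) % 13] for k, c in enumerate(pattern)})
    return hands

# e.g. ALL_STRAIGHTS['DHDCD'] reproduces the STRAIGHT_DHDCD list built above
ALL_STRAIGHTS = {''.join(p): straights_for_pattern(p) for p in product('DHSC', repeat=5)}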
(" + n_args + " given)")));
}
} else {
if (f.func_args.length > args.length) {
if (f.func_minargs == 1) {
@{{raise}}($new(@{{TypeError}}, B$str(f.__name__ + "() takes at least 1 argument (0 given)")));
} else {
@{{raise}}($new(@{{TypeError}}, B$str(f.__name__ + "() takes at least " + f.func_minargs + " arguments (" + n_args + " given)")));
}
}
if (f.func_minargs == 1) {
@{{raise}}($new(@{{TypeError}}, B$str(f.__name__ + "() takes at most 1 argument (" + n_args + " given)")));
} else {
@{{raise}}($new(@{{TypeError}}, B$str(f.__name__ + "() takes at most " + f.func_minargs + " arguments (" + n_args + " given)")));
}
}
}
if (f.func_starargs !== null) {
if (star === null) {
//star = B$tuple([]);
star = empty_tuple;
}
args.push(star);
}
if (module !== null && lineno !== null) {
if (typeof f._module != "undefined" &&
f._module !== null &&
typeof f._lineno != "undefined" &&
f._lineno !== null) {
$pyjs.track.module = f._module;
$pyjs.track.lineno = f._lineno;
}
}
if (dstar !== null) {
args.push(dstar);
args.push(null); // no named args
} else if (f.func_dstarargs !== null) {
dstar = B$dict();
args.push(dstar);
args.push(null); // no named args
}
}
if (typeof obj['$inst'] != "undefined" || typeof obj['func_type'] != "undefined") {
rval = f.apply(module, args);
} else {
// obj is an ordinary javascript object
rval = f.apply(obj, args);
}
if (typeof rval == "undefined") {
if (typeof f['__name__'] == 'undefined') {
return ${0,None}$;
} else {
@{{raise}}($new(@{{ValueError}}, B$str("return value of call is undefined")));
}
}
if (module !== null && lineno !== null) {
$pyjs.track = $pyjs.trackstack[track_len];
$pyjs.trackstack.splice(track_len, $pyjs.trackstack.length);
if (typeof $pyjs.track == "undefined" || $pyjs.track.lineno != lineno || $pyjs.track.module !== module) {
debugger;
}
}
return rval;""", locals())
def repl___new__(self, instance, cls, add_dict=True):
if add_dict is not True:
if add_dict.lower().strip() == 'false':
add_dict = False
else:
add_dict = True
if add_dict:
add_dict = """
if (typeof ${cls} == "undefined") {
debugger;
}
if (typeof ${cls}['__slots__'] == "undefined" || ${cls}['__slots__'].length > 0) {
${instance}['__dict__'] = B$dict();
${instance}['$dict'] = ${instance}['__dict__']['__object'];
}"""
else:
add_dict = ''
return self.substitute("""\
var ${instance} = function ( ) {
var args = Array.prototype.slice.call(arguments);
if (arguments.callee['__class__'] === @{{instancemethod}}) {
if (arguments.callee['im_self'] !== null) {
return @{{fcall}}.apply(this, [this, null, arguments.callee['im_func'], null, arguments.callee['im_self']].concat(args));
}
}
var a = @{{_getattr}}(arguments.callee, '__call__');
if (typeof a == "undefined") {
@{{raise}}($new(@{{TypeError}}, B$str("'" + _typeof(arguments.callee) + "' object is not callable")));
}
if (args.length >= 3) {
var len = args.length;
if ((args[len-3] === null || args[len-3]['__class__'] === @{{tuple}}) &&
(args[len-2] === null || args[len-2]['__class__'] === @{{dict}}) &&
(args[len-1] === null || typeof args[len-1]['__class__'] == "undefined")) {
return @{{fcallext}}.apply(this, [this, null, a, arguments.callee].concat(args));
}
}
return @{{fcall}}.apply(this, [this, null, a, arguments.callee].concat(args));
}
${instance}['toString'] = function ( ) {
try {
return @{{mcall}}(this, null, this, '__str__').valueOf();
} catch (e) {
}
try {
return "<" + this.__class__.__name__ + " instance>";
} catch (e) {
}
return "<instance>";
};
${instance}['$inst'] = true;%(add_dict)s
${instance}['__class__'] = ${cls};""" % locals(), locals())
def repl_create_instance(self, args, cls, mcall, fcall):
return self.substitute("""\
var method$, instance, mro$, module = this['__class__'] === @{{module}} ? this : null;
${0, getattribute, mro$, method$, ${cls}, '__new__'}$
if (method$ === B$__new__) {
${1, __new__, instance, ${cls}}$
} else {
instance = ${fcall}.apply(module, [module, null, method$, ${cls}, ${cls}].concat(${args}));
instance['__class__'] = cls;
}
if (instance['$inst'] === true) {
${1, getattribute, mro$, method$, ${cls}, '__init__'}$
if (method$ !== B$__init__) {
${2, bind_method, method$, method$, instance, _meth_src}$
var ret = ${fcall}.apply(module, [module, null, method$, null].concat(${args}));
if (ret !== @{{None}} && ret !== null) {
if (typeof ret['__class__'] != "undefined") {
return @{{raise}}($new(@{{TypeError}}, B$str("__init__() should return None, not '" + ret['__class__']['__name__'] + "'")));
}
return @{{raise}}($new(@{{TypeError}}, B$str("__init__() should return None")));
}
}
}
return instance;""", locals())
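# Hedged summary (not in the original source) of the construction protocol the
# template above emits: resolve __new__ through the class MRO (falling back to the
# builtin B$__new__ fast path), build the instance, then resolve and call __init__
# when the result is a real instance, raising TypeError if __init__ returns anything
# other than None, mirroring CPython's type.__call__.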
def repl_bind_method(self, dst, src, obj, meth_src):
return self.substitute("""\
if (${meth_src}['$inst'] === false && ${obj}['__class__'] !== @{{module}} && typeof ${src} != "undefined" && typeof ${src}['$inst'] != "undefined") {
switch (${dst}['__class__']) {
case @{{function}}:
${3, __new__, _new_dst$, @{{instancemethod}}}$
_new_dst$['im_class'] = ${obj}['$inst'] === true ? ${obj}['__class__'] : ${obj};
_new_dst$['im_func'] = ${dst};
_new_dst$['im_self'] = ${obj}['$inst'] === true ? ${obj} : null;
${dst} = _new_dst$;
break;
case @{{staticmethod}}:
${dst} = ${dst}['im_func'];
break;
case @{{classmethod}}:
${3, __new__, _new_dst$, @{{instancemethod}}}$
_new_dst$['im_class'] = ${obj}['$inst'] === true ? ${obj}['__class__'] : ${obj};
_new_dst$['im_func'] = ${dst}['im_func'];
_new_dst$['im_self'] = ${obj}['$inst'] === true ? ${obj}['__class__'] : ${obj};
${dst} = _new_dst$;
break;
case @{{bool}}: // Some known to be non-descriptors
case @{{int}}:
case @{{long}}:
case @{{str}}:
break;
default:
// check for __get__ method in ${dst}
if (${dst}['$inst'] === true) {
var get$ = @{{_getattr}}(${dst}, '__get__');
if (typeof get$ != 'undefined') {
${dst} = @{{fcall}}(this, null, get$, ${dst}, ${obj}, ${obj}['__class__']);
}
}
break;
}
}""", locals())
def repl_attr_args_validate(self, _self, name):
return self.substitute("""\
if ($self['$inst'] !== true) {
@{{raise}}($new(@{{TypeError}}, B$str("can't apply this __getattribute__ to type object")));
}
if (${name}['__class__'] !== @{{str}} && typeof ${name} != 'string') {
@{{raise}}($new(@{{TypeError}}, B$str("attribute name must be string")));
}""", locals())
def repl_getattribute(self, mro, dst, src, name, break_after_instance=False):
if break_after_instance:
break_after_instance = 'break;\n '
else:
break_after_instance = ''
return self.substitute("""\
${dst} = [][1];
var ${mro} = ${src}['__mro__'];
var _meth_src = ${src};
switch (${src}['$inst']) {
case true:
if (${src}['__class__'] === @{{module}}) {
${dst} = ${src}['$dict'][${name}];
break;
} else if (${src}['__class__'] === @{{function}}) {
switch (${name}.charAt(0)) {
case 'i':
case '_':
${dst} = ${src}[${name}];
}
break;
}
var _noraise$ = @{{noraise}};
var ga;
${mro} = ${src}['__class__']['__mro__'];
for (var mro_i$ = 0; mro_i$ < ${mro}.length - 1; mro_i$++) {
var _mro$ = ${mro}[mro_i$];
var ga = _mro$['__getattribute__'];
if (typeof ga == "undefined") {
if (typeof _mro$ == "undefined" || typeof _mro$['$dict']['__getattribute__'] == "undefined") {
continue;
}
ga = _mro$['$dict']['__getattribute__'];
}
${3, bind_method, ga, ${src}, ${src}, ${src}['__class__']}$
@{{noraise}} = @{{AttributeError}};
${dst} = @{{fcall}}(this, null, ga, _mro$, ${name});
@{{noraise}} = _noraise$;
if (${dst} === @{{AttributeError}}) {
${dst} = [][1];
}
_meth_src = ${src}['__class__'];
${src} = ${src}['__class__'];
break;
}
if (typeof ${dst} == "undefined") {
if (typeof ${src}['$dict'] != "undefined") {
${dst} = ${src}['$dict'][${name}];
if (typeof ${dst} != "undefined") {
if (${dst} !== {}[${name}]) {
break;
}
${dst} = [][1];
}
}
switch (${name}.charAt(0)) {
case 'i':
case '_':
${dst} = ${src}[${name}];
}
if (typeof ${dst} != "undefined") {
break;
}
}${break_after_instance}
case false:
if (typeof ${dst} == "undefined") {
var _mro$, ga;
if (${src}['$inst'] === true) {
_meth_src = ${src}['__class__'];
} else {
switch (${name}.charAt(0)) {
case 'i':
case '_':
${dst} = ${src}[${name}];
}
if (typeof ${dst} != "undefined") {
break;
}
}
if (typeof ${dst} == "undefined") {
for (var mro_i$ = 0; mro_i$ < ${mro}.length; mro_i$++) {
_mro$ = ${mro}[mro_i$];
${dst} = _mro$['$dict'][${name}];
if (typeof ${dst} != "undefined") {
if (${dst} !== {}[${name}]) {
break;
}
${dst} = [][1];
}
switch (${name}.charAt(0)) {
case 'i':
case '_':
${dst} = _mro$[${name}];
}
if (typeof ${dst} != "undefined") {
break;
}
}
}
if (typeof ${dst} == "undefined" && ${name} !== '__get__') {
for (var mro_i$ = 0; mro_i$ < ${mro}.length - 1; mro_i$++) {
_mro$ = ${mro}[mro_i$];
if (typeof _mro$['$dict'] == "undefined" || typeof _mro$['$dict']['__getattr__'] == "undefined") {
continue;
}
ga = _mro$['$dict']['__getattr__'];
${5, bind_method, ga, ${src}, ${src}, ${src}['__class__']}$
@{{noraise}} = @{{AttributeError}};
${dst} = @{{fcall}}(this, null, ga, _mro$, ${name});
@{{noraise}} = _noraise$;
if (${dst} === @{{AttributeError}}) {
${dst} = [][1];
}
// TODO : unbind ${dst} ?
break;
}
}
}
break;
default:
${dst} = ${src}[${name}];
if (typeof ${dst} == "undefined" && typeof ${src}['$dict'] != "undefined") {
${dst} = ${src}['$dict'][${name}];
}
}""", locals())
def repl_getattributes(self, dst, src, name, value):
return self.substitute("""\
var attrname, attrnames, ga, mro, _${src} = ${src};
if (${name} instanceof Array) {
attrnames = ${name};
} else {
attrnames = [${name}];
}
find_attr:
for (var attri = 0; attri < attrnames.length; attri++) {
attrname = attrnames[attri];
if (typeof attrname != 'string') {
if (typeof attrname['__s'] != "undefined") {
attrname = attrname['__s'];
} else {
@{{raise}}($new(@{{TypeError}}, B$str("attribute name must be string, not '" + _typeof(attrname) + "'")));
}
}
${1, getattribute, mro, ${dst}, _${src}, attrname}$
if (typeof ${dst} == "undefined") {
if (_${src}['$inst'] === true && _${src}['__class__'] !== @{{module}} && _${src}['__class__'] !== @{{function}}) {
if (typeof ${dst} == "undefined") {
if (${value} === null || typeof ${value} == "undefined") {
@{{raise}}($new(@{{AttributeError}}, B$str("'" + _${src}['__class__']['__name__'] + "' object has no attribute '" + attrname + "'")));
} else {
${dst} =
For knock-in, this function subtracts the identical knock-out from a vanilla option.
Parameters:
----------
Ns: Number of points in price axis
Nt: Number of points in time axis
theta:
0 : Fully implicit method
0.5 : Crank-Nicolson method
According to the reference, the fully implicit method is better than the Crank-Nicolson method
Reference :
<NAME>, <NAME>, <NAME>. PDE methods for pricing barrier options [J].
Journal of Economic Dynamics & Control, 1997, 24(11-12):1563-1590.
ratio: The parameter used to control the shape of the grid
m : monitoring times
'''
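# Hedged note (not in the original source): the knock-in branch at the end of this
# method relies on in-out parity,
#     price(knock-in) = price(vanilla) - price(knock-out),
# with the vanilla leg taken from Black-Scholes (or the binomial tree for the
# American put), while the grid iteration below prices only the knock-out.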
# discretize Nt-1 points between every two monitoring times, for a total of Nt*m + 1 grid points on the time axis
step = Nt
Nt = Nt * m
# set up parameters
mu = self.r - self.q
_range = 5 * self.sig * np.sqrt(self.maturity)
Smax = max(self.upper_barrier, max(self.spot_price, self.strike) * np.exp((mu - self.sig ** 2 / 2.0) * self.maturity + _range)) * 1.0000001
Smin = self.lower_barrier * 0.9999999
# Nt + 1 grid points in total along the time axis
dt = self.maturity/ float(Nt)
# generate non-uniform grid
s = np.linspace(Smin, Smax, int(Ns * (1 - ratio)) + 1)  # cast needed: np.linspace requires an integer point count
temp = [self.lower_barrier, self.spot_price, self.upper_barrier]
lower_index = np.array([sum(s < i) for i in temp]) -1
upper_index = lower_index + 1
delta_s = - s[lower_index] + s[upper_index]
delta_s = delta_s[0]
if lower_index[1] > lower_index[0] and lower_index[2] > lower_index[1]:
count = int(Ns * ratio / 3.0)
else:
count = int(Ns * ratio / 2.0)
ds = delta_s / (count - 1)
# refine the grid around the key price levels
insert_vector = [np.linspace(s[lower_index[j]] + ds, s[upper_index[j]] - ds, count ) for j in [0, 1, 2]]
s_temp = np.append(s[:lower_index[0]+1],insert_vector[0])
s_temp = np.append(s_temp,s[upper_index[0]:lower_index[1]+1])
if lower_index[1] > lower_index[0]:
s_temp = np.append(s_temp,insert_vector[1])
s_temp = np.append(s_temp,s[upper_index[1]:lower_index[2]+1])
if lower_index[2] > lower_index[1]:
s_temp = np.append(s_temp,insert_vector[2])
s_temp = np.append(s_temp,s[upper_index[2]:])
s = s_temp
Ns = len(s) - 1
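# Hedged note (not in the original source): the grid built above starts uniform
# between Smin and Smax, then splices `count` finer points with spacing ds into the
# intervals bracketing the lower barrier, the spot and the upper barrier, so the
# solution is better resolved where the payoff and the monitoring projection have kinks.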
# initialize the payoff
if self.position == 'CALL':
V_Nt = np.maximum(s - self.strike, 0) * np.where(
(s <= self.upper_barrier) & (s >= self.lower_barrier), 1, 0)
payoff = np.maximum(s - self.strike, 0)
else:
V_Nt = np.maximum(self.strike - s, 0) * np.where(
(s <= self.upper_barrier) & (s >= self.lower_barrier), 1, 0)
payoff = np.maximum(- s + self.strike, 0)
# initialize the Dirichlet boundary condition
if self.position == "CALL":
f_0 = np.linspace(0, 0, Nt + 1)
f_Ns = np.linspace(0, 0, Nt + 1)
elif self.position == "PUT":
f_0 = np.linspace(0, 0, Nt + 1)
f_Ns = np.linspace(0, 0, Nt + 1)
# initialize the tridiagonal matrix by scalar-form
delta_s_i = 0.5 * (s[2:] - s[0:Ns - 1])
delta_s_plus = s[2:] - s[1:Ns]
delta_s_minus = s[1:Ns] - s[0:Ns - 1]
# from a_2 to a_I-1 are in the calculation matrix
a = - (1.0 - theta) * self.sig **2 * s[1:Ns] **2 / (2.0 * delta_s_i * delta_s_minus) + (
1 - theta) * mu * s[1:Ns] / (2 * delta_s_i)
# from b_1 to b_I-1 are in the calculation matrix
b = 1.0 / dt + (1 - theta) * self.r + (1.0 - theta) * self.sig **2 * s[1:Ns] **2 / (2.0 * delta_s_i * delta_s_minus)
b = b + (1.0 - theta) * self.sig **2 * s[1:Ns] **2 / (2.0 * delta_s_i * delta_s_plus)
# from c_1 to c_I-2 are in the calculation matrix
c = - (1.0 - theta) * self.sig **2 * s[1:Ns] **2 / (2.0 * delta_s_i * delta_s_plus) - (
1 - theta) * mu * s[1:Ns] / (2 * delta_s_i)
# from alpha_2 to alpha_I-1 are in the calculation matrix
alpha = theta * self.sig **2 * s[1:Ns] **2 / (2.0 * delta_s_i * delta_s_minus
) - theta * mu * s[1:Ns] / (2 * delta_s_i)
# from beta_1 to beta_I-1 are in the calculation matrix
beta = 1.0 / dt - theta * self.sig **2 * s[1:Ns] **2 / (2.0 * delta_s_i * delta_s_minus) - self.r * theta
beta = beta - theta * self.sig **2 * s[1:Ns] **2 / (2.0 * delta_s_i * delta_s_plus)
# from gamma_1 to gamma_I-2 are in the calculation matrix
gamma = theta * self.sig **2 * s[1:Ns] **2 / (2.0 * delta_s_i * delta_s_plus) + theta * mu * s[1:Ns] / (
2 * delta_s_i)
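# Hedged reading (not in the original source) of the coefficients above: with
# ds_minus = s_i - s_{i-1}, ds_plus = s_{i+1} - s_i and ds_i = (s_{i+1} - s_{i-1}) / 2,
# each backward time step solves the tridiagonal system
#     a_i V^n_{i-1} + b_i V^n_i + c_i V^n_{i+1}
#         = alpha_i V^{n+1}_{i-1} + beta_i V^{n+1}_i + gamma_i V^{n+1}_{i+1}
# plus Dirichlet boundary terms, i.e. A V^n = B V^{n+1}; theta = 0 gives the fully
# implicit scheme and theta = 0.5 gives Crank-Nicolson.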
# From Nt to 1, calculate V_Nt-1, V_Nt-2, ..., V_0 (vectors)
V_Nplus = V_Nt[1:Ns]
for k in range(Nt, 0, -1):
#for k in range(1,0,-1):
#V_Nplus : b of Ax=b
V_Nplus = self.my_dot_product(alpha, beta, gamma, V_Nplus)
V_Nplus[0] = V_Nplus[0] - a[0] * f_0[k-1] + alpha[0] * f_0[k]
V_Nplus[Ns-2] = V_Nplus[Ns-2] - c[Ns-2] * f_Ns[k-1] + gamma[Ns-2] * f_Ns[k]
#V_N : initial guess for the American case / x of Ax=b for the European case
ab = self.tri_bound_to_ab(a,b,c)
V_N = linalg.solve_banded((1, 1), ab, V_Nplus)
#American process
if self.exercise_type == 'AMERICAN':
V_N = self.Projected_SOR(a[1:],b,c[:-1], V_Nplus, V_N, payoff[1:-1], k, step, s[1:Ns])
V_Nplus = V_N
#Monitoring process
if k % step == 0:
V_Nplus = V_Nplus * np.where(
(s[1:Ns] <= self.upper_barrier) & (s[1:Ns] >= self.lower_barrier), 1, 0)
# linear interpolation
index = sum(s < self.spot_price)
w = (self.spot_price - s[index-1]) / (s[index] - s[index-1])
v_0 = V_Nplus[index-1] * (1 - w) + w * V_Nplus[index]
'''
The above process is only for the double knock-out option
'''
if self.option_type == 'KNOCK-OUT-DOUBLE-BARRIER':
return v_0
else:
if self.position == 'CALL':
v_0 = self.Black_Scholes_Call() - v_0
return v_0
else:
if self.exercise_type == 'EUROPEAN':
v_0 = self.Black_Scholes_Put() - v_0
return v_0
else:
v_0 = self.BTM_Vanilla(1200) - v_0
return v_0
#========
def FDM_SingleBarrier_NonUnifromGrid(self, Ns, Nt, theta, ratio, m):
'''
Abstract:
--------
Finite difference method for barrier option.
Using a non-uniform grid, with shape controlled by input ratio.
Discrete monitoring.
The iteration process is only suitable for knock-out options.
For knock-in, this function subtracts the identical knock-out from a vanilla option.
Parameters:
----------
Ns: Number of points in price axis
Nt: Number of points in time axis
theta:
0 : Fully implicit method
0.5 : Crank-Nicolson method
According to the reference, the fully implicit method is better than the Crank-Nicolson method
Reference :
<NAME>, <NAME>, <NAME>. PDE methods for pricing barrier options [J].
Journal of Economic Dynamics & Control, 1997, 24(11-12):1563-1590.
ratio: The parameter used to control the shape of the grid
m : monitoring times
'''
# discretize Nt-1 points between every two monitoring times, for a total of Nt*m + 1 grid points on the time axis
step = Nt
Nt = Nt * m
# set up parameters
mu = self.r - self.q
_range = 5 * self.sig * np.sqrt(self.maturity)
if self.option_type == 'DOWN-AND-OUT-BARRIER' or self.option_type == 'DOWN-AND-IN-BARRIER':
Smax = self.spot_price * np.exp((mu - self.sig ** 2 / 2.0) * self.maturity + _range)
Smin = self.barrier * 0.99999999
elif self.option_type == 'UP-AND-OUT-BARRIER' or self.option_type == 'UP-AND-IN-BARRIER':
Smax = max(self.barrier, self.strike) * 1.0000001
Smin = 0
# Nt + 1 grid points in total along the time axis
dt = self.maturity/ float(Nt)
# generate non-uniform grid
s = np.linspace(Smin, Smax, int(Ns * (1 - ratio)) + 1)  # cast needed: np.linspace requires an integer point count
if self.option_type == 'DOWN-AND-OUT-BARRIER':
temp = [self.barrier, self.spot_price]
elif self.option_type == 'UP-AND-OUT-BARRIER':
temp = [self.spot_price, self.barrier]
elif self.option_type == 'DOWN-AND-IN-BARRIER':
temp = [self.barrier, self.spot_price]
else:
temp = [self.spot_price, self.barrier]
lower_index = np.array([sum(s < i) for i in temp]) -1
upper_index = lower_index + 1
delta_s = - s[lower_index] + s[upper_index]
delta_s = delta_s[0]
if lower_index[1] > lower_index[0]:
count = int(Ns * ratio / 2.0)
else:
count = int(Ns * ratio)
ds = delta_s / (count - 1)
# refine the grid around the key price levels
insert_vector = [np.linspace(s[lower_index[j]] + ds, s[upper_index[j]] - ds, count ) for j in [0, 1]]
s_temp = np.append(s[:lower_index[0]+1],insert_vector[0])
s_temp = np.append(s_temp,s[upper_index[0]:lower_index[1]+1])
if lower_index[1] > lower_index[0]:
s_temp = np.append(s_temp,insert_vector[1])
s_temp = np.append(s_temp,s[upper_index[1]:])
s = s_temp
Ns = len(s) - 1
# initialize the payoff
if self.position == 'CALL':
if self.option_type == 'DOWN-AND-OUT-BARRIER' or self.option_type ==
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['EntityTypeArgs', 'EntityType']
@pulumi.input_type
class EntityTypeArgs:
def __init__(__self__, *,
display_name: pulumi.Input[str],
kind: pulumi.Input[str],
enable_fuzzy_extraction: Optional[pulumi.Input[bool]] = None,
entities: Optional[pulumi.Input[Sequence[pulumi.Input['EntityTypeEntityArgs']]]] = None,
project: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a EntityType resource.
:param pulumi.Input[str] display_name: The name of this entity type to be displayed on the console.
:param pulumi.Input[str] kind: Indicates the kind of entity type.
* KIND_MAP: Map entity types allow mapping of a group of synonyms to a reference value.
* KIND_LIST: List entity types contain a set of entries that do not map to reference values. However, list entity
types can contain references to other entity types (with or without aliases).
* KIND_REGEXP: Regexp entity types allow to specify regular expressions in entries values.
Possible values are `KIND_MAP`, `KIND_LIST`, and `KIND_REGEXP`.
:param pulumi.Input[bool] enable_fuzzy_extraction: Enables fuzzy entity extraction during classification.
:param pulumi.Input[Sequence[pulumi.Input['EntityTypeEntityArgs']]] entities: The collection of entity entries associated with the entity type.
Structure is documented below.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
pulumi.set(__self__, "display_name", display_name)
pulumi.set(__self__, "kind", kind)
if enable_fuzzy_extraction is not None:
pulumi.set(__self__, "enable_fuzzy_extraction", enable_fuzzy_extraction)
if entities is not None:
pulumi.set(__self__, "entities", entities)
if project is not None:
pulumi.set(__self__, "project", project)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Input[str]:
"""
The name of this entity type to be displayed on the console.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: pulumi.Input[str]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter
def kind(self) -> pulumi.Input[str]:
"""
Indicates the kind of entity type.
* KIND_MAP: Map entity types allow mapping of a group of synonyms to a reference value.
* KIND_LIST: List entity types contain a set of entries that do not map to reference values. However, list entity
types can contain references to other entity types (with or without aliases).
* KIND_REGEXP: Regexp entity types allow to specify regular expressions in entries values.
Possible values are `KIND_MAP`, `KIND_LIST`, and `KIND_REGEXP`.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: pulumi.Input[str]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter(name="enableFuzzyExtraction")
def enable_fuzzy_extraction(self) -> Optional[pulumi.Input[bool]]:
"""
Enables fuzzy entity extraction during classification.
"""
return pulumi.get(self, "enable_fuzzy_extraction")
@enable_fuzzy_extraction.setter
def enable_fuzzy_extraction(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_fuzzy_extraction", value)
@property
@pulumi.getter
def entities(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EntityTypeEntityArgs']]]]:
"""
The collection of entity entries associated with the entity type.
Structure is documented below.
"""
return pulumi.get(self, "entities")
@entities.setter
def entities(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EntityTypeEntityArgs']]]]):
pulumi.set(self, "entities", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
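# Hedged usage sketch (not part of the generated file; the resource and entity names
# below are illustrative only):
#
#     args = EntityTypeArgs(
#         display_name="fruit",
#         kind="KIND_MAP",
#         entities=[EntityTypeEntityArgs(value="apple", synonyms=["apple", "green apple"])],
#     )
#     EntityType("fruit-entity-type", args=args)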
@pulumi.input_type
class _EntityTypeState:
def __init__(__self__, *,
display_name: Optional[pulumi.Input[str]] = None,
enable_fuzzy_extraction: Optional[pulumi.Input[bool]] = None,
entities: Optional[pulumi.Input[Sequence[pulumi.Input['EntityTypeEntityArgs']]]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering EntityType resources.
:param pulumi.Input[str] display_name: The name of this entity type to be displayed on the console.
:param pulumi.Input[bool] enable_fuzzy_extraction: Enables fuzzy entity extraction during classification.
:param pulumi.Input[Sequence[pulumi.Input['EntityTypeEntityArgs']]] entities: The collection of entity entries associated with the entity type.
Structure is documented below.
:param pulumi.Input[str] kind: Indicates the kind of entity type.
* KIND_MAP: Map entity types allow mapping of a group of synonyms to a reference value.
* KIND_LIST: List entity types contain a set of entries that do not map to reference values. However, list entity
types can contain references to other entity types (with or without aliases).
* KIND_REGEXP: Regexp entity types allow to specify regular expressions in entries values.
Possible values are `KIND_MAP`, `KIND_LIST`, and `KIND_REGEXP`.
:param pulumi.Input[str] name: The unique identifier of the entity type. Format: projects/<Project ID>/agent/entityTypes/<Entity type ID>.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if enable_fuzzy_extraction is not None:
pulumi.set(__self__, "enable_fuzzy_extraction", enable_fuzzy_extraction)
if entities is not None:
pulumi.set(__self__, "entities", entities)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of this entity type to be displayed on the console.
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="enableFuzzyExtraction")
def enable_fuzzy_extraction(self) -> Optional[pulumi.Input[bool]]:
"""
Enables fuzzy entity extraction during classification.
"""
return pulumi.get(self, "enable_fuzzy_extraction")
@enable_fuzzy_extraction.setter
def enable_fuzzy_extraction(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_fuzzy_extraction", value)
@property
@pulumi.getter
def entities(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EntityTypeEntityArgs']]]]:
"""
The collection of entity entries associated with the entity type.
Structure is documented below.
"""
return pulumi.get(self, "entities")
@entities.setter
def entities(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EntityTypeEntityArgs']]]]):
pulumi.set(self, "entities", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Indicates the kind of entity type.
* KIND_MAP: Map entity types allow mapping of a group of synonyms to a reference value.
* KIND_LIST: List entity types contain a set of entries that do not map to reference values. However, list entity
types can contain references to other entity types (with or without aliases).
* KIND_REGEXP: Regexp entity types allow to specify regular expressions in entries values.
Possible values are `KIND_MAP`, `KIND_LIST`, and `KIND_REGEXP`.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The unique identifier of the entity type. Format: projects/<Project ID>/agent/entityTypes/<Entity type ID>.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
class EntityType(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
display_name: Optional[pulumi.Input[str]] = None,
enable_fuzzy_extraction: Optional[pulumi.Input[bool]] = None,
entities: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EntityTypeEntityArgs']]]]] = None,
kind: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Represents an entity type. Entity types serve as a tool for extracting parameter values from natural language queries.
To get more information about EntityType, see:
* [API documentation](https://cloud.google.com/dialogflow/docs/reference/rest/v2/projects.agent.entityTypes)
* How-to Guides
* [Official Documentation](https://cloud.google.com/dialogflow/docs/)
## Example Usage
### Dialogflow Entity Type Basic
```python
import pulumi
import pulumi_gcp as gcp
basic_agent = gcp.diagflow.Agent("basicAgent",
display_name="example_agent",
default_language_code="en",
time_zone="America/New_York")
basic_entity_type = gcp.diagflow.EntityType("basicEntityType",
display_name="",
kind="KIND_MAP",
entities=[
gcp.diagflow.EntityTypeEntityArgs(
value="value1",
synonyms=[
"synonym1",
"synonym2",
],
),
gcp.diagflow.EntityTypeEntityArgs(
value="value2",
synonyms=[
"synonym3",
"synonym4",
],
),
],
opts=pulumi.ResourceOptions(depends_on=[basic_agent]))
```
## Import
EntityType can be imported using any of these accepted formats
```sh
$ pulumi import gcp:diagflow/entityType:EntityType default {{name}}
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] display_name: The name of this entity type to be displayed on the console.
:param pulumi.Input[bool] enable_fuzzy_extraction: Enables fuzzy entity extraction during classification.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EntityTypeEntityArgs']]]] entities: The collection of entity entries associated with the entity type.
Structure is documented below.
:param pulumi.Input[str] kind: Indicates the kind of entity type.
* KIND_MAP: Map entity types allow mapping of a group of synonyms to a reference value.
* KIND_LIST: List entity types contain a set of entries that do not map to reference values. However, list entity
types can contain references to other entity types (with or without aliases).
* KIND_REGEXP: Regexp entity types allow to specify regular expressions in entries values.
Possible values are `KIND_MAP`, `KIND_LIST`, and `KIND_REGEXP`.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: EntityTypeArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Represents an entity type. Entity types serve as a tool for extracting parameter values from natural language queries.
import os
import logging
from chatterbox.models import Activity
from .date import tumblr_date_to_datetime, datetime_to_string
log = logging.getLogger(__name__)
def activity_from_dict(data):
log.debug("Converting Tumblr dict to Activity Model")
activity_dict = activity_dict_from_dict(data)
return Activity.from_activity_dict(activity_dict)
def activity_dict_from_dict(blob):
log.debug("Converting Tumblr dict to activity dict: %s", blob)
stream_object = {}
stream_object["@context"] = "http://www.w3.org/ns/activitystreams"
stream_object["@type"] = "Activity"
date = tumblr_date_to_datetime(blob.get('date'))
stream_object["published"] = datetime_to_string(date)
stream_object["provider"] = {
"@type": "Service",
"displayName": "Tumblr"
}
stream_object["actor"] = {
"@type": "Person",
"@id": "https://{}.tumblr.com".format(blob.get("blog_name")),
"displayName": blob.get("blog_name"),
}
stream_object["object"] = {
"@id": blob.get('post_url'),
"content": blob.get("caption"),
"url": [],
"tumblr:can_reply": blob.get("can_reply"),
"tumblr:can_send_in_message": blob.get("can_send_in_message"),
"tumblr:followed": blob.get("followed"),
"tumblr:format": blob.get("format"),
"tumblr:highlighted": blob.get("highlighted"),
"tumblr:id": blob.get("id"),
"tumblr:image_permalink": blob.get("image_permalink"),
"tumblr:liked": blob.get("liked"),
"tumblr:note_count": blob.get("note_count"),
"tubmlr:reblog": blob.get('reblog'),
"tubmlr:reblog_key": blob.get('reblog_key'),
"tubmlr:recommended_color": blob.get('recommended_color'),
"tubmlr:recommended_source": blob.get('recommended_source'),
"tubmlr:short_url": blob.get('short_url'),
"tubmlr:slug": blob.get('slug'),
"tubmlr:state": blob.get('state'),
"tubmlr:summary": blob.get('summary'),
"tubmlr:tags": blob.get('tags'),
"tubmlr:timestamp": blob.get('timestamp'),
"tubmlr:trail": blob.get('trail'),
"tubmlr:type": blob.get('type'),
# link only
"tubmlr:description": blob.get('description'),
"tubmlr:link_image": blob.get('link_image'),
"tubmlr:link_image_dimensions": blob.get('link_image_dimensions'),
"tubmlr:publisher": blob.get('publisher'),
"tubmlr:title": blob.get('title'),
}
stream_type = blob.get('type')
photos = blob.get("photos", [])
if len(photos):
for photo in photos:
_, ext = os.path.splitext(photo.get('original_size').get('url'))
media_type = "png" if ext.startswith(".png") else "jpeg"
stream_object["object"]["url"].append({
"@type": "Link",
"href": photo.get('original_size').get('url'),
"mediaType": "image/{}".format(media_type),
"tubmlr:alt_sizes": photo.get('alt_sizes'),
"tubmlr:caption": photo.get('caption'),
"tubmlr:height": photo.get('original_size').get('height'),
"tubmlr:width": photo.get('original_size').get('width'),
})
if stream_type == "photo":
stream_object["object"]["@type"] = "Image"
elif stream_type == "link":
stream_object["object"]["@type"] = "Link"
stream_object["object"]["href"] = blob.get('url')
elif stream_type == "text":
stream_object["object"]["@type"] = "Note"
stream_object["object"]["content"] = blob.get("body")
elif stream_type == "video":
stream_object["object"]["@type"] = "Video"
stream_object["object"]["tumblr:player"] = blob.get("player")
stream_object["object"]["tumblr:duration"] = blob.get("duration")
stream_object["object"][
"tumblr:html5_capable"] = blob.get("html5_capable")
stream_object["object"][
"tumblr:thumbnail_height"] = blob.get("thumbnail_height")
stream_object["object"][
"tumblr:thumbnail_width"] = blob.get("thumbnail_width")
stream_object["object"][
"tumblr:thumbnail_url"] = blob.get("thumbnail_url")
stream_object["object"]["tumblr:video_type"] = blob.get("video_type")
stream_object["object"]["url"] = [{
"href": blob.get("video_url"),
"@type": "Link"
}]
if not blob.get("video_url"):
stream_object["object"]["@type"] = "Link"
stream_object["object"]["href"] = blob.get("permalink_url")
elif stream_type == "answer":
stream_object["object"]["@type"] = "Note"
stream_object["object"]["content"] = blob.get("answer")
stream_object["object"]["tumblr:asking_name"] = blob.get("asking_name")
stream_object["object"]["tumblr:asking_url"] = blob.get("asking_url")
stream_object["object"]["tumblr:question"] = blob.get("question")
elif stream_type == "chat":
stream_object["object"]["@type"] = "Note"
stream_object["object"]["content"] = blob.get("body")
stream_object["object"]["tumblr:dialogue"] = blob.get("dialogue")
elif stream_type == "quote":
stream_object["object"]["@type"] = "Note"
stream_object["object"]["content"] = blob.get("text")
stream_object["object"]["tumblr:source"] = blob.get("source")
elif stream_type == "audio":
stream_object["object"]["@type"] = "Audio"
stream_object["object"]["url"] = [{
"href": blob.get("audio_url"),
"@type": "Link"
}]
stream_object["object"][
"tumblr:audio_source_url"] = blob.get("audio_source_url")
stream_object["object"]["tumblr:audio_type"] = blob.get("audio_type")
stream_object["object"]["tumblr:embed"] = blob.get("embed")
stream_object["object"]["tumblr:player"] = blob.get("player")
stream_object["object"]["tumblr:plays"] = blob.get("plays")
stream_object["object"][
"tumblr:source_title"] = blob.get("source_title")
stream_object["object"]["tumblr:source_url"] = blob.get("source_url")
else:
# GOTTA UPDATE FEADER!
log.error("tumblr: Update utils/tubmlr.py activity_dict_from_dict method")
log.error(blob)
log.error("missed type: %s", blob.get('type'))
return stream_object
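# Hedged usage sketch (not part of the original module; field values are illustrative
# and assume tumblr_date_to_datetime accepts the "GMT" format shown in the sample
# payloads below):
#
#     post = {"type": "text", "date": "2016-01-18 06:14:28 GMT",
#             "blog_name": "example", "post_url": "http://example.tumblr.com/post/1",
#             "body": "hello"}
#     activity = activity_dict_from_dict(post)
#     assert activity["object"]["@type"] == "Note"
#     assert activity["object"]["content"] == "hello"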
"""
AUDIO
{u'audio_source_url': u'https://www.tumblr.com/audio_file/pitchersandpoets/25448766013/tumblr_m5vo97tJ3L1qfnhhq',
u'audio_type': u'tumblr',
u'audio_url': u'https://www.tumblr.com/audio_file/pitchersandpoets/25448766013/tumblr_m5vo97tJ3L1qfnhhq',
u'blog_name': u'pitchersandpoets',
u'can_reply': False,
u'can_send_in_message': True,
u'caption': u'<p><a class="tumblr_blog" href="http://toddzwillich.tumblr.com/post/25447401556/sen-harry-reid-d-nev-channels-nationals">toddzwillich</a>:</p>\n<blockquote>\n<p>Sen. <NAME> (D-Nev.) channels Nationals outfielder Bryce Harper in a press availability with reporters.</p>\n</blockquote>\n<p><NAME> would never do this. </p>',
u'date': u'2012-06-19 19:11:05 GMT',
u'embed': u'<iframe class="tumblr_audio_player tumblr_audio_player_25448766013" src="http://pitchersandpoets.tumblr.com/post/25448766013/audio_player_iframe/pitchersandpoets/tumblr_m5vo97tJ3L1qfnhhq?audio_file=https%3A%2F%2Fwww.tumblr.com%2Faudio_file%2Fpitchersandpoets%2F25448766013%2Ftumblr_m5vo97tJ3L1qfnhhq" frameborder="0" allowtransparency="true" scrolling="no" width="540" height="85"></iframe>',
u'followed': False,
u'format': u'html',
u'highlighted': [],
u'id': 25448766013,
u'liked': False,
u'note_count': 63,
u'player': u'<embed type="application/x-shockwave-flash" src="https://secure.assets.tumblr.com/swf/audio_player.swf?audio_file=https%3A%2F%2Fwww.tumblr.com%2Faudio_file%2Fpitchersandpoets%2F25448766013%2Ftumblr_m5vo97tJ3L1qfnhhq&color=FFFFFF" height="27" width="207" quality="best" wmode="opaque"></embed>',
u'plays': 10389,
u'post_url': u'http://pitchersandpoets.tumblr.com/post/25448766013/toddzwillich-sen-harry-reid-d-nev-channels',
u'reblog': {u'comment': u'<p><NAME> would never do this. </p>',
u'tree_html': u'<p><a class="tumblr_blog" href="http://toddzwillich.tumblr.com/post/25447401556/sen-harry-reid-d-nev-channels-nationals">toddzwillich</a>:</p><blockquote>\n<p>Sen. Harry Reid (D-Nev.) channels Nationals outfielder Bryce Harper in a press availability with reporters.</p>\n</blockquote>'},
u'reblog_key': u'<KEY>',
u'recommended_color': None,
u'recommended_source': None,
u'short_url': u'http://tmblr.co/ZhSWUxNitLez',
u'slug': u'toddzwillich-sen-harry-reid-d-nev-channels',
u'source_title': u'toddzwillich',
u'source_url': u'http://toddzwillich.tumblr.com/post/25447401556/sen-harry-reid-d-nev-channels-nationals',
u'state': u'published',
u'summary': u'Sen. Harry Reid (D-Nev.) channels Nationals outfielder Bryce Harper in a press availability with reporters.',
u'tags': [],
u'timestamp': 1340133065,
u'trail': [{u'blog': {u'active': True,
u'name': u'toddzwillich',
u'theme': {u'avatar_shape': u'square',
u'background_color': u'#FAFAFA',
u'body_font': u'Helvetica Neue',
u'header_bounds': u'',
u'header_image': u'https://secure.assets.tumblr.com/images/default_header/optica_pattern_10.png?_v=eafbfb1726b334d86841955ae7b9221c',
u'header_image_focused': u'https://secure.assets.tumblr.com/images/default_header/optica_pattern_10_focused_v3.png?_v=eafbfb1726b334d86841955ae7b9221c',
u'header_image_scaled': u'https://secure.assets.tumblr.com/images/default_header/optica_pattern_10_focused_v3.png?_v=eafbfb1726b334d86841955ae7b9221c',
u'header_stretch': True,
u'link_color': u'#529ECC',
u'show_avatar': True,
u'show_description': True,
u'show_header_image': True,
u'show_title': True,
u'title_color': u'#444444',
u'title_font': u'Gibson',
u'title_font_weight': u'bold'}},
u'content': u'<p>Sen. <NAME> (D-Nev.) channels Nationals outfielder Bryce Harper in a press availability with reporters.</p>',
u'content_raw': u'<p>Sen. <NAME> (D-Nev.) channels Nationals outfielder Bryce Harper in a press availability with reporters.</p>',
u'is_root_item': True,
u'post': {u'id': u'25447401556'}},
{u'blog': {u'active': True,
u'name': u'pitchersandpoets',
u'theme': {u'avatar_shape': u'square',
u'background_color': u'#FAFAFA',
u'body_font': u'Helvetica Neue',
u'header_bounds': u'',
u'header_image': u'https://secure.assets.tumblr.com/images/default_header/optica_pattern_02.png?_v=b976ee00195b1b7806c94ae285ca46a7',
u'header_image_focused': u'https://secure.assets.tumblr.com/images/default_header/optica_pattern_02_focused_v3.png?_v=b976ee00195b1b7806c94ae285ca46a7',
u'header_image_scaled': u'https://secure.assets.tumblr.com/images/default_header/optica_pattern_02_focused_v3.png?_v=b976ee00195b1b7806c94ae285ca46a7',
u'header_stretch': True,
u'link_color': u'#529ECC',
u'show_avatar': True,
u'show_description': True,
u'show_header_image': True,
u'show_title': True,
u'title_color': u'#444444',
u'title_font': u'Gibson',
u'title_font_weight': u'bold'}},
u'content': u'<p><NAME> would never do this. </p>',
u'content_raw': u'<p><NAME> would never do this. </p>',
u'is_current_item': True,
u'post': {u'id': u'25448766013'}}],
u'type': u'audio'}
QUOTE
{u'blog_name': u'pitchersandpoets',
u'can_reply': False,
u'can_send_in_message': True,
u'date': u'2014-04-07 17:19:57 GMT',
u'followed': False,
u'format': u'html',
u'highlighted': [],
u'id': 82004716592,
u'liked': False,
u'note_count': 7,
u'post_url': u'http://pitchersandpoets.tumblr.com/post/82004716592/to-the-17-full-fledged-members-of-the-giib-the',
u'reblog': {u'comment': u'<p>I wrote about the budding sabermetrics movement in Cuba<a href="http://sportsillustrated.cnn.com/vault/article/magazine/MAG1210053/index.htm"> for Sports Illustrated</a>.\xa0 (via <a class="tumblr_blog" href="http://ericnus.com/">ericnus</a>)</p>',
u'tree_html': u''},
u'reblog_key': u'<KEY>',
u'recommended_color': None,
u'recommended_source': None,
u'short_url': u'http://tmblr.co/ZhSWUx1CNtAmm',
u'slug': u'to-the-17-full-fledged-members-of-the-giib-the',
u'source': u'I wrote about the budding sabermetrics movement in Cuba<a href="http://sportsillustrated.cnn.com/vault/article/magazine/MAG1210053/index.htm"> for Sports Illustrated</a>.\xa0 (via <a class="tumblr_blog" href="http://ericnus.com/">ericnus</a>)',
u'state': u'published',
u'summary': u'To the 17 full-fledged members of the GIIB, the feeling and understanding of baseball are inseparable from one another. They\u2019re...',
u'tags': [],
u'text': u'To the 17 full-fledged members of the GIIB, the feeling and understanding of baseball are inseparable from one another. They\u2019re intellectuals, empiricists, the kind of guys who believe that the best way to express your love of something is to spend years studying and arguing about it. They talk about sabermetrics in the context of classic economists: \u201cMarx\u2019s economic theories are basically sabermetrics,\u201d says Aldama. \u201cIt\u2019s the elimination of subjectivity.\u201d',
u'timestamp': 1396891197,
u'type': u'quote'}
CHAT:
{u'blog_name': u'raspberryninjas',
u'body': u'Jeonghan:\nSeventeen fans:*flips a table* LOOK AT HIM. THIS IS TRUE ART, I CANNOT BELIEVE *cries*',
u'date': u'2016-01-18 06:14:28 GMT',
u'dialogue': [{u'label': u'Jeonghan:', u'name': u'Jeonghan', u'phrase': u''}, {u'label': u'Seventeen fans:', u'name': u'Seventeen fans', u'phrase': u'*flips a table* LOOK AT HIM. THIS IS TRUE ART, I CANNOT BELIEVE *cries*'}],
u'format': u'html',
u'highlighted': [],
u'id': 137534944663,
u'note_count': 81,
u'post_url': u'http://raspberryninjas.tumblr.com/post/137534944663/jeonghan-seventeen-fansflips-a-table-look-at',
u'reblog_key': u'<KEY>',
u'recommended_color': None,
u'recommended_source': None,
u'short_url': u'http://tmblr.co/ZsOe6q205kBMN',
u'slug': u'jeonghan-seventeen-fansflips-a-table-look-at',
u'state': u'published',
u'summary': u'Jeonghan:\nSeventeen fans:*flips a table* LOOK AT HIM. THIS IS TRUE ART, I CANNOT BELIEVE *cries*',
u'tags': [u'seventeen', u'jeonghan', u'dk', u'dino', u'woozi', u'seungkwan', u'scoups', u'hansol vernon chwe', u'joshua', u'jisoo', u'wonwoo', u'minggyu', u'hoshi', u'the8', u'jun'],
u'timestamp': 1453097668,
u'title': None,
u'type': u'chat'}
ANSWER:
{u'answer': u'<p>\u201cCan we get these?\u201d Chan asks, coming up to you with a pair of sweaters. They were the same shirt with slightly different patterns on them. Micky and Minnie.<br/></p><p>\u201cNo, couple things are cheesy. And besides, what happens if we break up?\u201d You say. He pouts.<br/></p><p>\u201cThat won\u2019t happen, right? You won\u2019t break up with me, right?\u201d He asks, worry starting to settle in his gut. You breathe out a laugh and pull him into your arms.<br/></p><p>\u201cOf course not, Chan. The only reason we\u2019d ever break up is if you got incredibly bored of me.\u201d You say.<br/></p><p>\u201cThat won\u2019t happen. Ever.\u201d<br/></p><hr><p><i>it\u2019s 2:30 in the morning forgive me for this mess</i></p><p><i><b>don\u2019t send any more please ^-^</b></i></p>',
u'asking_name': u'Anonymous',
u'asking_url': None,
u'blog_name': u'mingyoozi',
u'date': u'2016-01-18 10:51:50 GMT',
u'format': u'html',
u'highlighted': [],
u'id': 137544693488,
u'note_count': 12,
u'post_url': u'http://mingyoozi.tumblr.com/post/137544693488/dino-couple-items',
u'question': u'Dino + couple items',
u'reblog': {u'comment': u'<p>\u201cCan we get these?\u201d Chan asks, coming up to you with a pair of sweaters. They were the same shirt with slightly different patterns on them. Micky and Minnie.<br></p><p>\u201cNo, couple things are cheesy. And besides, what happens if we break up?\u201d You say. He pouts.<br></p><p>\u201cThat won\u2019t happen, right? You won\u2019t break up with me, right?\u201d He asks, worry starting to settle in his gut. You breathe out a laugh and pull him into your arms.<br></p><p>\u201cOf course not, Chan. The only reason we\u2019d ever break up is if you got incredibly bored of me.\u201d You say.<br></p><p>\u201cThat won\u2019t happen. Ever.\u201d<br></p><hr><p><i>it\u2019s 2:30 in the morning forgive me for this mess</i></p><p><i><b>don\u2019t send any more please ^-^</b></i></p>',
u'tree_html': u''},
u'reblog_key': u'<KEY>',
u'recommended_color': None,
u'recommended_source': None,
u'short_url': u'http://tmblr.co/ZKgZsk206JNRm',
u'slug': u'dino-couple-items',
u'state': u'published',
u'summary': u'Dino + couple items',
u'tags': [u'dino scenarios', u'dino', u'seventeen scenarios', u'drabble game 2', u'x', u'anon'],
u'timestamp': 1453114310,
u'trail': [{u'blog': {u'active': True,
u'name': u'mingyoozi',
u'theme': {u'avatar_shape': u'circle',
u'background_color': u'#FBFBFB',
u'body_font': u'Helvetica Neue',
u'header_bounds': u'0,435,242,5',
u'header_focus_height': 242,
u'header_focus_width': 430,
u'header_full_height': 242,
u'header_full_width': 500,
u'header_image': u'https://secure.static.tumblr.com/ab1baf2a8bd404117b373a26c766ca56/rcprpj9/dainzinpj/tumblr_static_.gif',
u'header_image_focused': u'https://secure.static.tumblr.com/ab1baf2a8bd404117b373a26c766ca56/rcprpj9/ftenzinpl/tumblr_static_tumblr_static__focused_v3.gif',
u'header_image_scaled': u'https://secure.static.tumblr.com/ab1baf2a8bd404117b373a26c766ca56/rcprpj9/dainzinpj/tumblr_static__2048_v2.gif',
u'header_stretch': True,
u'link_color': u'#F9D3AB',
u'show_avatar': True,
u'show_description': True,
u'show_header_image': False,
u'show_title': True,
u'title_color': u'#888888',
u'title_font': u'Lorimer No 2',
u'title_font_weight': u'regular'}},
u'content': u'<p>\u201cCan we get these?\u201d Chan asks, coming up to you with a pair of sweaters. They were the same shirt with slightly different patterns on them. Micky and Minnie.<br /></p><p>\u201cNo, couple things are cheesy. And besides, what happens if we break up?\u201d You say. He pouts.<br /></p><p>\u201cThat | |
area time series for all L3 regions
tf.random.set_seed(seed)
#X_test_dat= np.array(X_test_dat, dtype= np.float32)
if debug:
n_regions= 1 #18
else:
n_regions= 18
tot_months= 60
reg_size_df= pd.DataFrame({'mean_size': pd.Series(dtype= 'int'), 'low_1sig_size': pd.Series(dtype= 'int'), 'high_1sig_size': pd.Series(dtype= 'int'), \
'reg_indx': pd.Series(dtype= 'int')})
for i in range(n_regions):
if debug:
size_ind_df= size_test_df.reset_index()[['fire_size', 'fire_month', 'reg_indx']]
reg_ind_df= size_ind_df.groupby('reg_indx').get_group(regindx).groupby('fire_month')
else:
size_ind_df= size_test_df.reset_index()[['fire_size', 'fire_month', 'reg_indx']]
reg_ind_df= size_ind_df.groupby('reg_indx').get_group(i+1).groupby('fire_month')
mean_burnarea_tot= np.zeros(tot_months)
high_1sig_burnarea_tot= np.zeros(tot_months)
low_1sig_burnarea_tot= np.zeros(tot_months)
if debug:
fire_ind_grid= []
ml_param_grid= []
for m in range(tot_months):
mindx= m + 372
samp_arr= tf.zeros([10000, 0])
if freq_flag == 'ml':
if debug:
reg_freqs= regmlfreq.groupby('reg_indx').get_group(regindx) #replace with model instead of df and try with one region first
else:
reg_freqs= regmlfreq.groupby('reg_indx').get_group(i+1)
mean_freqs= reg_freqs['mean_freq'].iloc[[m]].to_numpy()[0] #iloc maintains month index for every region
high_freqs= reg_freqs['high_1sig_freq'].iloc[[m]].to_numpy()[0]
low_freqs= reg_freqs['low_1sig_freq'].iloc[[m]].to_numpy()[0]
elif freq_flag == 'data':
freq_size= np.int64(len(freqs_data)/18)
tmparr_1= np.linspace(0, len(freqs_data) - freq_size, 18, dtype= np.int64)
#tmparr_2= tmparr_1 + freq_size
if debug:
freqs= freqs_data.astype(np.int64)[tmparr_1[regindx - 1] + m]
else:
freqs= freqs_data.astype(np.int64)[tmparr_1[i] + m]
# for sampling from frequency distribution, create additional function from here
if mean_freqs == 0: #if mean freqs from distribution is zero, then set burned area to be zero
mean_burnarea_tot[m]= 0
high_1sig_burnarea_tot[m]= 0
low_1sig_burnarea_tot[m]= 0
#if debug:
#fire_ind_grid.append(np.array([0]))
#ml_param_grid.append(np.array([np.zeros(3*ncomps, dtype= np.float32)]))
else:
try:
fire_ind_arr= reg_ind_df.get_group(mindx).index.to_numpy() #replace with random draws of grid points from a RF learned 'fire potential' map
#print(m, mean_freqs, high_freqs, fire_ind_arr)
freqsarr= [mean_freqs, high_freqs, low_freqs] #low freqs are usually 0, so find a fix for that
for freqs in freqsarr:
if freqs > 0:
if freqs <= len(fire_ind_arr):
fire_ind_arr= np.random.choice(fire_ind_arr, freqs, replace= False)
else:
fire_ind_arr= np.append(fire_ind_arr, np.random.choice(fire_ind_arr, freqs - len(fire_ind_arr), replace= True)) #False might imply we run out of fires
ml_param_vec= mdn_model.predict(x= np.array(X_test_dat.iloc[fire_ind_arr], dtype= np.float32)) #note: different indexing than the fire_size_test df
samp_arr= tf.concat([samp_arr, stat_model(ml_param_vec).sample(10000, seed= 99)], axis= 1)
if debug:
fire_ind_grid.append(fire_ind_arr)
ml_param_grid.append(ml_param_vec)
size_samp_arr= tf.reduce_mean(samp_arr, axis= 0).numpy()
std_size_arr= tf.math.reduce_std(samp_arr, axis= 0).numpy()
high_1sig_err= deepcopy(std_size_arr)
tot_l1sig_arr= np.sqrt(np.sum(std_size_arr**2))
size_samp_arr[size_samp_arr > 2*max_size_arr[i]]= max_size_arr[i]
high_1sig_err[high_1sig_err > max_size_arr[i]]= max_size_arr[i]
tot_h1sig_arr= np.sqrt(np.sum(high_1sig_err**2))
if np.sum(size_samp_arr) > 2*sum_size_arr[i]:
mean_burnarea_tot[m]= sum_size_arr[i]
else:
mean_burnarea_tot[m]= np.sum(size_samp_arr)
high_1sig_burnarea_tot[m]= mean_burnarea_tot[m] + tot_h1sig_arr
low_1sig_burnarea_tot[m]= mean_burnarea_tot[m] - tot_l1sig_arr
if (mean_burnarea_tot[m] - tot_l1sig_arr) < 0:
low_1sig_burnarea_tot[m]= 0
#if np.max(size_samp_arr) > max_size_arr[i]:
# max_size_arr[i]= np.max(size_samp_arr)
#while np.sum(size_samp_arr) > 2*sum_size_arr[i]:
# rseed= np.random.randint(10000)
# size_samp_arr= tf.reduce_mean(stat_model(ml_param_vec).sample(10000, seed= tfp.random.sanitize_seed(rseed)), axis= 0).numpy()
# std_size_arr= tf.math.reduce_std(stat_model(ml_param_vec).sample(10000, seed= tfp.random.sanitize_seed(rseed)), axis= 0).numpy()
#if np.sum(size_samp_arr) > sum_size_arr[i]:
# sum_size_arr[i]= np.sum(size_samp_arr)
except KeyError:
if mean_freqs == 0:
mean_burnarea_tot[m]= 0 #current kludge and needs to be fixed
high_1sig_burnarea_tot[m]= 0
low_1sig_burnarea_tot[m]= 0
#if debug:
# fire_ind_grid.append(np.array([0]))
# ml_param_grid.append(np.array([np.zeros(3*ncomps, dtype= np.float32)]))
reg_indx_arr= (i+1)*np.ones(tot_months, dtype= np.int64)
reg_size_df= reg_size_df.append(pd.DataFrame({'mean_size': mean_burnarea_tot, 'low_1sig_size': low_1sig_burnarea_tot, 'high_1sig_size': high_1sig_burnarea_tot, \
'reg_indx': reg_indx_arr}), ignore_index=True)
if debug:
return reg_size_df, fire_ind_grid, ml_param_grid
else:
return reg_size_df
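# Illustrative usage sketch (comments only; not part of the original script). The argument values
# mirror the call made inside fire_size_loco() further below and are assumptions for demonstration:
#
#   reg_size_df= size_pred_func(mdn_model= mdn_size_gpd, stat_model= gpd_model,
#                               size_test_df= fire_size_test, X_test_dat= X_sizes_test,
#                               max_size_arr= max_fire_train_arr, sum_size_arr= sum_fire_train_arr,
#                               ncomps= 2, freq_flag= 'ml', regmlfreq= reg_freq_df,
#                               debug= False, regindx= None, seed= 99)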
def reg_fire_size_func(X_train_dat, y_train_dat, X_val_dat, y_val_dat, size_test_df, X_test_dat, custom_ml_model= None, max_size_arr= None, sum_size_arr= None, \
func_flag= 'gpd', lnc_arr= [2, 16, 2], initializer= "he_normal", regflag= True, regrate= 0.001, doflag= True,\
epochs= 500, bs= 32, freq_flag= 'ml', regmlfreq= None, freqs_data= None, samp_weights= False, \
samp_weight_arr= None, loco= False, debug= False, regindx= None, rseed= None):
    # Calculates the predicted fire burned areas as well as their 1 sigma uncertainties for all regions
    if rseed is None:
rseed= np.random.randint(100)
tf.random.set_seed(rseed)
X_train_dat= np.array(X_train_dat, dtype= np.float32)
X_val_dat= np.array(X_val_dat, dtype= np.float32)
if func_flag == 'gpd':
n_layers, n_neurons, n_comps= lnc_arr
stat_model= gpd_model
loss_func= gpd_loss
acc_func= gpd_accuracy
elif func_flag == 'lognorm':
n_layers, n_neurons, n_comps= lnc_arr
stat_model= lognorm_model
loss_func= lognorm_loss
acc_func= lognorm_accuracy
elif func_flag == 'lognorm_gpd':
n_layers, n_neurons, n_comps= lnc_arr
stat_model= lognorm_gpd_model
loss_func= lognorm_gpd_loss
acc_func= lognorm_gpd_accuracy
print("Initialized a MDN with %d layers"%n_layers + " and %d neurons"%n_neurons)
es_mon = EarlyStopping(monitor='val_loss', min_delta=0, patience= 10, verbose=0, mode='auto', restore_best_weights=True)
mdn= MDN_size(layers= n_layers, neurons= n_neurons, components= n_comps, initializer= initializer, reg= regflag, regrate= regrate, dropout= doflag)
mdn.compile(loss=loss_func, optimizer= tf.keras.optimizers.Adam(learning_rate= 1e-4), metrics=[acc_func])
if samp_weights:
h= mdn.fit(x= X_train_dat, y= y_train_dat, epochs= epochs, validation_data=(X_val_dat, y_val_dat), callbacks= [es_mon], batch_size= bs, sample_weight= samp_weight_arr, verbose=0)
else:
h= mdn.fit(x= X_train_dat, y= y_train_dat, epochs= epochs, validation_data=(X_val_dat, y_val_dat), callbacks= [es_mon], batch_size= bs, verbose=0)
print("MDN trained for %d epochs"%len(h.history['loss']))
if loco:
return mdn, h
else:
if debug:
burnarea_df, fire_ind_grid, ml_param_grid= size_pred_func(mdn, stat_model, size_test_df, X_test_dat, max_size_arr, sum_size_arr, ncomps= n_comps, \
freq_flag= freq_flag, regmlfreq= regmlfreq, freqs_data= freqs_data, debug= True, regindx= regindx, seed= rseed)
return burnarea_df, fire_ind_grid, ml_param_grid
else:
burnarea_df= size_pred_func(mdn, stat_model, size_test_df, X_test_dat, max_size_arr, sum_size_arr, ncomps= n_comps, freq_flag= freq_flag, \
regmlfreq= regmlfreq, freqs_data= freqs_data, debug= False, regindx= regindx, seed= rseed)
return burnarea_df
def size_acc_func(pvec, obs_sizes, func_flag= 'gpd'):
if func_flag == 'gpd':
stat_model= gpd_model
    elif func_flag == 'lognorm':
        stat_model= lognorm_model
pmf_pred= stat_model(pvec).prob(obs_sizes)
obspmf= tfd.Empirical(obs_sizes)
pmf_obs= obspmf.cdf(obs_sizes)
acc= 100 - stats.entropy(pmf_obs, qk= pmf_pred) #converting convex KL divergence to concave equivalent
return acc
def fire_size_loco(firefile, reg_freq_df, res= '12km', n_iters= 10, n_epochs= 10, bs= 32, run_id= None):
    # Calculates the variable importance for a fire size MDN using a LOCO approach
n_regions= 18
locoarr= ['VPD', 'Tmax', 'Forest', 'Urban', 'FM1000', 'Prec', 'Antprec', 'Ant_VPD', 'Avgprec', 'FFWI']
list_of_lists= []
size_train_df= pd.read_hdf('../data/clim_fire_size_%s_train_data.h5'%res)
max_fire_train_arr= []
sum_fire_train_arr= []
for r in range(n_regions):
max_fire_train_arr.append(np.max(np.concatenate(\
[size_train_df.groupby('reg_indx').get_group(r+1).groupby('fire_month').get_group(k).fire_size.to_numpy()/1e6 \
for k in size_train_df.groupby('reg_indx').get_group(r+1).groupby('fire_month').groups.keys()])))
sum_fire_train_arr.append(np.max([np.sum(\
size_train_df.groupby('reg_indx').get_group(r+1).groupby('fire_month').get_group(k).fire_size.to_numpy()/1e6) \
for k in size_train_df.groupby('reg_indx').get_group(r+1).groupby('fire_month').groups.keys()]))
max_fire_train_arr= np.asarray(max_fire_train_arr)
sum_fire_train_arr= np.asarray(sum_fire_train_arr)
for it in tqdm(range(n_iters)):
rseed= np.random.randint(100)
for var in range(len(locoarr) + 1):
if var == 0: # 0 corresponds to all variables
dropvarlist= ['CAPE', 'Solar', 'Ant_Tmax', 'RH', 'Ant_RH']
else:
print("Loading predictor variable data without %s"%locoarr[var - 1])
dropvarlist= ['CAPE', 'Solar', 'Ant_Tmax', 'RH', 'Ant_RH', locoarr[var - 1]]
X_sizes_train, X_sizes_val, y_sizes_train, y_sizes_val, fire_size_train, fire_size_test, X_sizes_test, \
y_sizes_test= fire_size_data(res= res, dropcols= dropvarlist)
mdn_size_gpd, _= reg_fire_size_func(X_train_dat= X_sizes_train, y_train_dat= y_sizes_train, X_val_dat= X_sizes_val, y_val_dat= y_sizes_val, \
size_test_df= fire_size_test, X_test_dat= X_sizes_test, max_size_arr= max_fire_train_arr, sum_size_arr= sum_fire_train_arr, \
func_flag= 'gpd', lnc_arr= [2, 8, 2], epochs= n_epochs, bs= bs, loco= True, rseed= rseed)
mdn_size_gpd.save('../sav_files/loco_size_runs_%s'%run_id + '/mdn_%s'%res + '_gpd_iter_run_%d'%(it+1) + '_var_%d'%(var))
for reg in range(n_regions):
reg_ml_size_df, fire_ind_arr, ml_param_arr= size_pred_func(mdn_model= mdn_size_gpd, stat_model= gpd_model, size_test_df= fire_size_test, \
X_test_dat= X_sizes_test, max_size_arr= max_fire_train_arr, sum_size_arr= sum_fire_train_arr, ncomps= 2, freq_flag= 'ml', \
regmlfreq= reg_freq_df, debug= True, regindx= (reg + 1), seed= rseed)
reg_sizes= y_sizes_test[np.concatenate(fire_ind_arr)]
param_vec= [item for sublist in ml_param_arr for item in sublist] # neat hack from: https://stackoverflow.com/questions/952914/
obs_sizes= mon_burned_area(firefile, (reg + 1))[372:]
mean_sizes= reg_ml_size_df['mean_size']
emp_accuracy= size_acc_func(pvec= param_vec, obs_sizes= reg_sizes, func_flag= 'gpd')
mod_accuracy= gpd_accuracy(reg_sizes, param_vec)
mod_loss= gpd_loss(reg_sizes, param_vec)
pearson_r= stats.pearsonr(obs_sizes, mean_sizes)
list_of_lists.append([it + 1, var, reg + 1, pearson_r[0], emp_accuracy, mod_accuracy.numpy(), mod_loss.numpy()])
var_df= pd.DataFrame(list_of_lists, columns=["Iteration", "Variable", "reg_indx", "Pearson_r", "Emp_Accuracy", "Mod_Accuracy", "Loss"])
return var_df
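# Illustrative post-processing sketch (an assumption, not present in the original code): the LOCO
# importance of a predictor can be summarized from var_df by comparing the skill of the full model
# (Variable == 0) against each reduced model, e.g. via the drop in Pearson correlation per region:
#
#   base= var_df[var_df['Variable'] == 0].groupby('reg_indx')['Pearson_r'].mean()
#   loco= var_df[var_df['Variable'] > 0].groupby(['Variable', 'reg_indx'])['Pearson_r'].mean()
#   importance= base - loco  # a larger drop implies a more important predictor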
## ----------------------------------------------------------------- Random forest functions ----------------------------------------------------------------------------
def rf_fire_grid_run(clim_grid_train_df, rb_frac, n_features= 36, dropcols= ['RH', 'Ant_RH'], n_trees= 100, threshold= 0.4, criterion= 'gini', \
test_data= True, clim_grid_test_df= None):
# Creates a RF classifier instance for predicting fire probabilities at the grid scale
rseed= np.random.randint(100)
df1= clim_grid_train_df[clim_grid_train_df['fire_freq']==1] #.iloc[0:10000]
n1= len(df1)
n2= rb_frac*n1
df2= clim_grid_train_df[clim_grid_train_df['fire_freq']==0]
df2= df2.sample(n= int(n2))
df_train= pd.concat([df1, df2], sort= False).sample(frac= 1).reset_index(drop=True) #shuffling the rows
y_r = np.array(df_train.fire_freq)
X_r = df_train.iloc[:, 0:n_features].drop(columns= dropcols)
scaler= StandardScaler()
X_r= scaler.fit_transform(X_r)
X_r= np.array(X_r, dtype=np.float32)
X_train, X_val, y_train, y_val = train_test_split(X_r, y_r, test_size=0.3, random_state= 99)
rf= RandomForestClassifier(n_estimators= n_trees, criterion= criterion, random_state= rseed)
forest= rf.fit(X_train, y_train)
print("Trained the RF classifer on %d data points."%len(X_train))
    if threshold is None:
predictions= rf.predict(X_val)
errors= abs(predictions - y_val)
else:
predicted_thresh= rf.predict_proba(X_val)
predictions= (predicted_thresh[:, 1] >= threshold).astype('int')
errors= abs(predictions - y_val)
print('Training MAE:', round(np.mean(errors), 6))
train_accuracy= metrics.accuracy_score(y_val, predictions)
train_f1_score= metrics.f1_score(y_val, predictions)
train_recall= metrics.recall_score(y_val, predictions)
train_metrics= [train_accuracy, train_f1_score, train_recall]
if test_data:
df3= clim_grid_test_df[clim_grid_test_df['fire_freq']==1] #.iloc[10000:]
df4= clim_grid_test_df[clim_grid_test_df['fire_freq']==0]
df4= df4.sample(1000000)
df_test= pd.concat([df3, df4], sort= False).sample(frac= 1).reset_index(drop=True) #shuffling the rows
y= np.array(df_test.fire_freq)
X= df_test.iloc[:, 0:n_features].drop(columns= dropcols)
        X= scaler.transform(X) #same scaler as fitted on the training data
X= np.array(X, dtype=np.float32)
        if threshold is None:
predictions= rf.predict(X)
errors= abs(predictions - y)
else:
predicted_thresh= rf.predict_proba(X)
predictions= (predicted_thresh[:, 1] >= threshold).astype('int')
errors= abs(predictions - y)
print('Test MAE:', round(np.mean(errors), 6))
test_accuracy= metrics.accuracy_score(y, predictions)
test_f1_score= metrics.f1_score(y, predictions)
test_recall= metrics.recall_score(y, predictions)
test_metrics= [test_accuracy, test_f1_score, test_recall]
return rf, forest, train_metrics, test_metrics
else:
return rf, forest, train_metrics
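# Illustrative usage sketch (comments only; the dataframes below are hypothetical placeholders):
#
#   rf_clf, forest, train_metrics, test_metrics= rf_fire_grid_run(clim_grid_train_df= clim_df_train,
#                                                 rb_frac= 3, n_trees= 250, threshold= 0.4,
#                                                 test_data= True, clim_grid_test_df= clim_df_test)
#   print(dict(zip(['accuracy', 'f1', 'recall'], test_metrics)))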
def rf_hyperparam_tuning(clim_grid_train_df, dropcols= ['Solar', 'Ant_Tmax', 'RH', 'Ant_RH'], rb_frac_arr= [10, 4, 3, 7/3, 3/2, 1], \
n_trees_arr= [50, 100, 250, 500, 1000], thresh_arr= [0.4, 0.5, None], n_iters= 5, run_id= None, modsave= False, \
test_data= True, clim_grid_test_df= None):
list_of_lists= | |
0, 0, 0, 1, -360, 360],
[49, 54, 0.073, 0.289, 0.0738, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 54, 0.0869, 0.291, 0.073, 9900, 0, 0, 0, 0, 1, -360, 360],
[54, 55, 0.0169, 0.0707, 0.0202, 9900, 0, 0, 0, 0, 1, -360, 360],
[54, 56, 0.00275, 0.00955, 0.00732, 9900, 0, 0, 0, 0, 1, -360, 360],
[55, 56, 0.00488, 0.0151, 0.00374, 9900, 0, 0, 0, 0, 1, -360, 360],
[56, 57, 0.0343, 0.0966, 0.0242, 9900, 0, 0, 0, 0, 1, -360, 360],
[50, 57, 0.0474, 0.134, 0.0332, 9900, 0, 0, 0, 0, 1, -360, 360],
[56, 58, 0.0343, 0.0966, 0.0242, 9900, 0, 0, 0, 0, 1, -360, 360],
[51, 58, 0.0255, 0.0719, 0.01788, 9900, 0, 0, 0, 0, 1, -360, 360],
[54, 59, 0.0503, 0.2293, 0.0598, 9900, 0, 0, 0, 0, 1, -360, 360],
[56, 59, 0.0825, 0.251, 0.0569, 9900, 0, 0, 0, 0, 1, -360, 360],
[56, 59, 0.0803, 0.239, 0.0536, 9900, 0, 0, 0, 0, 1, -360, 360],
[55, 59, 0.04739, 0.2158, 0.05646, 9900, 0, 0, 0, 0, 1, -360, 360],
[59, 60, 0.0317, 0.145, 0.0376, 9900, 0, 0, 0, 0, 1, -360, 360],
[59, 61, 0.0328, 0.15, 0.0388, 9900, 0, 0, 0, 0, 1, -360, 360],
[60, 61, 0.00264, 0.0135, 0.01456, 9900, 0, 0, 0, 0, 1, -360, 360],
[60, 62, 0.0123, 0.0561, 0.01468, 9900, 0, 0, 0, 0, 1, -360, 360],
[61, 62, 0.00824, 0.0376, 0.0098, 9900, 0, 0, 0, 0, 1, -360, 360],
[63, 59, 0, 0.0386, 0, 9900, 0, 0, 0.96, 0, 1, -360, 360],
[63, 64, 0.00172, 0.02, 0.216, 9900, 0, 0, 0, 0, 1, -360, 360],
[64, 61, 0, 0.0268, 0, 9900, 0, 0, 0.985, 0, 1, -360, 360],
[38, 65, 0.00901, 0.0986, 1.046, 9900, 0, 0, 0, 0, 1, -360, 360],
[64, 65, 0.00269, 0.0302, 0.38, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 66, 0.018, 0.0919, 0.0248, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 66, 0.018, 0.0919, 0.0248, 9900, 0, 0, 0, 0, 1, -360, 360],
[62, 66, 0.0482, 0.218, 0.0578, 9900, 0, 0, 0, 0, 1, -360, 360],
[62, 67, 0.0258, 0.117, 0.031, 9900, 0, 0, 0, 0, 1, -360, 360],
[65, 66, 0, 0.037, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360],
[66, 67, 0.0224, 0.1015, 0.02682, 9900, 0, 0, 0, 0, 1, -360, 360],
[65, 68, 0.00138, 0.016, 0.638, 9900, 0, 0, 0, 0, 1, -360, 360],
[47, 69, 0.0844, 0.2778, 0.07092, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 69, 0.0985, 0.324, 0.0828, 9900, 0, 0, 0, 0, 1, -360, 360],
[68, 69, 0, 0.037, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360],
[69, 70, 0.03, 0.127, 0.122, 9900, 0, 0, 0, 0, 1, -360, 360],
[24, 70, 0.00221, 0.4115, 0.10198, 9900, 0, 0, 0, 0, 1, -360, 360],
[70, 71, 0.00882, 0.0355, 0.00878, 9900, 0, 0, 0, 0, 1, -360, 360],
[24, 72, 0.0488, 0.196, 0.0488, 9900, 0, 0, 0, 0, 1, -360, 360],
[71, 72, 0.0446, 0.18, 0.04444, 9900, 0, 0, 0, 0, 1, -360, 360],
[71, 73, 0.00866, 0.0454, 0.01178, 9900, 0, 0, 0, 0, 1, -360, 360],
[70, 74, 0.0401, 0.1323, 0.03368, 9900, 0, 0, 0, 0, 1, -360, 360],
[70, 75, 0.0428, 0.141, 0.036, 9900, 0, 0, 0, 0, 1, -360, 360],
[69, 75, 0.0405, 0.122, 0.124, 9900, 0, 0, 0, 0, 1, -360, 360],
[74, 75, 0.0123, 0.0406, 0.01034, 9900, 0, 0, 0, 0, 1, -360, 360],
[76, 77, 0.0444, 0.148, 0.0368, 9900, 0, 0, 0, 0, 1, -360, 360],
[69, 77, 0.0309, 0.101, 0.1038, 9900, 0, 0, 0, 0, 1, -360, 360],
[75, 77, 0.0601, 0.1999, 0.04978, 9900, 0, 0, 0, 0, 1, -360, 360],
[77, 78, 0.00376, 0.0124, 0.01264, 9900, 0, 0, 0, 0, 1, -360, 360],
[78, 79, 0.00546, 0.0244, 0.00648, 9900, 0, 0, 0, 0, 1, -360, 360],
[77, 80, 0.017, 0.0485, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
[77, 80, 0.0294, 0.105, 0.0228, 9900, 0, 0, 0, 0, 1, -360, 360],
[79, 80, 0.0156, 0.0704, 0.0187, 9900, 0, 0, 0, 0, 1, -360, 360],
[68, 81, 0.00175, 0.0202, 0.808, 9900, 0, 0, 0, 0, 1, -360, 360],
[81, 80, 0, 0.037, 0, 9900, 0, 0, 0.935, 0, 1, -360, 360],
[77, 82, 0.0298, 0.0853, 0.08174, 9900, 0, 0, 0, 0, 1, -360, 360],
[82, 83, 0.0112, 0.03665, 0.03796, 9900, 0, 0, 0, 0, 1, -360, 360],
[83, 84, 0.0625, 0.132, 0.0258, 9900, 0, 0, 0, 0, 1, -360, 360],
[83, 85, 0.043, 0.148, 0.0348, 9900, 0, 0, 0, 0, 1, -360, 360],
[84, 85, 0.0302, 0.0641, 0.01234, 9900, 0, 0, 0, 0, 1, -360, 360],
[85, 86, 0.035, 0.123, 0.0276, 9900, 0, 0, 0, 0, 1, -360, 360],
[86, 87, 0.02828, 0.2074, 0.0445, 9900, 0, 0, 0, 0, 1, -360, 360],
[85, 88, 0.02, 0.102, 0.0276, 9900, 0, 0, 0, 0, 1, -360, 360],
[85, 89, 0.0239, 0.173, 0.047, 9900, 0, 0, 0, 0, 1, -360, 360],
[88, 89, 0.0139, 0.0712, 0.01934, 9900, 0, 0, 0, 0, 1, -360, 360],
[89, 90, 0.0518, 0.188, 0.0528, 9900, 0, 0, 0, 0, 1, -360, 360],
[89, 90, 0.0238, 0.0997, 0.106, 9900, 0, 0, 0, 0, 1, -360, 360],
[90, 91, 0.0254, 0.0836, 0.0214, 9900, 0, 0, 0, 0, 1, -360, 360],
[89, 92, 0.0099, 0.0505, 0.0548, 9900, 0, 0, 0, 0, 1, -360, 360],
[89, 92, 0.0393, 0.1581, 0.0414, 9900, 0, 0, 0, 0, 1, -360, 360],
[91, 92, 0.0387, 0.1272, 0.03268, 9900, 0, 0, 0, 0, 1, -360, 360],
[92, 93, 0.0258, 0.0848, 0.0218, 9900, 0, 0, 0, 0, 1, -360, 360],
[92, 94, 0.0481, 0.158, 0.0406, 9900, 0, 0, 0, 0, 1, -360, 360],
[93, 94, 0.0223, 0.0732, 0.01876, 9900, 0, 0, 0, 0, 1, -360, 360],
[94, 95, 0.0132, 0.0434, 0.0111, 9900, 0, 0, 0, 0, 1, -360, 360],
[80, 96, 0.0356, 0.182, 0.0494, 9900, 0, 0, 0, 0, 1, -360, 360],
[82, 96, 0.0162, 0.053, 0.0544, 9900, 0, 0, 0, 0, 1, -360, 360],
[94, 96, 0.0269, 0.0869, 0.023, 9900, 0, 0, 0, 0, 1, -360, 360],
[80, 97, 0.0183, 0.0934, 0.0254, 9900, 0, 0, 0, 0, 1, -360, 360],
[80, 98, 0.0238, 0.108, 0.0286, 9900, 0, 0, 0, 0, 1, -360, 360],
[80, 99, 0.0454, 0.206, 0.0546, 9900, 0, 0, 0, 0, 1, -360, 360],
[92, 100, 0.0648, 0.295, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
[94, 100, 0.0178, 0.058, 0.0604, 9900, 0, 0, 0, 0, 1, -360, 360],
[95, 96, 0.0171, 0.0547, 0.01474, 9900, 0, 0, 0, 0, 1, -360, 360],
[96, 97, 0.0173, 0.0885, 0.024, 9900, 0, 0, 0, 0, 1, -360, 360],
[98, 100, 0.0397, 0.179, 0.0476, 9900, 0, 0, 0, 0, 1, -360, 360],
[99, 100, 0.018, 0.0813, 0.0216, 9900, 0, 0, 0, 0, 1, -360, 360],
[100, 101, 0.0277, 0.1262, 0.0328, 9900, 0, 0, 0, 0, 1, -360, 360],
[92, 102, 0.0123, 0.0559, 0.01464, 9900, 0, 0, 0, 0, 1, -360, 360],
[101, 102, 0.0246, 0.112, 0.0294, 9900, 0, 0, 0, 0, 1, -360, 360],
[100, 103, 0.016, 0.0525, 0.0536, 9900, 0, 0, 0, 0, 1, -360, 360],
[100, 104, 0.0451, 0.204, 0.0541, 9900, 0, 0, 0, 0, 1, -360, 360],
[103, 104, 0.0466, 0.1584, 0.0407, 9900, 0, 0, 0, 0, 1, -360, 360],
[103, 105, 0.0535, 0.1625, 0.0408, 9900, 0, 0, 0, 0, 1, -360, 360],
[100, 106, 0.0605, 0.229, 0.062, 9900, 0, 0, 0, 0, 1, -360, 360],
[104, 105, 0.00994, 0.0378, 0.00986, 9900, 0, 0, 0, 0, 1, -360, 360],
[105, 106, 0.014, 0.0547, 0.01434, 9900, 0, 0, 0, 0, 1, -360, 360],
[105, 107, 0.053, 0.183, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
[105, 108, 0.0261, 0.0703, 0.01844, 9900, 0, 0, 0, 0, 1, -360, 360],
[106, 107, 0.053, 0.183, 0.0472, 9900, 0, 0, 0, 0, 1, -360, 360],
| |
"""
Inference Dataset
-----------------
"""
from abc import ABC, abstractmethod
from typing import Optional, Sequence, Tuple, Union
import numpy as np
from torch.utils.data import Dataset
from darts import TimeSeries
from darts.logging import raise_if_not
from .utils import CovariateType
class InferenceDataset(ABC, Dataset):
def __init__(self):
"""
Abstract class for all darts torch inference dataset.
It can be used as models' inputs, to obtain simple forecasts on each `TimeSeries`
(using covariates if specified).
The first elements of the tuples it contains are numpy arrays (which will be translated to torch tensors
by the torch DataLoader). The last elements of the tuples are the (past) target TimeSeries, which is
needed in order to properly construct the time axis of the forecast series.
"""
@abstractmethod
def __len__(self) -> int:
pass
@abstractmethod
def __getitem__(self, idx: int):
pass
@staticmethod
def _covariate_indexer(
ts_idx: int,
target_series: TimeSeries,
covariate_series: TimeSeries,
cov_type: CovariateType,
input_chunk_length: int,
output_chunk_length: int,
n: int,
):
"""returns tuple of (past_start, past_end, future_start, future_end)"""
# get the main covariate type: CovariateType.PAST or CovariateType.FUTURE
main_cov_type = (
CovariateType.PAST
if cov_type is CovariateType.PAST
else CovariateType.FUTURE
)
raise_if_not(
main_cov_type in [CovariateType.PAST, CovariateType.FUTURE],
"`main_cov_type` must be one of `(CovariateType.PAST, CovariateType.FUTURE)`",
)
# we need to use the time index (datetime or integer) here to match the index with the covariate series
past_start = target_series.time_index[-input_chunk_length]
past_end = target_series.time_index[-1]
if main_cov_type is CovariateType.PAST:
future_end = past_end + max(0, n - output_chunk_length) * target_series.freq
else: # CovariateType.FUTURE
future_end = past_end + max(n, output_chunk_length) * target_series.freq
future_start = (
past_end + target_series.freq if future_end != past_end else future_end
)
if input_chunk_length == 0: # for regression ensemble models
past_start, past_end = future_start, future_start
# check if case specific indexes are available
case_start = future_start if cov_type is CovariateType.FUTURE else past_start
raise_if_not(
covariate_series.start_time() <= case_start,
f"For the given forecasting case, the provided {main_cov_type.value} covariates at dataset index "
f"`{ts_idx}` do not extend far enough into the past. The {main_cov_type.value} covariates must start at "
f"time step `{case_start}`, whereas now they start at time step `{covariate_series.start_time()}`.",
)
raise_if_not(
covariate_series.end_time() >= future_end,
f"For the given forecasting horizon `n={n}`, the provided {main_cov_type.value} covariates "
f"at dataset index `{ts_idx}` do not extend far enough into the future. As `"
f"{'n > output_chunk_length' if n > output_chunk_length else 'n <= output_chunk_length'}"
f"` the {main_cov_type.value} covariates must end at time step `{future_end}`, "
f"whereas now they end at time step `{covariate_series.end_time()}`.",
)
# extract the index position (index) from time_index value
cov_start = covariate_series.time_index.get_loc(past_start)
cov_end = covariate_series.time_index.get_loc(future_end) + 1
return cov_start, cov_end
class GenericInferenceDataset(InferenceDataset):
def __init__(
self,
target_series: Union[TimeSeries, Sequence[TimeSeries]],
covariates: Optional[Union[TimeSeries, Sequence[TimeSeries]]] = None,
n: int = 1,
input_chunk_length: int = 12,
output_chunk_length: int = 1,
covariate_type: CovariateType = CovariateType.PAST,
):
"""
Contains (past_target, past_covariates | historic_future_covariates, future_past_covariates | future_covariate).
"future_past_covariates" are past covariates that happen to be also known in the future - those
are needed for forecasting with n > output_chunk_length by any model relying on past covariates.
        For this reason, when n > output_chunk_length, this dataset will also emit the "future past_covariates".
"historic_future_covariates" are historic future covariates that are given for the input_chunk in the past.
Parameters
----------
target_series
The target series that are to be predicted into the future.
covariates
Optionally, one or a sequence of `TimeSeries` containing either past or future covariates. If covariates
            were used during training, the same type of covariates must be supplied at prediction time.
n
Forecast horizon: The number of time steps to predict after the end of the target series.
input_chunk_length
The length of the target series the model takes as input.
output_chunk_length
The length of the target series the model emits in output.
"""
super().__init__()
self.target_series = (
[target_series] if isinstance(target_series, TimeSeries) else target_series
)
self.covariates = (
[covariates] if isinstance(covariates, TimeSeries) else covariates
)
self.covariate_type = covariate_type
self.n = n
self.input_chunk_length = input_chunk_length
self.output_chunk_length = output_chunk_length
raise_if_not(
(covariates is None or len(self.target_series) == len(self.covariates)),
"The number of target series must be equal to the number of covariates.",
)
def __len__(self):
return len(self.target_series)
def __getitem__(
self, idx: int
) -> Tuple[np.ndarray, Optional[np.ndarray], Optional[np.ndarray], TimeSeries]:
target_series = self.target_series[idx]
raise_if_not(
len(target_series) >= self.input_chunk_length,
f"All input series must have length >= `input_chunk_length` ({self.input_chunk_length}).",
)
# extract past target values
past_target = target_series.values(copy=False)[-self.input_chunk_length :]
# optionally, extract covariates
cov_past, cov_future = None, None
covariate_series = None if self.covariates is None else self.covariates[idx]
if covariate_series is not None:
# get start and end indices (integer) of the covariates including historic and future parts
cov_start, cov_end = self._covariate_indexer(
ts_idx=idx,
target_series=target_series,
covariate_series=covariate_series,
cov_type=self.covariate_type,
input_chunk_length=self.input_chunk_length,
output_chunk_length=self.output_chunk_length,
n=self.n,
)
# extract covariate values and split into a past (historic) and future part
covariate = covariate_series.values(copy=False)[cov_start:cov_end]
if self.input_chunk_length != 0: # regular models
cov_past, cov_future = (
covariate[: self.input_chunk_length],
covariate[self.input_chunk_length :],
)
else: # regression ensemble models have a input_chunk_length == 0 part for using predictions as input
cov_past, cov_future = covariate, covariate
# set to None if empty array
cov_past = cov_past if cov_past is not None and len(cov_past) > 0 else None
cov_future = (
cov_future if cov_future is not None and len(cov_future) > 0 else None
)
return past_target, cov_past, cov_future, target_series
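# Illustrative usage sketch (not part of the darts source); `series` and `past_covs` are assumed
# to be TimeSeries objects whose time indexes satisfy the checks in _covariate_indexer above:
#
#   ds = GenericInferenceDataset(target_series=series, covariates=past_covs, n=6,
#                                input_chunk_length=12, output_chunk_length=6,
#                                covariate_type=CovariateType.PAST)
#   past_target, cov_past, cov_future, ts = ds[0]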
class PastCovariatesInferenceDataset(InferenceDataset):
def __init__(
self,
target_series: Union[TimeSeries, Sequence[TimeSeries]],
covariates: Optional[Union[TimeSeries, Sequence[TimeSeries]]] = None,
n: int = 1,
input_chunk_length: int = 12,
output_chunk_length: int = 1,
covariate_type: CovariateType = CovariateType.PAST,
):
"""
Contains (past_target, past_covariates, future_past_covariates).
"future_past_covariates" are past covariates that happen to be also known in the future - those
are needed for forecasting with n > output_chunk_length by any model relying on past covariates.
        For this reason, when n > output_chunk_length, this dataset will also emit the "future past_covariates".
Parameters
----------
target_series
The target series that are to be predicted into the future.
covariates
Optionally, some past-observed covariates that are used for predictions. This argument is required
if the model was trained with past-observed covariates.
n
Forecast horizon: The number of time steps to predict after the end of the target series.
input_chunk_length
The length of the target series the model takes as input.
output_chunk_length
            The length of the target series the model emits in output.
"""
super().__init__()
self.ds = GenericInferenceDataset(
target_series=target_series,
covariates=covariates,
n=n,
input_chunk_length=input_chunk_length,
output_chunk_length=output_chunk_length,
covariate_type=covariate_type,
)
def __len__(self):
return len(self.ds)
def __getitem__(
self, idx: int
) -> Tuple[np.ndarray, Optional[np.ndarray], Optional[np.ndarray], TimeSeries]:
return self.ds[idx]
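# Worked indexing note (added for clarity, based on _covariate_indexer above): for past covariates
# the required window ends max(0, n - output_chunk_length) steps after the end of the target, so
# with input_chunk_length=12, output_chunk_length=1 and n=6 the past covariate series must cover
# the last 12 input steps plus 5 additional "future past" steps.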
class FutureCovariatesInferenceDataset(InferenceDataset):
def __init__(
self,
target_series: Union[TimeSeries, Sequence[TimeSeries]],
covariates: Optional[Union[TimeSeries, Sequence[TimeSeries]]] = None,
n: int = 1,
input_chunk_length: int = 12,
covariate_type: CovariateType = CovariateType.FUTURE,
):
"""
Contains (past_target, future_covariates) tuples
Parameters
----------
target_series
The target series that are to be predicted into the future.
covariates
Optionally, some future-known covariates that are used for predictions. This argument is required
if the model was trained with future-known covariates.
n
Forecast horizon: The number of time steps to predict after the end of the target series.
input_chunk_length
The length of the target series the model takes as input.
"""
super().__init__()
self.ds = GenericInferenceDataset(
target_series=target_series,
covariates=covariates,
n=n,
input_chunk_length=input_chunk_length,
output_chunk_length=n,
covariate_type=covariate_type,
)
def __len__(self):
return len(self.ds)
def __getitem__(
self, idx: int
) -> Tuple[np.ndarray, Optional[np.ndarray], TimeSeries]:
past_target_vals, _, cov_future, target_series = self.ds[idx]
return past_target_vals, cov_future, target_series
class DualCovariatesInferenceDataset(InferenceDataset):
def __init__(
self,
target_series: Union[TimeSeries, Sequence[TimeSeries]],
covariates: Optional[Union[TimeSeries, Sequence[TimeSeries]]] = None,
n: int = 1,
input_chunk_length: int = 12,
output_chunk_length: int = 1,
):
"""
Contains (past_target, historic_future_covariates, future_covariates) tuples.
Parameters
----------
target_series
The target series that are to be predicted into the future.
covariates
Optionally, some future-known covariates that are used for predictions. This argument is required
if the model was trained with future-known covariates.
n
Forecast horizon: The number of time steps to predict after the end of the target series.
input_chunk_length
The length of the target series the model takes as input.
output_chunk_length
            The length of the target series the model emits in output.
"""
super().__init__()
# This dataset is in charge of serving historic future covariates
self.ds_past = PastCovariatesInferenceDataset(
target_series=target_series,
covariates=covariates,
n=n,
input_chunk_length=input_chunk_length,
output_chunk_length=output_chunk_length,
covariate_type=CovariateType.HISTORIC_FUTURE,
)
# This dataset is in charge of serving future covariates
self.ds_future = | |
import os
import pickle
import math
from PIL import Image
import warnings
import datetime
import configparser
import numpy as np
np.random.seed(20200501)
warnings.filterwarnings("ignore")
"""Set seed and Init cuda"""
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2' # 只显示 warning 和 Error
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
class ModelUtils:
def __init__(self):
pass
@staticmethod
def model_copy(model, mode=''):
from scripts.mutation.mutation_utils import LayerUtils
import keras
suffix = '_copy_' + mode
if model.__class__.__name__ == 'Sequential':
new_layers = []
for layer in model.layers:
new_layer = LayerUtils.clone(layer)
new_layer.name += suffix
new_layers.append(new_layer)
new_model = keras.Sequential(layers=new_layers, name=model.name + suffix)
else:
new_model = ModelUtils.functional_model_operation(model, suffix=suffix)
s = datetime.datetime.now()
new_model.set_weights(model.get_weights())
e1 = datetime.datetime.now()
td1 = e1 - s
h, m, s = ToolUtils.get_HH_mm_ss(td1)
print("Set model weights! {} hour,{} min,{} sec".format(h, m, s))
del model
return new_model
@staticmethod
def functional_model_operation(model, operation=None, suffix=None):
from scripts.mutation.mutation_utils import LayerUtils
input_layers = {}
output_tensors = {}
model_output = None
for layer in model.layers:
for node in layer._outbound_nodes:
layer_name = node.outbound_layer.name
if layer_name not in input_layers.keys():
input_layers[layer_name] = [layer.name]
else:
input_layers[layer_name].append(layer.name)
output_tensors[model.layers[0].name] = model.input
for layer in model.layers[1:]:
layer_input_tensors = [output_tensors[l] for l in input_layers[layer.name]]
if len(layer_input_tensors) == 1:
layer_input_tensors = layer_input_tensors[0]
if operation is not None and layer.name in operation.keys():
x = layer_input_tensors
cloned_layer = LayerUtils.clone(layer)
if suffix is not None:
cloned_layer.name += suffix
x = operation[layer.name](x, cloned_layer)
else:
cloned_layer = LayerUtils.clone(layer)
if suffix is not None:
cloned_layer.name += suffix
x = cloned_layer(layer_input_tensors)
output_tensors[layer.name] = x
model_output = x
import keras
return keras.Model(inputs=model.inputs, outputs=model_output)
@staticmethod
def save_initial_weights(model):
weights = model.get_weights()
np.save('initial_weights.npy', weights)
@staticmethod
def load_initial_weights(model):
weights = np.load('initial_weights.npy')
model.set_weights(weights)
return model
@staticmethod
def save_layers_output(path, layers_output):
dirname = os.path.dirname(path)
if len(dirname)>0 and (not os.path.exists(dirname)):
os.makedirs(dirname)
with open(path,'wb') as f:
pickle.dump(layers_output,f)
@staticmethod
def load_layers_output(path):
if not os.path.exists(path):
return None
with open(path,'rb') as f:
layers_output = pickle.load(f)
return layers_output
@staticmethod
def layer_divation(model, model_nodes, layer_index, layers_output_1, layers_output_2, epsilon=1e-7):
layer = model.layers[layer_index]
# get all of its input layers
input_layers_index = []
for node in layer._inbound_nodes:
if node not in model_nodes:
continue
for l in node.inbound_layers:
from keras.engine.input_layer import InputLayer
if isinstance(l, InputLayer):
continue
# find the index of l in model
for i, model_layer in enumerate(model.layers):
if l == model_layer:
input_layers_index.append(i)
break
else:
raise Exception('can not find the layer in model')
        # calculate the deviation of the current layer
cur_output_1 = layers_output_1[layer_index]
cur_output_2 = layers_output_2[layer_index]
delta_cur = MetricsUtils.delta(cur_output_1, cur_output_2)[0] # the second value of delta is sum()
if len(input_layers_index) == 0:
delta_pre = 0
else:
delta_pre_list = []
for i in input_layers_index:
pre_output_1 = layers_output_1[i]
pre_output_2 = layers_output_2[i]
delta_pre_list.append(MetricsUtils.delta(pre_output_1, pre_output_2)[0])
delta_pre = np.max(delta_pre_list, axis=0)
return delta_cur, (delta_cur - delta_pre) / (delta_pre + epsilon), [model.layers[i].name for i in input_layers_index]
@staticmethod
def layers_divation(model, layers_output_1, layers_output_2):
relevant_nodes = []
for v in model._nodes_by_depth.values():
relevant_nodes += v
layers_divation = []
for i in range(len(model.layers)):
layers_divation.append(ModelUtils.layer_divation(model, relevant_nodes, i, layers_output_1, layers_output_2))
return layers_divation
@staticmethod
def layers_output(model, input):
from keras import backend as K
# print(K.backend()+" in loadmodel")
from keras.engine.input_layer import InputLayer
get_layer_output = K.function([model.layers[0].input, K.learning_phase()],
[l.output for l in
(model.layers[1:]
if isinstance(model.layers[0], InputLayer)
else model.layers)])
if isinstance(model.layers[0], InputLayer):
layers_output = [input]
layers_output.extend(get_layer_output([input, 0]))
else:
layers_output = get_layer_output([input, 0])
return layers_output
@staticmethod
def layers_input(model, input):
inputs = [[input]]
from keras import backend as K
from keras.engine.input_layer import InputLayer
for i, layer in enumerate(model.layers):
if i == 0:
continue
if i == 1 and isinstance(model.layers[0], InputLayer):
continue
get_layer_input = K.function([model.layers[0].input, K.learning_phase()],
layer.input if isinstance(layer.input, list) else [layer.input])
inputs.append(get_layer_input([input, 0]))
return inputs
@staticmethod
def generate_permutation(size_of_permutation, extract_portion):
assert extract_portion <= 1
num_of_extraction = math.floor(size_of_permutation * extract_portion)
permutation = np.random.permutation(size_of_permutation)
permutation = permutation[:num_of_extraction]
return permutation
@staticmethod
def shuffle(a):
shuffled_a = np.empty(a.shape, dtype=a.dtype)
length = len(a)
permutation = np.random.permutation(length)
index_permutation = np.arange(length)
shuffled_a[permutation] = a[index_permutation]
return shuffled_a
@staticmethod
def compile_model(model, optimer, loss, metric:list):
model.compile(optimizer=optimer,
loss=loss,
metrics=metric)
return model
@staticmethod
def custom_objects():
from scripts.mutation.mutation_utils import ActivationUtils
objects = {}
objects['no_activation'] = ActivationUtils.no_activation
objects['leakyrelu'] = ActivationUtils.leakyrelu
return objects
@staticmethod
def weighted_layer_indices(model):
indices = []
for i, layer in enumerate(model.layers):
weight_count = layer.count_params()
if weight_count > 0:
indices.append(i)
return indices
@staticmethod
def is_valid_model(inputs_backends,backends_nums, threshold=0.95):
invalid_status_num = 0
inputs_values = list(inputs_backends.values())
# results like (1500,1) is valid
if inputs_values[0].shape[1] == 1:
return True
else:
for inputs in inputs_backends.values():
indice_map = {}
for input in inputs:
max_indice = np.argmax(input)
if max_indice not in indice_map.keys():
indice_map[max_indice] = 1
else:
indice_map[max_indice] += 1
for indice in indice_map.keys():
if indice_map[indice] > len(inputs) * threshold:
invalid_status_num += 1
return False if invalid_status_num == backends_nums else True
class DataUtils:
@staticmethod
def image_resize(x, shape):
x_return = []
for x_test in x:
tmp = np.copy(x_test)
img = Image.fromarray(tmp.astype('uint8')).convert('RGB')
img = img.resize(shape, Image.ANTIALIAS)
x_return.append(np.array(img))
return np.array(x_return)
@staticmethod
def get_data_by_exp(exp):
import keras
import keras.backend as K
K.set_image_data_format("channels_last")
lemon_cfg = configparser.ConfigParser()
lemon_cfg.read("./config/experiments.conf")
dataset_dir = lemon_cfg['parameters']['dataset_dir']
x_test = y_test = []
if 'fashion-mnist' in exp:
_, (x_test, y_test) = keras.datasets.fashion_mnist.load_data()
x_test = DataUtils.get_fashion_mnist_data(x_test)
y_test = keras.utils.to_categorical(y_test, num_classes=10)
elif 'mnist' in exp:
_, (x_test, y_test) = keras.datasets.mnist.load_data()
x_test = DataUtils.get_mnist_data(x_test)
y_test = keras.utils.to_categorical(y_test, num_classes=10)
elif 'cifar10' in exp:
_, (x_test, y_test) = keras.datasets.cifar10.load_data()
x_test = DataUtils.get_cifar10_data(x_test)
y_test = keras.utils.to_categorical(y_test, num_classes=10)
elif 'imagenet' in exp:
input_precessor = DataUtils.imagenet_preprocess_dict()
input_shapes_dict = DataUtils.imagenet_shape_dict()
model_name = exp.split("-")[0]
shape = input_shapes_dict[model_name]
data_path = os.path.join(dataset_dir,"sampled_imagenet-1500.npz")
data = np.load(data_path)
x, y = data['x_test'], data['y_test']
x_resize = DataUtils.image_resize(np.copy(x),shape)
x_test = input_precessor[model_name](x_resize)
y_test = keras.utils.to_categorical(y, num_classes=1000)
elif 'sinewave' in exp:
"""
see more details in
https://github.com/StevenZxy/CIS400/tree/f69489c0624157ae86b5d8ddb1fa99c89a927256/code/LSTM-Neural-Network-for-Time-Series-Prediction-master
"""
import pandas as pd
dataframe = pd.read_csv(f"{dataset_dir}/sinewave.csv")
test_size,seq_len = 1500, 50
data_test = dataframe.get("sinewave").values[-(test_size + 50):]
data_windows = []
for i in range(test_size):
data_windows.append(data_test[i:i + seq_len])
data_windows = np.array(data_windows).astype(float).reshape((test_size,seq_len,1))
data_windows = np.array(data_windows).astype(float)
x_test = data_windows[:, :-1]
y_test = data_windows[:, -1, [0]]
elif 'price' in exp:
"""see more details in https://github.com/omerbsezer/LSTM_RNN_Tutorials_with_Demo/tree/master/StockPricesPredictionProject"""
x_test, y_test = DataUtils.get_price_data(dataset_dir)
# TODO: Add your own data preprocessing here
        # Note: The returned inputs should be preprocessed and the labels should be decoded as one-hot vectors that can be fed directly into the model.
# Both of them should be returned in batch, e.g. shape like (1500,28,28,1) and (1500,10)
# elif 'xxx' in exp:
# x_test, y_test = get_your_data(dataset_dir)
return x_test, y_test
@staticmethod
def save_img_from_array(path,array,index,exp):
im = Image.fromarray(array)
#path = path.rstrip("/")
#save_path = "{}/{}_{}.png".format(path,exp,index)
save_path = os.path.join(path,"{}_{}.png".format(exp, index))
im.save(save_path)
return save_path
@staticmethod
def shuffled_data(x, y, bs=None):
ds = x.shape[0]
all_idx = np.arange(ds)
np.random.shuffle(all_idx)
shuffle_idx = all_idx
# shuffle_idx = all_idx[:bs]
return x[shuffle_idx], y[shuffle_idx]
@staticmethod
def get_mnist_data(x_test):
x_test = x_test.astype('float32') / 255.0
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
return x_test
@staticmethod
def get_fashion_mnist_data(x_test):
x_test = x_test.astype('float32') / 255.0
w, h = 28, 28
x_test = x_test.reshape(x_test.shape[0], w, h, 1)
return x_test
@staticmethod
def get_cifar10_data(x_test):
x_test = x_test.astype('float32') / 255.0
w, h = 32, 32
x_test = x_test.reshape(x_test.shape[0], w, h, 3)
return x_test
@staticmethod
def get_price_data(data_dir):
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset) - look_back - 1):
a = dataset[i:(i + look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return np.array(dataX), np.array(dataY)
input_file = os.path.join(data_dir,"DIS.csv")
df = pd.read_csv(input_file, header=None, index_col=None, delimiter=',')
all_y = df[5].values
dataset = all_y.reshape(-1, 1)
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset)
train_size = int(len(dataset) * 0.5)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size, :], dataset[train_size:len(dataset), :]
# reshape into X=t and Y=t+1, timestep 240
look_back = 240
trainX, trainY = create_dataset(train, look_back)
# reshape input to be [samples, time steps, features]
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
return trainX,trainY
@staticmethod
def imagenet_preprocess_dict():
import keras
keras_preprocess_dict = dict()
keras_preprocess_dict['resnet50'] = keras.applications.resnet50.preprocess_input
keras_preprocess_dict['densenet121'] = keras.applications.densenet.preprocess_input
keras_preprocess_dict['mobilenet.1.00.224'] = keras.applications.mobilenet.preprocess_input
keras_preprocess_dict['vgg16'] = keras.applications.vgg16.preprocess_input
keras_preprocess_dict['vgg19'] = keras.applications.vgg19.preprocess_input
keras_preprocess_dict['inception.v3'] = keras.applications.inception_v3.preprocess_input
keras_preprocess_dict['inception.v2'] = keras.applications.inception_resnet_v2.preprocess_input
keras_preprocess_dict['xception'] = keras.applications.xception.preprocess_input
return keras_preprocess_dict
@staticmethod
def imagenet_shape_dict():
image_shapes = dict()
image_shapes['resnet50'] = (224,224)
image_shapes['densenet121'] = (224,224)
image_shapes['mobilenet.1.00.224'] = (224,224)
image_shapes['vgg16'] = (224,224)
image_shapes['vgg19'] = (224, 224)
image_shapes['inception.v3'] = (299,299)
image_shapes['inception.v2'] = (299, 299)
image_shapes['xception'] = (299,299)
return image_shapes
class ToolUtils:
@staticmethod
def select_mutant(roulette,**kwargs):
return roulette.choose_mutant()
@staticmethod
def select_mutator(logic, **kwargs):
# import numpy as np
# return np.random.permutation(mutate_ops)[0]
last_used_mutator = kwargs['last_used_mutator']
return logic.choose_mutator(last_used_mutator)
@staticmethod
def get_HH_mm_ss(td):
days, seconds = td.days, td.seconds
hours = days * 24 + seconds // 3600
minutes = (seconds % 3600) // 60
secs = | |
relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
if self.fields is not None:
r["fields"] = save(
self.fields, top=False, base_url=base_url, relative_uris=relative_uris
)
if self.type is not None:
r["type"] = save(
self.type, top=False, base_url=base_url, relative_uris=relative_uris
)
# top refers to the directory level
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(["fields", "type"])
class EnumSchema(Savable):
"""
Define an enumerated type.
"""
def __init__(
self,
symbols: Any,
type: Any,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.symbols = symbols
self.type = type
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "EnumSchema":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
try:
symbols = load_field(
_doc.get("symbols"),
uri_array_of_strtype_True_False_None,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `symbols` field is not valid because:",
SourceLine(_doc, "symbols", str),
[e],
)
)
try:
type = load_field(
_doc.get("type"),
typedsl_Enum_symbolLoader_2,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `type` field is not valid because:",
SourceLine(_doc, "type", str),
[e],
)
)
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `symbols`, `type`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'EnumSchema'", None, _errors__)
return cls(
symbols=symbols,
type=type,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
if self.symbols is not None:
u = save_relative_uri(self.symbols, base_url, True, None, relative_uris)
if u:
r["symbols"] = u
if self.type is not None:
r["type"] = save(
self.type, top=False, base_url=base_url, relative_uris=relative_uris
)
# top refers to the directory level
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(["symbols", "type"])
class ArraySchema(Savable):
def __init__(
self,
items: Any,
type: Any,
extension_fields: Optional[Dict[str, Any]] = None,
loadingOptions: Optional[LoadingOptions] = None,
) -> None:
if extension_fields:
self.extension_fields = extension_fields
else:
self.extension_fields = CommentedMap()
if loadingOptions:
self.loadingOptions = loadingOptions
else:
self.loadingOptions = LoadingOptions()
self.items = items
self.type = type
@classmethod
def fromDoc(
cls,
doc: Any,
baseuri: str,
loadingOptions: LoadingOptions,
docRoot: Optional[str] = None,
) -> "ArraySchema":
_doc = copy.copy(doc)
if hasattr(doc, "lc"):
_doc.lc.data = doc.lc.data
_doc.lc.filename = doc.lc.filename
_errors__ = []
try:
items = load_field(
_doc.get("items"),
uri_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_False_True_2,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `items` field is not valid because:",
SourceLine(_doc, "items", str),
[e],
)
)
try:
type = load_field(
_doc.get("type"),
typedsl_Array_symbolLoader_2,
baseuri,
loadingOptions,
)
except ValidationException as e:
_errors__.append(
ValidationException(
"the `type` field is not valid because:",
SourceLine(_doc, "type", str),
[e],
)
)
extension_fields: Dict[str, Any] = {}
for k in _doc.keys():
if k not in cls.attrs:
if ":" in k:
ex = expand_url(
k, "", loadingOptions, scoped_id=False, vocab_term=False
)
extension_fields[ex] = _doc[k]
else:
_errors__.append(
ValidationException(
"invalid field `{}`, expected one of: `items`, `type`".format(
k
),
SourceLine(_doc, k, str),
)
)
break
if _errors__:
raise ValidationException("Trying 'ArraySchema'", None, _errors__)
return cls(
items=items,
type=type,
extension_fields=extension_fields,
loadingOptions=loadingOptions,
)
def save(
self, top: bool = False, base_url: str = "", relative_uris: bool = True
) -> Dict[str, Any]:
r: Dict[str, Any] = {}
for ef in self.extension_fields:
r[prefix_url(ef, self.loadingOptions.vocab)] = self.extension_fields[ef]
if self.items is not None:
u = save_relative_uri(self.items, base_url, False, 2, relative_uris)
if u:
r["items"] = u
if self.type is not None:
r["type"] = save(
self.type, top=False, base_url=base_url, relative_uris=relative_uris
)
# top refers to the directory level
if top:
if self.loadingOptions.namespaces:
r["$namespaces"] = self.loadingOptions.namespaces
if self.loadingOptions.schemas:
r["$schemas"] = self.loadingOptions.schemas
return r
attrs = frozenset(["items", "type"])
class File(Savable):
"""
Represents a file (or group of files when `secondaryFiles` is provided) that
will be accessible by tools using standard POSIX file system call API such as
open(2) and read(2).
Files are represented as objects with `class` of `File`. File objects have
a number of properties that provide metadata about the file.
The `location` property of a File is a URI that uniquely identifies the
file. Implementations must support the file:// URI scheme and may support
other schemes such as http://. The value of `location` may also be a
relative reference, in which case it must be resolved relative to the URI
of the document it appears in. Alternately to `location`, implementations
must also accept the `path` property on File, which must be a filesystem
path available on the same host as the CWL runner (for inputs) or the
runtime environment of a command line tool execution (for command line tool
outputs).
    If no `location` or `path` is specified, a file object must specify
    `contents` with the UTF-8 text content of the file. This is a "file
    literal". File literals do not correspond to external resources, but are
    created on disk with `contents` when needed for executing a tool.
    Where appropriate, expressions can return file literals to define new files
    at runtime. The maximum size of `contents` is 64 kilobytes.
The `basename` property defines the filename on disk where the file is
staged. This may differ from the resource name. If not provided,
`basename` must be computed from the last path part of `location` and made
available to expressions.
The `secondaryFiles` property is a list of File or Directory objects that
must be staged in the same directory as the primary file. It is an error
for file names to be duplicated in `secondaryFiles`.
The `size` property is the size in bytes of the File. It must be computed
from the resource and made available to expressions. The `checksum` field
    contains a cryptographic hash of the file content for use in verifying file
contents. Implementations may, at user option, enable or disable
computation of the `checksum` field for performance or other reasons.
However, the ability to compute output checksums is required to pass the
CWL conformance test suite.
When executing a CommandLineTool, the files and secondary files may be
staged to an arbitrary directory, but must use the value of `basename` for
    the filename. The `path` property must be the file path in the context of the
tool execution runtime (local to the compute node, or within the executing
container). All computed properties should be available to expressions.
File literals also must be staged and `path` must be set.
When collecting CommandLineTool outputs, `glob` matching returns file paths
(with the `path` property) and the derived properties. This can all be
modified by `outputEval`. Alternately, if the file `cwl.output.json` is
present in the output, `outputBinding` is ignored.
File objects in the output must provide either a `location` URI or a `path`
property in the context of the tool execution runtime (local to the compute
node, or within the executing container).
When evaluating an ExpressionTool, file objects must be referenced via
`location` (the expression tool does not have access to files on disk so
`path` is meaningless) or as file literals. It is legal to return a file
object with an existing `location` but a different `basename`. The
`loadContents` field of ExpressionTool inputs behaves the same as on
CommandLineTool inputs, however it is not meaningful on the outputs.
An ExpressionTool may forward file references from input to output by using
the same value for `location`.
"""
def __init__(
self,
location: Optional[Any] = None,
path: Optional[Any] = None,
basename: Optional[Any] = None,
dirname: Optional[Any] = None,
nameroot: Optional[Any] = None,
nameext: Optional[Any] = None,
checksum: Optional[Any] = None,
size: Optional[Any] = None,
secondaryFiles: Optional[Any] = None,
        format: Optional[Any] = None,
"""Twitter class for f451 Communications module.
This module adds an abstraction layer to the Tweepy Twitter API package and the main
purpose is to provide a standard interface for some core methods for sending Twitter
status updates and DMs to specific recipients.
Note:
This module assumes that we have an active Twitter developer account.
Note:
We use Twitter API v1.1 for all Tweets as the new Twitter API v2 does not yet
support media uploads.
"""
import logging
import pprint
from typing import Any
from typing import Dict
from typing import List
from typing import Tuple
import tweepy
import f451_comms.constants as const
import f451_comms.providers.provider as provider
import f451_comms.utils as utils
from f451_comms.entity import dedupe_by_attribute
from f451_comms.entity import Entity
from f451_comms.entity import process_entity_list_by_key
from f451_comms.exceptions import CommunicationsError
from f451_comms.exceptions import MissingAttributeError
from f451_comms.processor import AttributeProcessor
# =========================================================
# G L O B A L S A N D H E L P E R S
# =========================================================
SRV_CONFIG_SCTN: str = "f451_twitter"
SRV_PROVIDER: str = "Twitter"
_VALID_IMG_FMTS_: List[str] = ["jpeg", "png", "gif", "webp"]
_MAX_RECIPIENTS_: int = 100 # Max number of DM recipients
_MAX_TWEET_LEN_: int = 280 # Max char len for Tweets
_MAX_DM_LEN_: int = 10000 # Max char len for DMs
_MAX_IMG_SIZE_: int = 5 # Max image size in MB
_MAX_VID_SIZE_: int = 15 # Max video/anim GIF size in MB
_MAX_NUM_IMG_: int = 4 # Max 4 images allowed ...
_MAX_NUM_VID_: int = 1 # ... max 1 video or anim GIF
_MAX_NUM_DM_MEDIA_: int = 1 # Max 1 image/video allowed for DMs
log = logging.getLogger()
pp = pprint.PrettyPrinter(indent=4)
# =========================================================
# T W I T T E R U T I L I T Y C L A S S E S
# =========================================================
class ToTwitter(AttributeProcessor):
"""Processor class for recipient ('to') Twitter name lists.
This class is only used for DM recipients. For normal status updates,
'to'-lists are simply converted to '@'-lists.
Attributes:
inList:
Single Twitter name (string) or list with one or more Twitter names
maxNum:
Max number of Twitter names in list
strict:
If 'True' then exception is raised if Twitter names list is empty
Raises:
MissingAttributeError: 'strict' mode and list of Twitter names is empty
"""
def __init__(self, inList: Any, maxNum: int, strict: bool = True) -> None:
super().__init__(
keyword=const.KWD_TO_TWITTER,
required=const.ATTR_REQUIRED,
)
self._data: List[Entity] = []
self._strict = strict
self._minNum = 1 # we require min 1 Twitter DM recipient
self.maxNum = maxNum
self.data = inList
@property
def data(self) -> List[Entity]:
"""Return 'data' property."""
return self._data
@data.setter
def data(self, inList: Any) -> None:
"""Set 'data' property."""
tmpList = process_recipient_list(inList, self._maxNum)
if len(tmpList) < self._minNum:
log.error("Blank DM recipient.")
self._data = []
self._valid = False
if self._strict:
raise MissingAttributeError("DM recipient cannot be blank.")
else:
self._data = tmpList
self._valid = True
@property
def minNum(self) -> int:
"""Return 'minNum' property."""
return self._minNum
@property
def maxNum(self) -> int:
"""Return 'maxNum' property."""
return self._maxNum
@maxNum.setter
def maxNum(self, val: int) -> None:
"""Set 'maxNum' property."""
# 'max' num cannot be smaller than 'min'
self._maxNum = max(self._minNum, val)
@property
def totNum(self) -> int:
"""Return 'totNum' property."""
return len(self._data)
@property
def raw(self) -> List[Entity]:
"""Return 'raw' list of 'Entity' objects."""
return self._data
@property
def clean(self) -> List[str]:
"""Return 'clean' list of Twitter name strings."""
return [item.twitter for item in self._data]
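# ---------------------------------------------------------------------------
# Illustrative usage sketch (not called anywhere in this module). It shows the
# behavior described in the 'ToTwitter' docstring above: 'strict' mode raises
# 'MissingAttributeError' for an empty recipient list. The handle strings are
# hypothetical, and the sketch assumes 'process_recipient_list()' accepts plain
# Twitter handles and that 'Entity.twitter' echoes them back via '.clean'.
# ---------------------------------------------------------------------------
def _to_twitter_usage_sketch() -> List[str]:
    recipients = ToTwitter(["some_handle", "other_handle"], maxNum=_MAX_RECIPIENTS_)
    try:
        ToTwitter([], maxNum=_MAX_RECIPIENTS_, strict=True)
    except MissingAttributeError:
        pass  # expected: a blank DM recipient list is rejected in 'strict' mode
    return recipients.clean  # hypothetical result: ["some_handle", "other_handle"]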
# =========================================================
# M A I N C L A S S D E F I N I T I O N
# =========================================================
class Twitter(provider.Provider):
"""Twitter class for f451 Communications module.
    Use this support class to send messages via Twitter.
Attributes:
usrKey:
Twitter user/consumer key
usrSecret:
Twitter user/consumer secret
authToken:
Twitter auth/access token
authSecret:
Twitter auth/access token secret
"""
def __init__(
self,
usrKey: str,
usrSecret: str,
authToken: str,
authSecret: str,
**kwargs: Any,
) -> None:
super().__init__(const.SRV_TYPE_FORUMS, SRV_PROVIDER, SRV_CONFIG_SCTN)
self._isValidCreds: bool = False
self._client: Any = None
self.client = (usrKey, usrSecret, authToken, authSecret)
self.defaultTags = kwargs.get(const.KWD_TAGS, "")
self.defaultTo = kwargs.get(const.KWD_TO, kwargs.get(const.KWD_TO_TWITTER, ""))
@property
def client(self) -> Any:
"""Return 'totNum' property."""
return self._client
@client.setter
def client(self, inAuth: Tuple[str, str, str, str]) -> None:
usrKey, usrSecret, authToken, authSecret = inAuth
try:
log.debug("Verifying Twitter credentials")
auth = tweepy.OAuth1UserHandler(usrKey, usrSecret)
auth.set_access_token(authToken, authSecret)
api = tweepy.API(auth, wait_on_rate_limit=True)
api.verify_credentials()
self._isValidCreds = True
self._client = api
except tweepy.errors.TweepyException as e:
log.error("Invalid Twitter credentials")
raise CommunicationsError("Invalid Twitter credentials") from e
@property
def defaultTo(self) -> List[Entity]:
"""Return 'defaultTo' property."""
return self._defaultTo
@defaultTo.setter
def defaultTo(self, val: Any) -> None:
"""Set 'defaultTo' property."""
if isinstance(val, Entity):
self._defaultTo = [val]
elif const.DELIM_VAL in val and const.DELIM_STD in val:
self._defaultTo = [Entity(**utils.process_key_value_map(val))]
else:
self._defaultTo = process_recipient_list(val, _MAX_RECIPIENTS_)
@property
def defaultTags(self) -> List[str]:
"""Return 'defaultTags' property."""
return self._defaultTags
@defaultTags.setter
def defaultTags(self, val: Any) -> None:
"""Set 'defaultTags' property."""
self._defaultTags = utils.convert_attrib_str_to_list(val)
@property
def timeline(self) -> Any:
"""Return 'timeline' property."""
return self._client.home_timeline() if self._client else None
@property
def isValidCreds(self) -> bool:
"""Return 'isValidCreds' property."""
return self._isValidCreds
@staticmethod
def _process_at_list(inList: Any) -> str:
"""Create string with list of '@' usernames.
This method can take a single name string, single Entity, or list of
either and create a string with one or more '@' names.
Args:
inList:
Single name string, list of names, entity, or list of entities
Returns:
String with zero or more '@' names
"""
if isinstance(inList, Entity) or (
isinstance(inList, list)
and all(isinstance(item, Entity) for item in inList)
):
return process_entity_list_by_key(inList, const.KWD_TWITTER, "@", " ")
else:
return utils.process_string_list(inList, "@", " ")
def _process_dm_list(self, inList: List[str]) -> List[Tuple[str, str]]:
"""Get Twitter IDs for a set of given Twitter names.
We always want to create a list of recipients even if there is only 1 DM recipient. This
        allows us to easily send DMs to one or more recipients with a simple loop.
Args:
inList:
List of Twitter name strings
Returns:
List of tuples with recipient Twitter names and IDs.
"""
return [
(dmName, self.get_user_id(dmName, True))
for dmName in inList
if dmName.strip()
]
def _make_msg_content(
self, msg: str, atList: Any = None, tagList: Any = None
) -> str:
"""Assemble Twitter message/post.
        This method will process any name in 'atList' and any tags in 'tagList'
and sandwich the post message in between so the final message string
looks as follows:
'@name Hello world! #someTag'
Args:
msg:
Message string
atList:
Single name string, list of names, entity, or list of entities
tagList:
Single tag string or list of tags
Returns:
Final message string
"""
# Do we need to '@' somebody and/or do we have hashtags?
atList = self._process_at_list(atList)
tagList = utils.process_string_list(tagList, "#", " ")
# Do some 'assemble magic' and cut off at max len
return (
(" ".join([atList.strip(), msg.strip(), tagList.strip()])).strip()[
:_MAX_TWEET_LEN_
]
).strip()
def _make_comm_error(
self, msg: str, data: Any = None, erc: Any = None
) -> Dict[str, Any]:
return {
"provider": self._srvName,
"data": data,
"errors": [str(erc)] if erc else None,
"response": None,
"message": msg,
}
def _upload_media(
self,
inList: List[str],
maxMedia: int = _MAX_NUM_IMG_,
strict: bool = False,
) -> List[str]:
"""Upload media item to Twitter.
        This method will verify that image files have valid types and then upload
        them to Twitter. The resulting media IDs are then used in the status update
message or DM.
Args:
inList:
list of file names
maxMedia:
                max number of media files to upload
strict:
If 'True' then exception is raised when file does not exist,
otherwise empty list is returned
Returns:
List of media IDs
Raises:
CommunicationsError: Twitter/Tweepy API returns an error
FileNotFoundError: Media file cannot be found
"""
if not self._client or not inList or maxMedia < 1:
return []
outList: List[str] = []
try:
mediaList = [
self._client.media_upload(item)
for item in inList[:maxMedia]
if provider.verify_media_file(item, _VALID_IMG_FMTS_, strict)
]
outList = [item.media_id for item in mediaList]
except FileNotFoundError as e:
if strict:
log.error(f"FileNotFoundError: {e}")
raise FileNotFoundError from e
except tweepy.HTTPException as e:
if strict:
log.error(f"HTTP error: {e}")
raise CommunicationsError(errors=[f"HTTPException {e}"]) from e
return outList
def get_user_id(self, dmUserName: str, strict: bool = False) -> str:
"""Get Twitter user ID.
This method provides a standard interface for retrieving the user ID
for a given Twitter username.
Args:
dmUserName:
                Twitter username of the DM recipient
strict:
                If 'True' then exception is raised when user does not exist
# widgets
from tkinter import *
from tkinter import simpledialog
from tkinter import ttk
from tkinter import messagebox as msg
import tkinter as tk
from tkinter.filedialog import asksaveasfilename as save
from tkinter.filedialog import askopenfilename as openfile
from tkinter.colorchooser import *
from tkinterhtml import HtmlFrame
from tk_tools import Calendar
from urllib.request import urlopen
import pyscreenshot as ImageGrab
import cv2, io, glob
import re, time
from random import shuffle
import belfrywidgets as belfry
from gooey import *
import gooey
from ttkwidgets.autocomplete import AutocompleteCombobox
from highlighter import *
from PIL import ImageTk, Image
class CustomNotebook(ttk.Notebook):
"""A ttk Notebook with close buttons on each tab"""
__initialized = False
def __init__(self, *args, **kwargs):
if not self.__initialized:
self.__initialize_custom_style()
            CustomNotebook.__initialized = True  # fix typo; set on the class so the style is created only once
kwargs["style"] = "CustomNotebook"
ttk.Notebook.__init__(self, *args, **kwargs)
self._active = None
self.bind("<ButtonPress-1>", self.on_close_press, True)
self.bind("<ButtonRelease-1>", self.on_close_release)
def on_close_press(self, event):
"""Called when the button is pressed over the close button"""
element = self.identify(event.x, event.y)
if "close" in element:
index = self.index("@%d,%d" % (event.x, event.y))
self.state(['pressed'])
self._active = index
def on_close_release(self, event):
"""Called when the button is released over the close button"""
if not self.instate(['pressed']):
return
element = self.identify(event.x, event.y)
index = self.index("@%d,%d" % (event.x, event.y))
if "close" in element and self._active == index:
self.forget(index)
self.event_generate("<<NotebookTabClosed>>")
self.state(["!pressed"])
self._active = None
def __initialize_custom_style(self):
style = ttk.Style()
self.images = (
PhotoImage("img_close", data='''
R0lGODlhCAAIAMIBAAAAADs7O4+Pj9nZ2Ts7Ozs7Ozs7Ozs7OyH+EUNyZWF0ZWQg
d2l0aCBHSU1QACH5BAEKAAQALAAAAAAIAAgAAAMVGDBEA0qNJyGw7AmxmuaZhWEU
5kEJADs=
'''),
PhotoImage("img_closeactive", data='''
R0lGODlhCAAIAMIEAAAAAP/SAP/bNNnZ2cbGxsbGxsbGxsbGxiH5BAEKAAQALAAA
AAAIAAgAAAMVGDBEA0qNJyGw7AmxmuaZhWEU5kEJADs=
'''),
PhotoImage("img_closepressed", data='''
R0lGODlhCAAIAMIEAAAAAOUqKv9mZtnZ2Ts7Ozs7Ozs7Ozs7OyH+EUNyZWF0ZWQg
d2l0aCBHSU1QACH5BAEKAAQALAAAAAAIAAgAAAMVGDBEA0qNJyGw7AmxmuaZhWEU
5kEJADs=
''')
)
style.element_create("close", "image", "img_close",
("active", "pressed", "!disabled", "img_closepressed"),
("active", "!disabled", "img_closeactive"),
border=8, sticky='')
style.layout("CustomNotebook", [("CustomNotebook.client", {"sticky": "nswe"})])
style.layout("CustomNotebook.Tab", [
("CustomNotebook.tab", {
"sticky": "nswe",
"children": [
("CustomNotebook.padding", {
"side": "top",
"sticky": "nswe",
"children": [
("CustomNotebook.focus", {
"side": "top",
"sticky": "nswe",
"children": [
("CustomNotebook.label", {"side": "left", "sticky": ''}),
("CustomNotebook.close", {"side": "left", "sticky": ''}),
]
})
]
})
]
})
])
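# ---------------------------------------------------------------------------
# Illustrative usage sketch (not called anywhere in this module): a minimal
# window with the CustomNotebook defined above. Each tab gets a close button,
# and closing one fires the <<NotebookTabClosed>> virtual event. The tab
# labels are hypothetical.
# ---------------------------------------------------------------------------
def _demo_custom_notebook():
    root = Tk()
    notebook = CustomNotebook(root, width=400, height=250)
    notebook.pack(fill=BOTH, expand=1)
    for title in ("First tab", "Second tab"):
        notebook.add(Frame(notebook), text=title)
    notebook.bind("<<NotebookTabClosed>>", lambda event: print("tab closed"))
    root.mainloop()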
class HTML(HtmlFrame):
def __init__(self, root):
super().__init__(root, horizontal_scrollbar="auto")
self.pack(expand=1, fill=BOTH, side=RIGHT)
self.set("<html></html>")
def set(self, html):
self.set_content(html)
def seturl(self, url):
self.set_content(urlopen(url).read().decode())
def setfile(self, filename):
with open(filename) as file: self.set_content(file.read())
class ProgressBar(belfry.ProgressBar):
def __init__(self, master, max=200, delay=0.1, bd="#446", fg="red", bg="cyan"):
v = DoubleVar(value=0)
super().__init__(
master,
mode=belfry.DETERMINATE,
maximum=max, variable=v,
bordercolor=bd,
foreground=fg,
background=bg
)
self.delay = int(delay*1000)
self.master = master
self.max = max
self.v = v
self.pack(fill=BOTH, expand=1, padx=10, pady=10)
def start(self, **kwargs):
self.v.set(self.v.get()+1)
if self.v.get() < self.max:
self.after(self.delay, self.start)
class Wiz(belfry.Wizard):
def __init__(self, cancel, finish):
super().__init__(width=640, height=480, cancelcommand=cancel, finishcommand=finish)
self.show = self.show_pane
    def add(self, name, label, last=False, entry=lambda: 1, prev=lambda: 1, next=lambda: 1):
        if last:
            # Wrap the original callbacks instead of shadowing them, otherwise
            # the nested functions would call themselves recursively.
            user_entry, user_prev = entry, prev
            def entry():
                user_entry()
                self.set_finish_enabled(True)
            def prev():
                user_prev()
                self.set_finish_enabled(False)
        super().add_pane(name, label, entrycommand=entry, prevcommand=prev, nextcommand=next)
def isValidGmail(email):
if len(email) > 10: return bool(re.match("^.+<EMAIL>", email))
class GetUserNamePassword(simpledialog.Dialog):
def body(self, master):
Label(master, text="Email:").grid(row=0)
Label(master, text="Password:").grid(row=1)
self.e1 = Entry(master)
self.e2 = Entry(master, show="\u2022")
self.e1.grid(row=0, column=1)
self.e2.grid(row=1, column=1)
return self.e1 # initial focus
def apply(self):
first = str(self.e1.get())
second = str(self.e2.get())
if isValidGmail(first):
self.result = (first, second)
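# ---------------------------------------------------------------------------
# Illustrative usage sketch (not called anywhere in this module):
# simpledialog.Dialog runs modally from its constructor, so the credentials
# collected by GetUserNamePassword above are available on '.result' once the
# dialog is closed ('None' if the email failed validation or the dialog was
# cancelled).
# ---------------------------------------------------------------------------
def _ask_credentials(parent):
    dialog = GetUserNamePassword(parent, title="Sign in")
    return dialog.result  # (email, password) tuple or None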
def get_image(url):
fd = urlopen(url)
image_file = io.BytesIO(fd.read())
im = Image.open(image_file)
return im
class WallPaper(ttk.Frame):
def __init__(self, master):
super().__init__(master)
self.cnt = 1
self.images = list(map(Image.open, glob.glob('images/*.png')))
shuffle(self.images)
img = ImageTk.PhotoImage(self.images[0])
self.pic = Label(self, bg='black', image=img)
self.pic.image = img
self.pic.pack(fill=BOTH, expand=1)
self.pic.bind('<Enter>', self.show_images)
    def show_images(self, event=None):
        img_object = ImageTk.PhotoImage(self.images[self.cnt % len(self.images)])
self.pic.configure(image=img_object)
self.pic.image = img_object
self.cnt += 1
self.after(1000, self.show_images)
    def alter(self):
        img_object = self.images[self.cnt % len(self.images)]
        current_object = self.images[self.cnt % len(self.images) - 1]
        for alpha in range(100):
            current_object = Image.blend(current_object, img_object, alpha / 100)
            time.sleep(0.05)
            photo = ImageTk.PhotoImage(current_object)
            self.pic.configure(image=photo)
            self.pic.image = photo  # keep a reference so Tk does not lose the image
class Stopwatch:
def __init__(self, master):
self.counter = IntVar(value=-3)
self.running = BooleanVar(value=False)
self.frame = Frame(master, bg='white')
self.frame.pack(fill=BOTH, expand=1)
self.label = Label(self.frame, text="Welcome!", fg="black", bg='white', font=("Quicksand", 30, "bold"))
self.label.pack(fill=BOTH, expand=1, side=TOP)
self.start = self.getFormattedButton(text="Start", command=self.start)
self.stop = self.getFormattedButton(text="Stop", command=self.stop, state="disabled")
        self.reset = self.getFormattedButton(text="Reset", command=self.reset, state="disabled")
self.start.pack(fill=BOTH, expand=1, side=TOP)
self.stop.pack(fill=BOTH, expand=1, side=TOP)
self.reset.pack(fill=BOTH, expand=1, side=TOP)
    def getFormattedButton(self, **kwargs):
        return Button(self.frame, fg="black", bg='white', width=15, **kwargs)
def counter_label(self):
if self.running.get():
if self.counter.get() >= 0:
self.label['text'] = str(self.counter.get())
self.label.after(1000, self.counter_label)
else:
self.label['text'] = ["Ready", "Set", "Go"][self.counter.get()]
self.label.after(300, self.counter_label)
self.counter.set(self.counter.get()+1)
def start(self):
"""
Start function of the stopwatch
"""
self.running.set(value=True)
self.counter_label()
self.start['state'] = 'disabled'
self.stop['state'] = 'normal'
self.reset['state'] = 'normal'
def stop(self):
"""
Stop function of the stopwatch
"""
self.running.set(value=False)
self.start['state'] = 'normal'
self.stop['state'] = 'disabled'
self.reset['state'] = 'normal'
def reset(self):
"""
Reset function of the stopwatch
"""
self.counter.set(-3)
self.label['text'] = ''
# If reset is pressed after pressing stop.
if not self.running.get(): self.reset['state'] = 'disabled'
class Canva(Canvas):
def __init__(self, root=None, bg='white'):
image_file = openfile()
if root is None:
master = Tk()
master.title('Canva - An Improved Image Mover')
master.state('zoomed')
else: master = root
self.photo = ImageTk.PhotoImage(Image.open(image_file), master=master)
self.frame = Frame(master, bg='white')
self.frame.pack(fill='both', expand=1)
Canvas.__init__(self, self.frame, bg=bg)
self.pack(fill='both', expand=1)
self.img = self.create_image((0, 0), image=self.photo, state="normal", anchor='nw')
self.bind("<B3-Motion>", self.move_image)
if root is None: master.mainloop()
def move_image(self, event):
self.delete(self.img)
x = event.x
y = event.y
self.img = self.create_image(x, y, image=self.photo, anchor='nw')
self.update()
class Webcam(Frame):
def __init__(self, master):
Frame.__init__(self, master, bg='white')
self.root = Frame(master, bg='white')
width, height = 800, 600
self.cap = cv2.VideoCapture(0)
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
label = Label(self.root, text='Webcam Footage', fg="red", font=('Trebuchet MS', 15), bg='white')
label.pack(side=TOP)
self.lmain = Label(self.root)
self.lmain.pack(fill=BOTH, expand=1, side=TOP)
Button(self.root, text='Play', bg='#19A7A7', fg='white', command=self.toggle_show).pack(side=BOTTOM)
self.boolean = False
_, frame = self.cap.read()
frame = cv2.flip(frame, 1)
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
img = Image.fromarray(cv2image)
imgtk = ImageTk.PhotoImage(image=img)
self.lmain.imgtk = imgtk
self.lmain.configure(image=imgtk)
def pack(self, *args, **kwargs):
self.root.pack(*args, **kwargs)
def toggle_show(self):
self.boolean = not self.boolean
self.show()
def show(self):
_, frame = self.cap.read()
frame = cv2.flip(frame, 1)
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
img = Image.fromarray(cv2image)
imgtk = ImageTk.PhotoImage(image=img)
self.lmain.imgtk = imgtk
self.lmain.configure(image=imgtk)
if self.boolean:
self.lmain.after(10, self.show)
'''
def download_music():
url.urlretrieve("https://i.stack.imgur.com/IgD2r.png", "lenna.png")
url.urlretrieve("https://i.stack.imgur.com/sML82.gif", "lenna.gif")
'''
def cal():
root = Tk()
root.title('clock')
root.iconbitmap('clock.ico')
Calendar(root).pack(fill=BOTH, expand=True)
root.mainloop()
# def calc(master=None):
# if master is None: window = Tk()
# else: window = Frame(master)
#
# ans = None
#
# def btn_click(item):
# expression = express.get()
# expression = expression + str(item)
# input_text.set(expression)
# express.set(expression)
#
# def btn_clear():
# express.set("")
# input_text.set("")
#
# def btn_equal():
# expression = express.get()
# result = str(eval(expression, globals())) # 'eval' function evalutes the string expression directly
# input_text.set(result)
# ans = eval(result)
# express.set('')
#
# express = StringVar(value='')
# input_text = StringVar()
#
# name = Label(window, text='Calculator', fg="red", font=('Trebuchet MS', 15))
# name.pack(side=TOP)
#
# input_frame = Frame(window, width=312, height=0,
# highlightbackground="black", highlightcolor="black", highlightthickness=1)
# input_frame.pack(side=TOP)
#
# input_field = Entry(input_frame, font=('Trebuchet MS', 12), textvariable=input_text,
# width=33, bg="#eee", justify=LEFT)
# input_field.pack(side=BOTTOM)
#
# input_field.pack(ipady=10) # 'ipady' is internal padding to increase the height of input field
# btns_frame = Frame(window, width=312, height=272.5, bg="grey")
# btns_frame.pack()
#
# # first row
# clear = Button(btns_frame, text = "C", fg = "black", width = 30, height = 3, bd = 0, bg = "#eee", cursor = "hand2", command = lambda: btn_clear()).grid(row = 0, column = 0, columnspan = 3, padx = 0.5, pady = 0.5)
# # ans = Button(btns_frame, text = "ans", fg = "black", width = 15, height = 3, bd = 0, bg = "#eee", cursor = "hand2", command = lambda: btn_click("ans")).grid(row = 0, column = 2, columnspan = 1, padx = 0.5, pady = 0.5)
# divide = Button(btns_frame, text = "/", fg = "black", width = 10, height = 3, bd = 0, bg = "#eee", cursor = "hand2", command = lambda: btn_click("/")).grid(row = 0, column = 3, padx = 0.5, pady = 0.5)
#
# # second row
# seven = Button(btns_frame, text = "7", fg = "black", width = 10, height = 3, bd = 0, bg = "#fff", cursor = "hand2", command = lambda: btn_click(7)).grid(row = 1, column = 0, padx = 0.5, pady = 0.5)
# eight = Button(btns_frame, text = "8", fg = "black", width = 10, height = 3, bd = 0, bg = "#fff", cursor = "hand2", command = lambda: btn_click(8)).grid(row = 1, column = 1, padx = 0.5, pady = 0.5)
# nine = Button(btns_frame, text = "9", fg = "black", width = 10, height = 3, bd = 0, bg = "#fff", cursor = "hand2", command = lambda: btn_click(9)).grid(row = 1, column = 2, padx = 0.5, pady = 0.5)
# multiply = Button(btns_frame, text = "*", fg = "black", width = 10, height = 3, bd = 0, bg = "#eee", cursor = "hand2", command = lambda: btn_click("*")).grid(row = 1, column = 3, padx = 0.5, pady = 0.5)
#
# # third row
# four = Button(btns_frame, text = "4", fg = "black", width = 10, height = 3, bd = 0, bg = "#fff", cursor = "hand2", command = lambda: btn_click(4)).grid(row = 2, column = 0, padx = 0.5, pady = 0.5)
import datetime
import math
import pandas as pd
import re
#region Lipid subclass information
fatty_aclys = ['FA', 'NAGly', 'NAGlySer', 'NAOrn', 'NAE', 'CAR', 'FAHFA']
glycerolipids = ['DG', 'EtherDG', 'DGDG', 'EtherDGDG', 'MGDG', 'EtherMGDG',
                 'SQDG', 'EtherSMGDG', 'MG', 'ADGGA', 'DGCC', 'DGGA', 'DGTS/A',
'LDGCC', 'LDGTS/A', 'EtherTG', 'TG', 'OxTG', 'FAHFATG']
glycerophospholipids = ['LPA', 'PA', 'EtherLPC', 'EtherPC', 'LPC', 'PC',
'EtherLPE', 'EtherPE', 'EtherPE(P)', 'PlasmPE', 'LNAPE',
'LPE', 'PE', 'BMP', 'EtherLPG', 'EtherPG', 'HBMP',
'LPG', 'PG', 'CL', 'DLCL', 'MLCL', 'Ac2PIM1', 'Ac2PIM2',
'Ac3PIM2', 'Ac4PIM2', 'EtherPI', 'LPI', 'PI', 'EtherPS',
'LNAPS', 'LPS', 'PS', 'PEtOH', 'PMeOH', 'EtherOxPE',
'OxPC', 'OxPE', 'OxPG', 'OxPI', 'OxPS']
prenol_lipids = ['VAE', 'CoQ', 'Vitamin E', 'Vitamin_E']
saccharolipids = ['LipidA']
sphingolipids = ['GM3', 'SHexCer', 'SHexCer+O',
'Cer_ADS', 'Cer_AP', 'Cer_AS', 'Cer_BDS', 'Cer_BS', 'Cer_HDS',
'Cer_HS', 'Cer_EBDS', 'Cer_EODS', 'Cer_EOS', 'Cer_NDS',
'Cer_NP', 'Cer_NS', 'CerP', 'AHexCer', 'HexCer_ADS',
'HexCer_AP', 'HexCer_AS', 'HexCer_BDS', 'HexCer_BS',
'HexCer_HDS', 'HexCer_HS', 'HexCer_EOS', 'HexCer_NDS',
'HexCer_NP', 'HexCer_NS', 'Hex2Cer', 'Hex3Cer', 'ASM',
'PE_Cer', 'PE_Cer+O', 'PI_Cer', 'SM', 'SM+O',
'PhytoSph', 'SL', 'SL+O', 'DHSph', 'Sph']
sterol_lipids = ['CASulfate', 'BileAcid',
'DCAE', 'GDCAE', 'GLCAE', 'TDCAE', 'TLCAE',
'AHexCAS', 'AHexCS', 'AHexSIS', 'AHexBRS', 'AHexSTS',
'Vitamin D', 'Vitamin_D',
'SSulfate', 'BRSE', 'CASE', 'CE', 'Cholesterol',
'SHex', 'SISE', 'STSE', 'SPE', 'BAHex', 'BASulfate', 'SPEHex',
'SPGHex', 'BRSLPHex', 'BRSPHex', 'CASLPHex', 'CASPHex',
'SISLPHex', 'SISPHex', 'STSLPHex', 'STSPHex']
lipidclass_dict = {'Fatty acyls': fatty_aclys,
'Glycerolipids': glycerolipids,
'Glycerophospholipids': glycerophospholipids,
'Prenol lipids': prenol_lipids,
'Saccharolipids': saccharolipids,
'Sphingolipids': sphingolipids,
'Sterol lipids': sterol_lipids
}
#endregion
#region Acyl chains information
no_acyl_list = ['Others', 'CoQ', 'CA',
'Vitamin D', 'Vitamin_D', 'Vitamin E', 'Vitamin_E',
'Cholesterol', 'SSulfate', 'CASulfate', 'BASulfate',
'SHex', 'SPE', 'BAHex', 'SPEHex', 'SPGHex']
mono_acyl_list = ['FA', 'NAE', 'CAR',
'MG', 'LDGCC', 'LDGTS/A',
'LPA', 'LPC', 'LPE', 'LPG', 'LPI', 'LPS',
'EtherLPC', 'EtherLPE', 'EtherLPG',
'PhytoSph', 'DHSph', 'Sph', 'VAE',
'DCAE', 'GDCAE', 'GLCAE', 'TDCAE', 'TLCAE',
'AHexCAS', 'AHexCS', 'AHexSIS', 'AHexBRS', 'AHexSTS',
'CE', 'BRSE', 'CASE', 'SISE', 'STSE']
tri_acyl_list = ['ADGGA', 'TG', 'EtherTG', 'HBMP', 'MLCL',
'Cer_EBDS', 'Cer_EODS', 'Cer_EOS',
'AHexCer', 'HexCer_EOS', 'ASM']
triacyls_sphingolipids = ['ASM', 'AHexCer', 'HexCer_EOS',
                          'Cer_EBDS', 'Cer_EODS', 'Cer_EOS']
#endregion
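# ---------------------------------------------------------------------------
# Illustrative sketch (not used by the pipeline below): DataPreprocessor
# .add_moiety_info() decomposes annotated lipid names by pulling every
# 'carbons:double-bonds' token out with the regex r'\d+\:\d+'. The helper
# below mirrors that parsing for a single name; the example name is
# hypothetical.
# ---------------------------------------------------------------------------
def _example_parse_moieties(name='PC 34:1|PC 16:0_18:1'):
    brutto_part, _, chain_part = name.partition('|')
    brutto = re.findall(r'\d+\:\d+', brutto_part)[0]
    moieties = re.findall(r'\d+\:\d+', chain_part or brutto_part)
    chains = [tuple(int(v) for v in m.split(':')) for m in moieties]
    return brutto, chains  # e.g. ('34:1', [(16, 0), (18, 1)])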
class DataPreprocessor(object):
def __init__(self, path1, fmt1, path2, fmt2):
dt_now = datetime.datetime.now()
idx = str(dt_now).find('.') +1
stamp = str(dt_now)[:idx].replace('-', '').replace(' ', '_').replace('.', 's')
timestamp = stamp.replace(':', 'h', 1).replace(':', 'm', 1)
directory = path2.rsplit('/', 1)[0]
self.timestamp = timestamp
self.directory = directory
self.path1, self.path2 = path1, path2
self.fmt1, self.fmt2 = fmt1, fmt2
def merge_bipolarity_cid_data(self, output='pos'):
neg_path, neg_format = self.path1, self.fmt1
pos_path, pos_format = self.path2, self.fmt2
neg = self.check_input_format(neg_path, neg_format)
pos = self.check_input_format(pos_path, pos_format)
neg = self.add_moiety_info(neg)
pos = self.add_moiety_info(pos)
neg, pos = self.complement_moiety_info(neg, pos)
raw_neg_path = f'{self.directory}/{self.timestamp}_merged_neg_data.txt'
raw_pos_path = f'{self.directory}/{self.timestamp}_merged_pos_data.txt'
neg.to_csv(raw_neg_path, index=False, sep='\t')
pos.to_csv(raw_pos_path, index=False, sep='\t')
pos = self.extract_annotated_molecules(pos)
neg = self.extract_annotated_molecules(neg)
list_neg_path = f'{self.directory}/{self.timestamp}_annotation_list_neg.txt'
list_pos_path = f'{self.directory}/{self.timestamp}_annotation_list_pos.txt'
neg.to_csv(list_neg_path, index=False, sep='\t')
pos.to_csv(list_pos_path, index=False, sep='\t')
if output == 'pos':
pos['CCS'] = 0
txt_path = f'{self.directory}/{self.timestamp}_txt_library_pos.txt'
pos = self.rename_for_txt_library(pos)
pos.to_csv(txt_path, index=False, sep='\t',
columns=['Name', 'MZ', 'RT', 'Adduct', 'InChIKey',
'Formula', 'SMILES', 'Ontology', 'CCS'])
elif output == 'neg':
neg['CCS'] = 0
txt_path = f'{self.directory}/{self.timestamp}_txt_library_neg.txt'
neg = self.rename_for_txt_library(neg)
neg.to_csv(txt_path, index=False, sep='\t',
columns=['Name', 'MZ', 'RT', 'Adduct', 'InChIKey',
'Formula', 'SMILES', 'Ontology', 'CCS'])
def merge_cid_and_oad_data(self):
cid_path, cid_format = self.path1, self.fmt1
oad_path, oad_format = self.path2, self.fmt2
cid_raw_table = self.check_input_format(cid_path, cid_format)
oad = self.check_input_format(oad_path, oad_format)
cid_table = self.extract_annotated_molecules(cid_raw_table)
mass_tolerance = 0.01
rt_tolerance = 0.5
column_list = ['Metabolite name', 'Adduct type', 'Reference RT',
'Reference m/z', 'Formula', 'Ontology', 'INCHIKEY',
'SMILES']
for row, df in cid_table.iterrows():
front_mz = df['Average Mz'] - mass_tolerance
tail_mz = df['Average Mz'] + mass_tolerance
front_rt = df['Average Rt(min)'] - rt_tolerance
tail_rt = df['Average Rt(min)'] + rt_tolerance
extracted_oad_df = oad[
(oad['Average Mz'] > front_mz)&(oad['Average Mz'] < tail_mz)
&(oad['Average Rt(min)'] > front_rt)&(oad['Average Rt(min)'] < tail_rt)]
df_len = len(extracted_oad_df)
if df_len == 1:
target_row = extracted_oad_df.index[0]
                for column in column_list:
                    oad.loc[target_row:target_row, column] = df[column]
elif df_len >= 2:
target_rows = list(extracted_oad_df.index)
for target_row in target_rows:
                    for column in column_list:
                        oad.loc[target_row:target_row, column] = df[column]
# oad = oad.rename(columns={'Alignment ID': 'ID',
# 'Average Rt(min)': 'RT(min)',
# 'Average Mz': 'Precursor m/z'})
save_path = f'{self.directory}/{self.timestamp}_merged_OAD_data.txt'
oad.to_csv(save_path, index=False, sep='\t')
def rename_for_txt_library(self, df):
df = df.rename(columns={'Metabolite name': 'Name',
'Reference m/z': 'MZ',
'Average Rt(min)': 'RT',
'Adduct type': 'Adduct',
'INCHIKEY': 'InChIKey'})
return df
def check_input_format(self, path, fmt):
if fmt == 'Alignment':
raw_table = pd.read_csv(path, skiprows=[0,1,2,3], sep='\t')
raw_table = raw_table.rename(columns={'Alignment ID': 'ID'})
elif fmt == 'PeakList':
raw_table = pd.read_csv(path, sep='\t')
raw_table = raw_table.rename(columns={'PeakID': 'ID',
'Title': 'Metabolite name',
'RT (min)': 'Average Rt(min)',
'Precursor m/z': 'Average Mz',
'Adduct': 'Adduct type',
'InChIKey': 'INCHIKEY',
'MSMS spectrum': 'MS/MS spectrum'})
return raw_table
def extract_annotated_molecules(self, df):
ex_df = df.dropna(subset=['Metabolite name'])
ex_df = ex_df[ex_df['Metabolite name'] != 'Unknown']
ex_df = ex_df[~ex_df['Metabolite name'].str.startswith('w/o')]
ex_df = ex_df[~ex_df['Metabolite name'].str.startswith('RIKEN')]
ex_df = ex_df[~ex_df['Metabolite name'].str.startswith('Unsettled')]
ex_df = ex_df.fillna({'Reference RT': 0})
return ex_df
def exclude_IS(self, df):
        df = df[~df['Metabolite name'].str.contains(r'\(d\d+\)')]
return df
def add_moiety_info(self, df):
new_cols = ['chains solved', 'Brutto', 'Total chain', 'Total db',
'chain-1', 'db-1', 'chain-2', 'db-2',
'chain-3', 'db-3', 'chain-4', 'db-4']
for col in new_cols:
df[col] = 0 if col != 'chains solved' else False
annotated_df = self.extract_annotated_molecules(df)
names = list(annotated_df['Metabolite name'].values)
ontologies = list(annotated_df['Ontology'].values)
idxs, cols = list(annotated_df.index), list(annotated_df.columns)
col_pos = [cols.index(new) for new in new_cols]
def get_chain_and_db(moiety):
chain_and_db = moiety.split(':')
chain, db = int(chain_and_db[0]), int(chain_and_db[1])
return chain, db
new_values = []
        exception_cls = ['CL', 'AHexCer', 'ASM',
                         'Cer_EBDS', 'Cer_EOS', 'HexCer_EOS']
for name, ontology in zip(names, ontologies):
solved, brutto, total_chain, total_db = False, 0, 0, 0
chian_1, db_1, chian_2, db_2, chian_3, db_3, chian_4, db_4 \
= 0, 0, 0, 0, 0, 0, 0, 0
if '|' in name:
solved = True
split_name = name.split('|')
brutto = re.findall(r'\d+\:\d+', split_name[0])[0]
total_chain, total_db = get_chain_and_db(brutto)
moieties = re.findall(r'\d+\:\d+', split_name[1])
if len(moieties) == 2:
chian_1, db_1 = get_chain_and_db(moieties[0])
chian_2, db_2 = get_chain_and_db(moieties[1])
if ontology in exception_cls:
solved = False
if ontology == 'AHexCer':
#AHexCer 60:2;3O|AHexCer (O-18:1)42:1;3O
chian_1, chian_2 = chian_2, chian_1
db_1, db_2 = db_2, db_1
elif len(moieties) == 3:
chian_1, db_1 = get_chain_and_db(moieties[0])
chian_2, db_2 = get_chain_and_db(moieties[1])
chian_3, db_3 = get_chain_and_db(moieties[2])
elif len(moieties) == 4:
chian_1, db_1 = get_chain_and_db(moieties[0])
chian_2, db_2 = get_chain_and_db(moieties[1])
chian_3, db_3 = get_chain_and_db(moieties[2])
chian_4, db_4 = get_chain_and_db(moieties[3])
else:
if ontology in no_acyl_list:
pass
elif ontology in mono_acyl_list:
solved = True
if ontology in sterol_lipids and '/' in name:
brutto = re.findall(r'\d+\:\d+', name)[1]
else:
brutto = re.findall(r'\d+\:\d+', name)[0]
total_chain, total_db = get_chain_and_db(brutto)
chian_1, db_1 = get_chain_and_db(brutto)
elif '(d' in name:
solved = True
moieties = re.findall(r'\d+\:\d+', name)
if len(moieties) == 2:
chian_1, db_1 = get_chain_and_db(moieties[0])
chian_2, db_2 = get_chain_and_db(moieties[1])
if len(moieties) == 3:
chian_1, db_1 = get_chain_and_db(moieties[0])
chian_2, db_2 = get_chain_and_db(moieties[1])
chian_3, db_3 = get_chain_and_db(moieties[2])
total_chain = chian_1 + chian_2 + chian_3 + chian_4
total_db = db_1 + db_2 + db_3 + db_4
brutto = str(total_chain) + ':' + str(total_db)
else:
brutto = re.findall(r'\d+\:\d+', name)[0]
total_chain, total_db = get_chain_and_db(brutto)
all_info = [solved, brutto, total_chain, total_db,
chian_1, db_1, chian_2, db_2,
chian_3, db_3, chian_4, db_4]
new_values.append(all_info)
# df.loc[idx:idx, new_cols] = all_info
df.iloc[idxs, col_pos] = new_values
return df
def complement_moiety_info(self, raw_neg, raw_pos):
neg = self.extract_annotated_molecules(raw_neg)
pos = self.extract_annotated_molecules(raw_pos)
raw_neg['chains complement'], raw_pos['chains complement'] = '', ''
raw_neg['comple delta RT'], raw_pos['comple delta RT'] = 0, 0
neg, pos = self.exclude_IS(neg), self.exclude_IS(pos)
# temporary change Brutto of PlasmPE
pos = self.temp_change_brutto_of_plasm(pos)
unsolved_neg = neg[(neg['chains solved'] == False)
|(neg['Ontology'] == 'EtherPE')]
unsolved_pos = pos[pos['chains solved'] == False]
comple_cols = ['Metabolite name', 'chains solved',
'chain-1', 'db-1', 'chain-2', 'db-2',
'chain-3', 'db-3', 'chain-4', 'db-4',
'chains complement', 'comple delta RT']
plasm_cols = ['Ontology', 'Brutto', 'Total db']
def get_abs_delta(rt, f_rt):
return self.math_floor((abs(rt-f_rt)), 3)
#region Neg
for row, one in unsolved_neg.iterrows():
brutto = one['Brutto']
ontology = one['Ontology']
rt = one['Average Rt(min)']
if ontology == 'EtherPE':
find = pos[((pos['Ontology']==ontology)|(pos['Ontology']=='PlasmPE'))
&(pos['Brutto']==brutto)
&(pos['chains solved']==True)]
else:
find = pos[(pos['Ontology']==ontology)
&(pos['Brutto']==brutto)
&(pos['chains solved']==True)]
if len(find) > 1:
idxs = list(find.index)
f_rts = list(find['Average Rt(min)'].values)
rt_d = {idx: get_abs_delta(rt, f_rt)
for idx, f_rt in zip(idxs, f_rts)}
cand_idx = sorted(rt_d.items(), key=lambda x:x[1])[0][0]
cols = list(find.columns)
find = find.loc[cand_idx:cand_idx, cols]
if len(find) == 1:
new_ont = find['Ontology'].values[0]
if new_ont == 'PlasmPE':
total_chain = find['Total chain'].values[0]
total_db = find['Total db'].values[0]
brutto = str(total_chain) + ':' + str(total_db)
raw_neg.loc[row:row, plasm_cols] = new_ont, brutto, total_db
name = find['Metabolite name']
chain_1, db_1 = find['chain-1'], find['db-1']
chain_2, db_2 = find['chain-2'], find['db-2']
                chain_3, db_3 = find['chain-3'], find['db-3']
# -*- coding: utf-8 -*-
"""
BSD 3-Clause License
Copyright 2021 National Technology & Engineering Solutions of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights in this software.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
CA_Ensemble_Funcs.py
This file contains the functions that implement the Co-Association Matrix
Ensemble Method for the phase identification task.
Functions:
- SPClustering
- SPClustering_Precomp
- CAEnsemble
Publications related to this method:
<NAME> and <NAME>, “Phase Identification Using Co-Association Matrix Ensemble Clustering,” IET Smart Grid, no. Machine Learning Special Issue, Jun. 2020.
<NAME>, <NAME>, and <NAME>, “Parameter Tuning Analysis for Phase Identification Algorithms in Distribution System Model Calibration,” presented at the KPEC, Apr. 2021.
<NAME>, <NAME>, and <NAME>, “AMI Data Quality And Collection Method Consideration for Improving the Accuracy of Distribution System Models,” presented at the IEEE Photovoltaic Specialists Conference (PVSC), Chicago, IL, USA, 2019.
"""
# Import - Python Libraries
from sklearn.cluster import SpectralClustering
import numpy as np
from copy import deepcopy
# Import - Custom Libraries
import PhaseIdent_Utils as PIUtils
###############################################################################
#
# SPClustering
#
def SPClustering(features,k):
""" This function takes a window of timeseries data for the total number of
customers and the number of desired clusters and performs the spectral
clustering algorithm on that data, returning the cluster labels for each
customer. This is the internal spectral clustering function which is
called for each window (and each value in kVector). These results
are used to build the co-association matrix.
The kernel function has been hardcoded here to be the Radial
Basis Function ('rbf') based on the results of this research.
Parameters
---------
    features: numpy array of float (customers,measurements) - a
'window' of time series measurements where customers with
missing data are removed. Any NaN values in this matrix
will cause the SpectralClustering function to fail.
k: int - Number of clusters
Returns
-------
clusterLabels: list of int - The resulting cluster label of
each customer (1-k)
"""
sp = SpectralClustering(n_clusters=k,affinity='rbf')
clusterLabels = sp.fit_predict(features)
return clusterLabels
# End of SPClustering
###############################################################################
#
# SPClustering_Precomp
#
def SPClustering_Precomp(aggWM,kFinal):
""" This function takes a precomputed affinity matrix, in the form
of a co-association matrix generated by CAEnsemble and will
use that to construct the final clusters representing the three-phases.
Parameters
---------
aggWM: ndarray of float, shape (customers,customers) affinity
matrix of paired/unpaired weights aggregated over all
available windows.
kFinal: int - the number of final clusters. This parameter
should be set based on the feeder topology. Setting this
parameter to 4 of 7 is a good place to start. If the feeder
in question has voltage regulating devices a larger number
of final clusters may be required.
Returns
-------
clusterLabels: list of int - The resulting cluster label of
each customer (1-k)
"""
sp = SpectralClustering(n_clusters=kFinal,n_init=10,assign_labels='discretize',affinity='precomputed')
clusterLabels = sp.fit_predict(aggWM)
return clusterLabels
# End of SPClustering_Precomp
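###############################################################################
#
# _coassociation_sketch (illustrative only)
#
# This sketch restates the ensemble idea from the module header: cluster each
# sliding window with SPClustering, count how often every pair of customers
# lands in the same cluster, and hand the resulting co-association matrix to
# SPClustering_Precomp. The real CAEnsemble function below additionally
# handles missing data, per-window customer subsets, and the kVector sweep;
# this simplified version assumes every window contains all customers.
#
def _coassociation_sketch(windows, kWindow=6, kFinal=4):
    numCust = windows[0].shape[0]
    coAssoc = np.zeros((numCust, numCust))
    for features in windows:
        labels = np.asarray(SPClustering(features, kWindow))
        # Pairwise indicator: 1 where two customers share a cluster label
        coAssoc += (labels[:, None] == labels[None, :]).astype(float)
    coAssoc = coAssoc / len(windows)
    return SPClustering_Precomp(coAssoc, kFinal)
# End of _coassociation_sketch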
###############################################################################
#
# CAEnsemble
#
def CAEnsemble(voltage,kVector,kFinal,custID,windowSize,lowWindowsThresh=4,printLowWinWarningFlag=True):
""" This function implements the ensemble of Spectral Clustering for the
task of phase identification task. The ensemble size is determined by
the number of sliding windows available given the windowSize parameter.
In each window the cluster labels are returned by the spectral clustering
algorithm and that clustering is then used to update a co-association matrix
based on pairwise paired/unpaired information in the cluster labels.
That weight matrix is then used for a final clustering into the final
clusters which represent phase groupings. The original utility phase
labels are not used in this function. The mapping of the final clusters
to particular phases is left to a subsequent step.
For more details, please see this paper:
<NAME> and <NAME>, “Phase Identification Using Co-Association Matrix Ensemble Clustering,” IET Smart Grid, no. Machine Learning Special Issue, Jun. 2020.
Parameters
---------
voltage: numpy array of float (measurements,customers) -
voltage timeseries for each customer. The timeseries
should be pre-processed into per-unit, difference (delta)
representation. This pre-processing is an essential step.
kVector: numpy array of int - a vector of the possible values of
k for the windows
kFinal: int - Number of clusters for the final clustering
custID: list of str - list of customer ids
windowSize: int - The size (in number of measurements) of the
sliding window
lowWindowsThresh: int - the minimum number of windows before
printing a warning that some customers had few windows
due to missing data. The default value is set to 4 if
this parameter is not specified.
printLowWinWarningFlag: boolean - allows supression of the printout
if customer has only a few windows in the ensemble. The
default value is True. If a customer is only present in
a small number of windows the co-association matrix will not
be built adequately for that customer (although it will not
affect other customers). Thus results for customers with
few windows should be considered low confidence predictions
and likely discarded
Returns
-------
finalClusterLabels: numpy array of int (1,customers)
array of the final cluster labels representing the
        phases, but they will not match in number to the actual phases.
Determining which cluster number goes with which real phase
is left for a future step. This parameter is one that
depends on the topology of the feeder. For more discussion
see the paper by <NAME> listed above. Starting values
to try for this parameter might be 4 or 7, topologies with
voltage regulators in the feeder may require a larger
number of final clusters.
noVotesIndex: list of int - list of customer indices that
        did not receive any votes (i.e. were removed from all
windows). This occurs due to missing data for a customer.
If all windows for that customer contain missing data, then
that customer will be eliminated from the analysis.
noVotesIDs: list of str - list of customer ids that did not
receive any votes (i.e. were removed from all windows due
to missing data)
clusteredIDs: list of str (customers) - list of customers IDs
that were clustered during the ensemble. The length of
clusteredIDs plus the length of noVotesIDs should equal
the total number of customers
custWindowCounts: numpy array of int (customers) - the count,
for each customer, of the number of windows that were
included in the analysis, i.e. the number of windows that
were not excluded due to missing data. This count is
significantly affected by the value chosen for the
windowSize parameter. Customers with a low number of
windows in the ensemble should be considered low confidence
in the final prediction as they will not populate the
        co-association matrix properly.
'''
Created on 2016/2/19
:author: hubo
'''
from vlcp.config import defaultconfig
from vlcp.server.module import Module, api, depend, call_api, ModuleNotification
from vlcp.event.runnable import RoutineContainer
from vlcp.service.connection import jsonrpcserver
from vlcp.protocol.jsonrpc import JsonRPCConnectionStateEvent,\
JsonRPCProtocolException, JsonRPCErrorResultException,\
JsonRPCNotificationEvent
from vlcp.event.connection import ConnectionResetException, ResolveRequestEvent,\
ResolveResponseEvent
from vlcp.event.event import Event, withIndices, M_
from vlcp.utils import ovsdb
import socket
from contextlib import closing
@withIndices('systemid', 'connection', 'connmark', 'vhost')
class OVSDBConnectionSetup(Event):
pass
@withIndices('state', 'datapathid', 'systemid', 'name', 'connection', 'connmark', 'vhost', 'bridgeuuid')
class OVSDBBridgeSetup(Event):
UP = 'up'
DOWN = 'down'
class OVSDBBridgeNotAppearException(Exception):
pass
def _get_endpoint(conn):
raddr = getattr(conn, 'remoteaddr', None)
if raddr:
if isinstance(raddr, tuple):
# Ignore port
return raddr[0]
else:
# Unix socket
return raddr
else:
return ''
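# Illustrative sketch (not part of this module): other VLCP modules can react
# to the OVSDBBridgeSetup events defined above by waiting on a matcher from
# inside their own RoutineContainer routine. The hypothetical routine below
# blocks until any bridge comes up and returns its datapath id.
async def _wait_for_any_bridge_up():
    bridge_up = OVSDBBridgeSetup.createMatcher(state=OVSDBBridgeSetup.UP)
    ev, _ = await M_(bridge_up)
    return ev.datapathid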
@defaultconfig
@depend(jsonrpcserver.JsonRPCServer)
class OVSDBManager(Module):
'''
    Manage OVSDB connections
'''
service = True
# Bind to JsonRPCServer vHosts. If not None, should be a list of vHost names e.g. ``['']``
_default_vhostbind = None
# Only acquire information from bridges with this names
_default_bridgenames = None
def __init__(self, server):
Module.__init__(self, server)
self.apiroutine = RoutineContainer(self.scheduler)
self.apiroutine.main = self._manage_conns
self.routines.append(self.apiroutine)
self.managed_conns = {}
self.managed_systemids = {}
self.managed_bridges = {}
self.managed_routines = []
self.endpoint_conns = {}
self.createAPI(api(self.getconnection, self.apiroutine),
api(self.waitconnection, self.apiroutine),
api(self.getdatapathids, self.apiroutine),
api(self.getalldatapathids, self.apiroutine),
api(self.getallconnections, self.apiroutine),
api(self.getbridges, self.apiroutine),
api(self.getbridge, self.apiroutine),
api(self.getbridgebyuuid, self.apiroutine),
api(self.waitbridge, self.apiroutine),
api(self.waitbridgebyuuid, self.apiroutine),
api(self.getsystemids, self.apiroutine),
api(self.getallsystemids, self.apiroutine),
api(self.getconnectionbysystemid, self.apiroutine),
api(self.waitconnectionbysystemid, self.apiroutine),
api(self.getconnectionsbyendpoint, self.apiroutine),
api(self.getconnectionsbyendpointname, self.apiroutine),
api(self.getendpoints, self.apiroutine),
api(self.getallendpoints, self.apiroutine),
api(self.getallbridges, self.apiroutine),
api(self.getbridgeinfo, self.apiroutine),
api(self.waitbridgeinfo, self.apiroutine)
)
self._synchronized = False
async def _update_bridge(self, connection, protocol, bridge_uuid, vhost):
try:
method, params = ovsdb.transact('Open_vSwitch',
ovsdb.wait('Bridge', [["_uuid", "==", ovsdb.uuid(bridge_uuid)]],
["datapath_id"], [{"datapath_id": ovsdb.oset()}], False, 5000),
ovsdb.select('Bridge', [["_uuid", "==", ovsdb.uuid(bridge_uuid)]],
["datapath_id","name"]))
jsonrpc_result, _ = await protocol.querywithreply(method, params, connection, self.apiroutine)
r = jsonrpc_result[0]
if 'error' in r:
raise JsonRPCErrorResultException('Error while acquiring datapath-id: ' + repr(r['error']))
r = jsonrpc_result[1]
if 'error' in r:
raise JsonRPCErrorResultException('Error while acquiring datapath-id: ' + repr(r['error']))
if r['rows']:
r0 = r['rows'][0]
name = r0['name']
dpid = int(r0['datapath_id'], 16)
if self.bridgenames is None or name in self.bridgenames:
self.managed_bridges[connection].append((vhost, dpid, name, bridge_uuid))
self.managed_conns[(vhost, dpid)] = connection
await self.apiroutine.wait_for_send(OVSDBBridgeSetup(OVSDBBridgeSetup.UP,
dpid,
connection.ovsdb_systemid,
name,
connection,
connection.connmark,
vhost,
bridge_uuid))
except JsonRPCProtocolException:
pass
async def _get_bridges(self, connection, protocol):
try:
try:
vhost = protocol.vhost
if not hasattr(connection, 'ovsdb_systemid'):
method, params = ovsdb.transact('Open_vSwitch', ovsdb.select('Open_vSwitch', [], ['external_ids']))
jsonrpc_result, _ = await protocol.querywithreply(method, params, connection, self.apiroutine)
result = jsonrpc_result[0]
system_id = ovsdb.omap_getvalue(result['rows'][0]['external_ids'], 'system-id')
connection.ovsdb_systemid = system_id
else:
system_id = connection.ovsdb_systemid
if (vhost, system_id) in self.managed_systemids:
oc = self.managed_systemids[(vhost, system_id)]
ep = _get_endpoint(oc)
econns = self.endpoint_conns.get((vhost, ep))
if econns:
try:
econns.remove(oc)
except ValueError:
pass
del self.managed_systemids[(vhost, system_id)]
self.managed_systemids[(vhost, system_id)] = connection
self.managed_bridges[connection] = []
ep = _get_endpoint(connection)
self.endpoint_conns.setdefault((vhost, ep), []).append(connection)
method, params = ovsdb.monitor('Open_vSwitch', 'ovsdb_manager_bridges_monitor', {'Bridge':ovsdb.monitor_request(['name', 'datapath_id'])})
try:
jsonrpc_result, _ = await protocol.querywithreply(method, params, connection, self.apiroutine)
except JsonRPCErrorResultException:
# The monitor is already set, cancel it first
method, params = ovsdb.monitor_cancel('ovsdb_manager_bridges_monitor')
await protocol.querywithreply(method, params, connection, self.apiroutine, False)
method, params = ovsdb.monitor('Open_vSwitch', 'ovsdb_manager_bridges_monitor', {'Bridge':ovsdb.monitor_request(['name', 'datapath_id'])})
jsonrpc_result, _ = await protocol.querywithreply(method, params, connection, self.apiroutine)
except Exception:
await self.apiroutine.wait_for_send(OVSDBConnectionSetup(system_id, connection, connection.connmark, vhost))
raise
else:
# Process initial bridges
init_subprocesses = []
if jsonrpc_result and 'Bridge' in jsonrpc_result:
init_subprocesses = [self._update_bridge(connection, protocol, buuid, vhost)
for buuid in jsonrpc_result['Bridge'].keys()]
async def init_process():
try:
await self.apiroutine.execute_all(init_subprocesses)
except Exception:
await self.apiroutine.wait_for_send(OVSDBConnectionSetup(system_id, connection, connection.connmark, vhost))
raise
else:
await self.apiroutine.waitForSend(OVSDBConnectionSetup(system_id, connection, connection.connmark, vhost))
self.apiroutine.subroutine(init_process())
# Wait for notify
notification = JsonRPCNotificationEvent.createMatcher('update', connection, connection.connmark, _ismatch = lambda x: x.params[0] == 'ovsdb_manager_bridges_monitor')
conn_down = protocol.statematcher(connection)
while True:
ev, m = await M_(conn_down, notification)
if m is conn_down:
break
else:
for buuid, v in ev.params[1]['Bridge'].items():
# If a bridge's name or datapath-id is changed, we remove this bridge and add it again
if 'old' in v:
# A bridge is deleted
bridges = self.managed_bridges[connection]
for i in range(0, len(bridges)):
if buuid == bridges[i][3]:
self.scheduler.emergesend(OVSDBBridgeSetup(OVSDBBridgeSetup.DOWN,
bridges[i][1],
system_id,
bridges[i][2],
connection,
connection.connmark,
vhost,
bridges[i][3],
new_datapath_id =
int(v['new']['datapath_id'], 16) if 'new' in v and 'datapath_id' in v['new']
else None))
del self.managed_conns[(vhost, bridges[i][1])]
del bridges[i]
break
if 'new' in v:
# A bridge is added
self.apiroutine.subroutine(self._update_bridge(connection, protocol, buuid, vhost))
except JsonRPCProtocolException:
pass
finally:
del connection._ovsdb_manager_get_bridges
async def _manage_existing(self):
conns = await call_api(self.apiroutine, "jsonrpcserver", "getconnections", {})
vb = self.vhostbind
for c in conns:
if vb is None or c.protocol.vhost in vb:
if not hasattr(c, '_ovsdb_manager_get_bridges'):
c._ovsdb_manager_get_bridges = self.apiroutine.subroutine(self._get_bridges(c, c.protocol))
matchers = [OVSDBConnectionSetup.createMatcher(None, c, c.connmark) for c in conns
if vb is None or c.protocol.vhost in vb]
await self.apiroutine.wait_for_all(*matchers)
self._synchronized = True
await self.apiroutine.wait_for_send(ModuleNotification(self.getServiceName(), 'synchronized'))
async def _wait_for_sync(self):
if not self._synchronized:
await ModuleNotification.createMatcher(self.getServiceName(), 'synchronized')
async def _manage_conns(self):
try:
self.apiroutine.subroutine(self._manage_existing())
vb = self.vhostbind
if vb is not None:
conn_up = JsonRPCConnectionStateEvent.createMatcher(state = JsonRPCConnectionStateEvent.CONNECTION_UP,
_ismatch = lambda x: x.createby.vhost in vb)
conn_down = JsonRPCConnectionStateEvent.createMatcher(state = JsonRPCConnectionStateEvent.CONNECTION_DOWN,
_ismatch = lambda x: x.createby.vhost in vb)
else:
conn_up = JsonRPCConnectionStateEvent.createMatcher(state = JsonRPCConnectionStateEvent.CONNECTION_UP)
conn_down = JsonRPCConnectionStateEvent.createMatcher(state = JsonRPCConnectionStateEvent.CONNECTION_DOWN)
while True:
ev, m = await M_(conn_up, conn_down)
if m is conn_up:
if not hasattr(ev.connection, '_ovsdb_manager_get_bridges'):
ev.connection._ovsdb_manager_get_bridges = self.apiroutine.subroutine(self._get_bridges(ev.connection, ev.createby))
else:
conn = ev.connection
bridges = self.managed_bridges.get(conn)
if bridges is not None:
del self.managed_systemids[(ev.createby.vhost, conn.ovsdb_systemid)]
del self.managed_bridges[conn]
for vhost, dpid, name, buuid in bridges:
del self.managed_conns[(vhost, dpid)]
self.scheduler.emergesend(OVSDBBridgeSetup(OVSDBBridgeSetup.DOWN,
dpid,
conn.ovsdb_systemid,
name,
conn,
conn.connmark,
ev.createby.vhost,
buuid))
                        econns = self.endpoint_conns.get((ev.createby.vhost, _get_endpoint(conn)))
if econns is not None:
try:
econns.remove(conn)
except ValueError:
pass
finally:
for c in self.managed_bridges.keys():
if hasattr(c, '_ovsdb_manager_get_bridges'):
c._ovsdb_manager_get_bridges.close()
bridges = self.managed_bridges.get(c)
if bridges is not None:
for vhost, dpid, name, buuid in bridges:
del self.managed_conns[(vhost, dpid)]
self.scheduler.emergesend(OVSDBBridgeSetup(OVSDBBridgeSetup.DOWN,
dpid,
c.ovsdb_systemid,
name,
c,
c.connmark,
c.protocol.vhost,
buuid))
async def getconnection(self, datapathid, vhost = ''):
"Get current connection of datapath"
await self._wait_for_sync()
return self.managed_conns.get((vhost, datapathid))
async def waitconnection(self, datapathid, timeout = 30, vhost = ''):
"Wait for a datapath connection"
c = await self.getconnection(datapathid, vhost)
if c is None:
timeout_, ev, m = await self.apiroutine.wait_with_timeout(timeout,
OVSDBBridgeSetup.createMatcher(
state = OVSDBBridgeSetup.UP,
datapathid = datapathid, vhost = vhost))
if timeout_:
raise ConnectionResetException('Datapath is not connected')
return ev.connection
else:
return c
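# Illustrative usage sketch (not part of the original module; the registered
# service name "ovsdbmanager" is an assumption): another VLCP routine could
# obtain a datapath connection with
#   conn = await call_api(routine, "ovsdbmanager", "waitconnection",
#                         {"datapathid": dpid, "timeout": 30})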
async def getdatapathids(self, vhost = ''):
"Get All datapath IDs"
await self._wait_for_sync()
return [k[1] for k in self.managed_conns.keys() if k[0] == vhost]
async def getalldatapathids(self):
"Get all datapath IDs from any vhost. Return ``(vhost, datapathid)`` pair."
await self._wait_for_sync()
return list(self.managed_conns.keys())
async def getallconnections(self, vhost = ''):
"Get all connections from vhost. If vhost is None, return all connections from any host"
await self._wait_for_sync()
if vhost is None:
return list(self.managed_bridges.keys())
else:
return list(k for k in self.managed_bridges.keys() if k.protocol.vhost == vhost)
async def getbridges(self, connection):
"Get all ``(dpid, name, _uuid)`` tuple on this connection"
await self._wait_for_sync()
bridges = self.managed_bridges.get(connection)
if bridges is not None:
return [(dpid, name, buuid) for _, dpid, name, buuid in bridges]
else:
return None
async def getallbridges(self, vhost = None):
"Get all ``(dpid, name, _uuid)`` tuple for all connections, optionally filtered by vhost"
await self._wait_for_sync()
if vhost is not None:
return [(dpid, name, buuid)
for c, bridges in self.managed_bridges.items()
if c.protocol.vhost == vhost
for _, dpid, name, buuid in bridges]
else:
return [(dpid, name, buuid)
for c, bridges in self.managed_bridges.items()
for _, dpid, name, buuid in bridges]
async def getbridge(self, connection, name):
"Get datapath ID on this connection with specified name"
await self._wait_for_sync()
bridges = self.managed_bridges.get(connection)
if bridges is not None:
for _, dpid, n, _ in bridges:
if n == name:
return dpid
return None
else:
return None
async def waitbridge(self, connection, name, timeout = 30):
"Wait for bridge with specified name appears and return the datapath-id"
bnames = self.bridgenames
if bnames is not None and name not in bnames:
raise OVSDBBridgeNotAppearException('Bridge ' + repr(name) + ' does not appear: it is not in the selected bridge names')
dpid = await self.getbridge(connection, name)
if dpid is None:
bridge_setup = OVSDBBridgeSetup.createMatcher(OVSDBBridgeSetup.UP,
None,
None,
name,
connection
)
conn_down = JsonRPCConnectionStateEvent.createMatcher(JsonRPCConnectionStateEvent.CONNECTION_DOWN,
connection,
connection.connmark)
timeout_, ev, m = await self.apiroutine.wait_with_timeout(timeout, bridge_setup, conn_down)
if timeout_:
raise OVSDBBridgeNotAppearException('Bridge ' + repr(name) + ' does not appear')
elif m is conn_down:
raise ConnectionResetException('Connection is down before bridge ' + repr(name) + ' appears')
else:
return ev.datapathid
else:
return dpid
async def getbridgebyuuid(self, connection, uuid):
"Get datapath ID of bridge on this connection with specified _uuid"
await self._wait_for_sync()
bridges = self.managed_bridges.get(connection)
if bridges is not None:
for _, dpid, _, buuid in bridges:
if | |
" " + status)
@deprecated
def find_worker(self, node):
"""Get worker instance which emulates the specified node.
Replaced by get_worker.
Args:
node: nodename or NodeWrapper instance.
Returns:
Worker instance
"""
return self.get_worker(node)
def get_worker(self, node):
"""Get worker instance which emulates the specified node
Args:
node: Nodename or NodeWrapper instance.
Returns:
Worker instance
"""
if(isinstance(node, NodeWrapper)):
return node.worker
return self.node_to_worker[node]
def get_log_folder(self):
"""Get folder to which log files will be saved.
Returns:
Logfile folder as String.
"""
return "/tmp/maxinet_logs/" + Tools.time_to_string(self.starttime) +\
"/"
def terminate_logging(self):
"""Stop logging."""
for worker in self.cluster.workers():
worker.run_cmd("killall mpstat getRxTx.sh getMemoryUsage.sh")
#get CPU logs
worker.get_file("/tmp/maxinet_cpu_" +
str(self.hostname_to_workerid[worker.hn()]) + "_(" + worker.hn() + ").log",
"/tmp/maxinet_logs/" +
Tools.time_to_string(self.starttime) + "/")
#get memory logs
worker.get_file("/tmp/maxinet_mem_" +
str(self.hostname_to_workerid[worker.hn()]) + "_(" + worker.hn() + ").log",
"/tmp/maxinet_logs/" +
Tools.time_to_string(self.starttime) + "/")
#get interface logs
intf = worker.run_cmd("ip addr show to " + worker.ip(classifier="backend") + "/24 " +
"| head -n1 | cut -d' ' -f2 | tr -d :")\
.strip()
worker.get_file("/tmp/maxinet_intf_" + intf + "_" +
str(self.hostname_to_workerid[worker.hn()]) + "_(" + worker.hn() + ").log",
"/tmp/maxinet_logs/" +
Tools.time_to_string(self.starttime) + "/")
self._print_log_info()
self._print_monitor_info()
self.isMonitoring = False
def log_cpu(self):
"""Log cpu useage of workers.
Places log files in /tmp/maxinet_logs/.
"""
for worker in self.cluster.workers():
self.log_cpu_of_worker(worker)
def log_cpu_of_worker(self, worker):
"""Log cpu usage of worker.
Places log file in /tmp/maxinet_logs/.
"""
subprocess.call(["mkdir", "-p", "/tmp/maxinet_logs/" +
Tools.time_to_string(self.starttime) + "/"])
worker.daemonize("LANG=en_EN.UTF-8 mpstat 1 | while read l; " +
"do echo -n \"`date +%s` \" ; echo \"$l \" ;" +
" done > \"/tmp/maxinet_cpu_" + str(self.hostname_to_workerid[worker.hn()]) +
"_(" + worker.hn() + ").log\"")
def log_free_memory(self):
"""Log memory usage of workers.
Places log files in /tmp/maxinet_logs.
Format is:
timestamp,FreeMemory,Buffers,Cached
"""
subprocess.call(["mkdir", "-p", "/tmp/maxinet_logs/" +
Tools.time_to_string(self.starttime) + "/"])
for worker in self.cluster.workers():
worker.daemonize_script("getMemoryUsage.sh", " > \"/tmp/maxinet_mem_" +
str(self.hostname_to_workerid[worker.hn()]) + "_(" + worker.hn() + ").log\"")
def log_interfaces_of_node(self, node):
"""Log statistics of interfaces of node.
Places logs in /tmp/maxinet_logs.
Format is:
timestamp,received bytes,sent bytes,received packets,sent packets
"""
subprocess.call(["mkdir", "-p", "/tmp/maxinet_logs/" +
Tools.time_to_string(self.starttime) + "/"])
node = self.get(node)
worker = self.get_worker(node)
for intf in node.intfNames():
self.log_interface(worker, intf)
def log_interface(self, worker, intf):
"""Log statistics of interface of worker.
Places logs in /tmp/maxinet_logs.
Format is:
timestamp,received bytes,sent bytes,received packets,sent packets
"""
worker.daemonize_script("getRxTx.sh", " " + intf + " > \"/tmp/maxinet_intf_" +
intf + "_" + str(self.hostname_to_workerid[worker.hn()]) + "_(" + worker.hn() +
").log\"")
def monitor(self):
"""Log statistics of worker interfaces and memory usage.
Places log files in /tmp/maxinet_logs.
"""
self.isMonitoring = True
self.log_free_memory()
self.log_cpu()
for worker in self.cluster.workers():
intf = worker.run_cmd("ip addr show to " + worker.ip(classifier="backend") + "/24 " +
"| head -n1 | cut -d' ' -f2 | tr -d :")\
.strip()
if(intf == ""):
self.logger.warn("could not find main eth interface for " +
worker.hn() + ". no logging possible.")
else:
self.log_interface(worker, intf)
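# Illustrative monitoring sketch (not part of the original file; exp is an
# assumed Experiment instance): call exp.monitor() after setup, run the workload,
# then call exp.terminate_logging(), which collects the CPU, memory and interface
# logs into /tmp/maxinet_logs/<starttime>/ as described above.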
def _print_log_info(self):
"""Place log info message in log if log functions where used.
Prints info one time only even if called multiple times.
"""
if(not self._printed_log_info):
self._printed_log_info = True
self.logger.info("Log files will be placed in /tmp/maxinet_logs/" +
Tools.time_to_string(self.starttime) + "/." +
" You might want to save them somewhere else.")
def _print_monitor_info(self):
"""Place monitor info message in log if Experiment was monitored."""
self.logger.info("You monitored this experiment. To generate a graph" +
" from your logs call " +
"\"/usr/local/share/MaxiNet/maxinet_plot.py " +
"/tmp/maxinet_logs/" +
Tools.time_to_string(self.starttime) +
"/ plot.png\" ")
def CLI(self, plocals, pglobals):
"""Open interactive command line interface.
Arguments are used to allow usage of python commands in the same
scope as the one where CLI was called.
Args:
plocals: Dictionary as returned by locals()
pglobals: Dictionary as returned by globals()
"""
CLI(self, plocals, pglobals)
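# Illustrative usage (not part of the original file): from a user script,
# exp.CLI(locals(), globals()) drops into the interactive MaxiNet CLI with the
# caller's local and global variables in scope.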
def addNode(self, name, wid=None, pos=None):
"""Do bookkeeping to add a node at runtime.
Use wid to specify a worker id or pos to specify the worker of an
existing node. If neither is given, a random worker is chosen.
This does NOT actually create a Node object on the mininet
instance but is a helper function for addHost etc.
Args:
name: Node name.
wid: Optional worker id to place node.
pos: Optional existing node name whose worker should be used
as host of node.
"""
if (wid is None):
wid = random.randint(0, self.cluster.num_workers() - 1)
if (not pos is None):
wid = self.hostname_to_workerid[self.node_to_worker[pos].hn()]
self.node_to_worker[name] = self.cluster.get_worker(self.workerid_to_hostname[wid])
self.node_to_wrapper[name] = NodeWrapper(name, self.get_worker(name))
self.nodes.append(self.node_to_wrapper[name])
def addHost(self, name, cls=None, wid=None, pos=None, **params):
"""Add host at runtime.
Use wid to specify a worker id or pos to specify the worker of an
existing node. If neither is given, a random worker is chosen.
Args:
name: Host name.
cls: Optional mininet class to use for instantiation.
wid: Optional worker id to place node.
pos: Optional existing node name whose worker should be used
as host of node.
**params: parameters to use at mininet host class
instantiation.
"""
self.addNode(name, wid=wid, pos=pos)
self.get_worker(name).addHost(name, cls=cls, **params)
self.hosts.append(self.get(name))
#deactivate TSO
if (self.config.deactivateTSO()):
for intf in self.get_node(name).intfNames():
self.get_node(name).cmd("sudo ethtool -K %s tso off" % intf)
#set MTU if necessary
if (self.config.run_with_1500_mtu()):
self.setMTU(self.get_node(name), 1450)
return self.get(name)
def addSwitch(self, name, cls=None, wid=None, pos=None, **params):
"""Add switch at runtime.
Use wid to specify a worker id or pos to specify the worker of an
existing node. If neither is given, a random worker is chosen.
Args:
name: Switch name.
cls: Optional mininet class to use for instantiation.
wid: Optional worker id to place node.
pos: Optional existing node name whose worker should be used
as host of node.
**params: parameters to use at mininet switch class
instantiation.
"""
self.addNode(name, wid=wid, pos=pos)
self.get_worker(name).addSwitch(name, cls, **params)
self.switches.append(self.get(name))
#set MTU if necessary
if (self.config.run_with_1500_mtu()):
self.setMTU(self.get_node(name), 1450)
return self.get(name)
def addController(self, name="c0", controller=None, wid=None, pos=None,
**params):
"""Add controller at runtime.
Use wid to specify a worker id or pos to specify the worker of an
existing node. If neither is given, a random worker is chosen.
Args:
name: Controller name.
controller: Optional mininet class to use for instantiation.
wid: Optional worker id to place node.
pos: Optional existing node name whose worker should be used
as host of node.
**params: parameters to use at mininet controller class
instantiation.
"""
self.addNode(name, wid=wid, pos=pos)
self.get_worker(name).addController(name, controller, **params)
return self.get(name)
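# Illustrative runtime-topology sketch (not part of the original file; node names
# are hypothetical): once the experiment is running, one might call
#   exp.addSwitch("s99", wid=0)
#   exp.addHost("h99", pos="s99")
#   exp.addLink("h99", "s99", autoconf=True)
# Placing h99 on the same worker as s99 (via pos) avoids the host/switch tunnel
# restriction documented in addLink below.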
def name(self, node):
"""Get name of network node.
Args:
node: Node name or NodeWrapper instance.
Returns:
String of node name.
"""
if(isinstance(node, NodeWrapper)):
return node.nn
return node
def addLink(self, node1, node2, port1=None, port2=None, cls=None,
autoconf=False, **params):
"""Add link at runtime.
Add link at runtime and create tunnels between workers if
necessary. Will not work for mininet.node.UserSwitch switches.
Be aware that tunnels will only work between switches, so if you
want to create a link with a host on one side, make sure that
both nodes are located on the same worker.
autoconf parameter handles attach() and config calls on switches and
hosts.
Args:
node1: Node name or NodeWrapper instance.
node2: Node name or NodeWrapper instance.
port1: Optional port number of link on node1.
port2: Optional port number of link on node2.
cls: Optional class to use on Link creation. Be aware that
only mininet.link.Link and mininet.link.TCLink are
supported for tunnels.
autoconf: mininet requires some calls to make newly added
tunnels work. If autoconf is set to True MaxiNet will
issue these calls automatically.
Raises:
RuntimeError: If cls is not None or Link or TCLink and
tunneling is needed.
"""
w1 = self.get_worker(node1)
w2 = self.get_worker(node2)
if(not isinstance(node1, NodeWrapper)):
node1 = self.get(node1)
if(not isinstance(node2, NodeWrapper)):
node2 = self.get(node2)
if(w1 == w2):
self.logger.debug("no tunneling needed")
l = w1.addLink(self.name(node1), self.name(node2), port1, port2,
cls, **params)
else:
self.logger.debug("tunneling needed")
if(not ((node1 in self.switches) and (node2 in self.switches))):
self.logger.error("We cannot create tunnels between switches" +
" and hosts. Sorry.")
raise RuntimeError("Can't create tunnel between switch and" +
"host")
if(not ((cls is None) or isinstance(cls, Link) or
isinstance(cls, TCLink))):
self.logger.error("Only Link or TCLink instances are " +
"supported by MaxiNet")
raise RuntimeError("Only Link or TCLink instances are " +
"supported by MaxiNet")
intfn = self.cluster.create_tunnel(w1, w2)
if((cls is None) or isinstance(cls, TCLink)):
intf = TCIntf
else:
intf = Intf
w1.addTunnel(intfn, self.name(node1), port1, intf, **params)
w2.addTunnel(intfn, | |
1L), groups=1, bias=False)
self.conv5_2_1x1_reduce_bn = self.__batch_normalization(2, 'conv5_2_1x1_reduce/bn', num_features=512, eps=9.99999974738e-06, momentum=0.0)
self.conv5_2_3x3 = self.__conv(2, name='conv5_2_3x3', in_channels=512, out_channels=512, kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.conv5_2_3x3_bn = self.__batch_normalization(2, 'conv5_2_3x3/bn', num_features=512, eps=9.99999974738e-06, momentum=0.0)
self.conv5_2_1x1_increase = self.__conv(2, name='conv5_2_1x1_increase', in_channels=512, out_channels=2048, kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.conv5_2_1x1_increase_bn = self.__batch_normalization(2, 'conv5_2_1x1_increase/bn', num_features=2048, eps=9.99999974738e-06, momentum=0.0)
self.conv5_2_1x1_down = self.__conv(2, name='conv5_2_1x1_down', in_channels=2048, out_channels=128, kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=True)
self.conv5_2_1x1_up = self.__conv(2, name='conv5_2_1x1_up', in_channels=128, out_channels=2048, kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=True)
self.conv5_3_1x1_reduce = self.__conv(2, name='conv5_3_1x1_reduce', in_channels=2048, out_channels=512, kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.conv5_3_1x1_reduce_bn = self.__batch_normalization(2, 'conv5_3_1x1_reduce/bn', num_features=512, eps=9.99999974738e-06, momentum=0.0)
self.conv5_3_3x3 = self.__conv(2, name='conv5_3_3x3', in_channels=512, out_channels=512, kernel_size=(3L, 3L), stride=(1L, 1L), groups=1, bias=False)
self.conv5_3_3x3_bn = self.__batch_normalization(2, 'conv5_3_3x3/bn', num_features=512, eps=9.99999974738e-06, momentum=0.0)
self.conv5_3_1x1_increase = self.__conv(2, name='conv5_3_1x1_increase', in_channels=512, out_channels=2048, kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
self.conv5_3_1x1_increase_bn = self.__batch_normalization(2, 'conv5_3_1x1_increase/bn', num_features=2048, eps=9.99999974738e-06, momentum=0.0)
self.conv5_3_1x1_down = self.__conv(2, name='conv5_3_1x1_down', in_channels=2048, out_channels=128, kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=True)
self.conv5_3_1x1_up = self.__conv(2, name='conv5_3_1x1_up', in_channels=128, out_channels=2048, kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=True)
self.feat_extract = self.__conv(2, name='feat_extract', in_channels=2048, out_channels=256, kernel_size=(1L, 1L), stride=(1L, 1L), groups=1, bias=False)
def forward(self, x):
conv1_7x7_s2_pad = F.pad(x, (3L, 3L, 3L, 3L))
conv1_7x7_s2 = self.conv1_7x7_s2(conv1_7x7_s2_pad)
conv1_7x7_s2_bn = self.conv1_7x7_s2_bn(conv1_7x7_s2)
conv1_relu_7x7_s2 = F.relu(conv1_7x7_s2_bn)
pool1_3x3_s2_pad = F.pad(conv1_relu_7x7_s2, (0L, 1L, 0L, 1L), value=float('-inf'))
pool1_3x3_s2 = F.max_pool2d(pool1_3x3_s2_pad, kernel_size=(3L, 3L), stride=(2L, 2L), padding=0, ceil_mode=False)
conv2_1_1x1_reduce = self.conv2_1_1x1_reduce(pool1_3x3_s2)
conv2_1_1x1_proj = self.conv2_1_1x1_proj(pool1_3x3_s2)
conv2_1_1x1_reduce_bn = self.conv2_1_1x1_reduce_bn(conv2_1_1x1_reduce)
conv2_1_1x1_proj_bn = self.conv2_1_1x1_proj_bn(conv2_1_1x1_proj)
conv2_1_1x1_reduce_relu = F.relu(conv2_1_1x1_reduce_bn)
conv2_1_3x3_pad = F.pad(conv2_1_1x1_reduce_relu, (1L, 1L, 1L, 1L))
conv2_1_3x3 = self.conv2_1_3x3(conv2_1_3x3_pad)
conv2_1_3x3_bn = self.conv2_1_3x3_bn(conv2_1_3x3)
conv2_1_3x3_relu = F.relu(conv2_1_3x3_bn)
conv2_1_1x1_increase = self.conv2_1_1x1_increase(conv2_1_3x3_relu)
conv2_1_1x1_increase_bn = self.conv2_1_1x1_increase_bn(conv2_1_1x1_increase)
conv2_1_global_pool = F.avg_pool2d(conv2_1_1x1_increase_bn, kernel_size=(56L, 56L), stride=(1L, 1L), padding=(0L,), ceil_mode=False)
conv2_1_1x1_down = self.conv2_1_1x1_down(conv2_1_global_pool)
conv2_1_1x1_down_relu = F.relu(conv2_1_1x1_down)
conv2_1_1x1_up = self.conv2_1_1x1_up(conv2_1_1x1_down_relu)
conv2_1_prob = F.sigmoid(conv2_1_1x1_up)
conv2_1_1x1_increase_bn_scale = conv2_1_prob * conv2_1_1x1_increase_bn
conv2_1 = conv2_1_1x1_increase_bn_scale + conv2_1_1x1_proj_bn
conv2_1_relu = F.relu(conv2_1)
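# Note added for readability (not part of the generated file): each residual unit
# above and below follows the Squeeze-and-Excitation pattern: a global average
# pool over the 1x1_increase/bn output, a 1x1 "down" conv + ReLU, a 1x1 "up" conv
# + sigmoid, and the resulting per-channel weights rescale the block output before
# the residual addition and final ReLU.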
conv2_2_1x1_reduce = self.conv2_2_1x1_reduce(conv2_1_relu)
conv2_2_1x1_reduce_bn = self.conv2_2_1x1_reduce_bn(conv2_2_1x1_reduce)
conv2_2_1x1_reduce_relu = F.relu(conv2_2_1x1_reduce_bn)
conv2_2_3x3_pad = F.pad(conv2_2_1x1_reduce_relu, (1L, 1L, 1L, 1L))
conv2_2_3x3 = self.conv2_2_3x3(conv2_2_3x3_pad)
conv2_2_3x3_bn = self.conv2_2_3x3_bn(conv2_2_3x3)
conv2_2_3x3_relu = F.relu(conv2_2_3x3_bn)
conv2_2_1x1_increase = self.conv2_2_1x1_increase(conv2_2_3x3_relu)
conv2_2_1x1_increase_bn = self.conv2_2_1x1_increase_bn(conv2_2_1x1_increase)
conv2_2_global_pool = F.avg_pool2d(conv2_2_1x1_increase_bn, kernel_size=(56L, 56L), stride=(1L, 1L), padding=(0L,), ceil_mode=False)
conv2_2_1x1_down = self.conv2_2_1x1_down(conv2_2_global_pool)
conv2_2_1x1_down_relu = F.relu(conv2_2_1x1_down)
conv2_2_1x1_up = self.conv2_2_1x1_up(conv2_2_1x1_down_relu)
conv2_2_prob = F.sigmoid(conv2_2_1x1_up)
conv2_2_1x1_increase_bn_scale = conv2_2_prob * conv2_2_1x1_increase_bn
conv2_2 = conv2_2_1x1_increase_bn_scale + conv2_1_relu
conv2_2_relu = F.relu(conv2_2)
conv2_3_1x1_reduce = self.conv2_3_1x1_reduce(conv2_2_relu)
conv2_3_1x1_reduce_bn = self.conv2_3_1x1_reduce_bn(conv2_3_1x1_reduce)
conv2_3_1x1_reduce_relu = F.relu(conv2_3_1x1_reduce_bn)
conv2_3_3x3_pad = F.pad(conv2_3_1x1_reduce_relu, (1L, 1L, 1L, 1L))
conv2_3_3x3 = self.conv2_3_3x3(conv2_3_3x3_pad)
conv2_3_3x3_bn = self.conv2_3_3x3_bn(conv2_3_3x3)
conv2_3_3x3_relu = F.relu(conv2_3_3x3_bn)
conv2_3_1x1_increase = self.conv2_3_1x1_increase(conv2_3_3x3_relu)
conv2_3_1x1_increase_bn = self.conv2_3_1x1_increase_bn(conv2_3_1x1_increase)
conv2_3_global_pool = F.avg_pool2d(conv2_3_1x1_increase_bn, kernel_size=(56L, 56L), stride=(1L, 1L), padding=(0L,), ceil_mode=False)
conv2_3_1x1_down = self.conv2_3_1x1_down(conv2_3_global_pool)
conv2_3_1x1_down_relu = F.relu(conv2_3_1x1_down)
conv2_3_1x1_up = self.conv2_3_1x1_up(conv2_3_1x1_down_relu)
conv2_3_prob = F.sigmoid(conv2_3_1x1_up)
conv2_3_1x1_increase_bn_scale = conv2_3_prob * conv2_3_1x1_increase_bn
conv2_3 = conv2_3_1x1_increase_bn_scale + conv2_2_relu
conv2_3_relu = F.relu(conv2_3)
conv3_1_1x1_proj = self.conv3_1_1x1_proj(conv2_3_relu)
conv3_1_1x1_reduce = self.conv3_1_1x1_reduce(conv2_3_relu)
conv3_1_1x1_proj_bn = self.conv3_1_1x1_proj_bn(conv3_1_1x1_proj)
conv3_1_1x1_reduce_bn = self.conv3_1_1x1_reduce_bn(conv3_1_1x1_reduce)
conv3_1_1x1_reduce_relu = F.relu(conv3_1_1x1_reduce_bn)
conv3_1_3x3_pad = F.pad(conv3_1_1x1_reduce_relu, (1L, 1L, 1L, 1L))
conv3_1_3x3 = self.conv3_1_3x3(conv3_1_3x3_pad)
conv3_1_3x3_bn = self.conv3_1_3x3_bn(conv3_1_3x3)
conv3_1_3x3_relu = F.relu(conv3_1_3x3_bn)
conv3_1_1x1_increase = self.conv3_1_1x1_increase(conv3_1_3x3_relu)
conv3_1_1x1_increase_bn = self.conv3_1_1x1_increase_bn(conv3_1_1x1_increase)
conv3_1_global_pool = F.avg_pool2d(conv3_1_1x1_increase_bn, kernel_size=(28L, 28L), stride=(1L, 1L), padding=(0L,), ceil_mode=False)
conv3_1_1x1_down = self.conv3_1_1x1_down(conv3_1_global_pool)
conv3_1_1x1_down_relu = F.relu(conv3_1_1x1_down)
conv3_1_1x1_up = self.conv3_1_1x1_up(conv3_1_1x1_down_relu)
conv3_1_prob = F.sigmoid(conv3_1_1x1_up)
conv3_1_1x1_increase_bn_scale = conv3_1_prob * conv3_1_1x1_increase_bn
conv3_1 = conv3_1_1x1_increase_bn_scale + conv3_1_1x1_proj_bn
conv3_1_relu = F.relu(conv3_1)
conv3_2_1x1_reduce = self.conv3_2_1x1_reduce(conv3_1_relu)
conv3_2_1x1_reduce_bn = self.conv3_2_1x1_reduce_bn(conv3_2_1x1_reduce)
conv3_2_1x1_reduce_relu = F.relu(conv3_2_1x1_reduce_bn)
conv3_2_3x3_pad = F.pad(conv3_2_1x1_reduce_relu, (1L, 1L, 1L, 1L))
conv3_2_3x3 = self.conv3_2_3x3(conv3_2_3x3_pad)
conv3_2_3x3_bn = self.conv3_2_3x3_bn(conv3_2_3x3)
conv3_2_3x3_relu = F.relu(conv3_2_3x3_bn)
conv3_2_1x1_increase = self.conv3_2_1x1_increase(conv3_2_3x3_relu)
conv3_2_1x1_increase_bn = self.conv3_2_1x1_increase_bn(conv3_2_1x1_increase)
conv3_2_global_pool = F.avg_pool2d(conv3_2_1x1_increase_bn, kernel_size=(28L, 28L), stride=(1L, 1L), padding=(0L,), ceil_mode=False)
conv3_2_1x1_down = self.conv3_2_1x1_down(conv3_2_global_pool)
conv3_2_1x1_down_relu = F.relu(conv3_2_1x1_down)
conv3_2_1x1_up = self.conv3_2_1x1_up(conv3_2_1x1_down_relu)
conv3_2_prob = F.sigmoid(conv3_2_1x1_up)
conv3_2_1x1_increase_bn_scale = conv3_2_prob * conv3_2_1x1_increase_bn
conv3_2 = conv3_2_1x1_increase_bn_scale + conv3_1_relu
conv3_2_relu = F.relu(conv3_2)
conv3_3_1x1_reduce = self.conv3_3_1x1_reduce(conv3_2_relu)
conv3_3_1x1_reduce_bn = self.conv3_3_1x1_reduce_bn(conv3_3_1x1_reduce)
conv3_3_1x1_reduce_relu = F.relu(conv3_3_1x1_reduce_bn)
conv3_3_3x3_pad = F.pad(conv3_3_1x1_reduce_relu, (1L, 1L, 1L, 1L))
conv3_3_3x3 = self.conv3_3_3x3(conv3_3_3x3_pad)
conv3_3_3x3_bn = self.conv3_3_3x3_bn(conv3_3_3x3)
conv3_3_3x3_relu = F.relu(conv3_3_3x3_bn)
conv3_3_1x1_increase = self.conv3_3_1x1_increase(conv3_3_3x3_relu)
conv3_3_1x1_increase_bn = self.conv3_3_1x1_increase_bn(conv3_3_1x1_increase)
conv3_3_global_pool = F.avg_pool2d(conv3_3_1x1_increase_bn, kernel_size=(28L, 28L), stride=(1L, 1L), padding=(0L,), ceil_mode=False)
conv3_3_1x1_down = self.conv3_3_1x1_down(conv3_3_global_pool)
conv3_3_1x1_down_relu = F.relu(conv3_3_1x1_down)
conv3_3_1x1_up = self.conv3_3_1x1_up(conv3_3_1x1_down_relu)
conv3_3_prob = F.sigmoid(conv3_3_1x1_up)
conv3_3_1x1_increase_bn_scale = conv3_3_prob * conv3_3_1x1_increase_bn
conv3_3 = conv3_3_1x1_increase_bn_scale + conv3_2_relu
conv3_3_relu = F.relu(conv3_3)
conv3_4_1x1_reduce = self.conv3_4_1x1_reduce(conv3_3_relu)
conv3_4_1x1_reduce_bn = self.conv3_4_1x1_reduce_bn(conv3_4_1x1_reduce)
conv3_4_1x1_reduce_relu = F.relu(conv3_4_1x1_reduce_bn)
conv3_4_3x3_pad = F.pad(conv3_4_1x1_reduce_relu, (1L, 1L, 1L, 1L))
conv3_4_3x3 = self.conv3_4_3x3(conv3_4_3x3_pad)
conv3_4_3x3_bn = self.conv3_4_3x3_bn(conv3_4_3x3)
conv3_4_3x3_relu = F.relu(conv3_4_3x3_bn)
conv3_4_1x1_increase = self.conv3_4_1x1_increase(conv3_4_3x3_relu)
conv3_4_1x1_increase_bn = self.conv3_4_1x1_increase_bn(conv3_4_1x1_increase)
conv3_4_global_pool = F.avg_pool2d(conv3_4_1x1_increase_bn, kernel_size=(28L, 28L), stride=(1L, 1L), padding=(0L,), ceil_mode=False)
conv3_4_1x1_down = self.conv3_4_1x1_down(conv3_4_global_pool)
conv3_4_1x1_down_relu = F.relu(conv3_4_1x1_down)
conv3_4_1x1_up = self.conv3_4_1x1_up(conv3_4_1x1_down_relu)
conv3_4_prob = F.sigmoid(conv3_4_1x1_up)
conv3_4_1x1_increase_bn_scale = conv3_4_prob * conv3_4_1x1_increase_bn
conv3_4 = conv3_4_1x1_increase_bn_scale + conv3_3_relu
conv3_4_relu = F.relu(conv3_4)
conv4_1_1x1_proj = self.conv4_1_1x1_proj(conv3_4_relu)
conv4_1_1x1_reduce = self.conv4_1_1x1_reduce(conv3_4_relu)
conv4_1_1x1_proj_bn = self.conv4_1_1x1_proj_bn(conv4_1_1x1_proj)
conv4_1_1x1_reduce_bn = self.conv4_1_1x1_reduce_bn(conv4_1_1x1_reduce)
conv4_1_1x1_reduce_relu = F.relu(conv4_1_1x1_reduce_bn)
conv4_1_3x3_pad = F.pad(conv4_1_1x1_reduce_relu, (1L, 1L, 1L, 1L))
conv4_1_3x3 = self.conv4_1_3x3(conv4_1_3x3_pad)
conv4_1_3x3_bn = self.conv4_1_3x3_bn(conv4_1_3x3)
conv4_1_3x3_relu = F.relu(conv4_1_3x3_bn)
conv4_1_1x1_increase = self.conv4_1_1x1_increase(conv4_1_3x3_relu)
conv4_1_1x1_increase_bn = self.conv4_1_1x1_increase_bn(conv4_1_1x1_increase)
conv4_1_global_pool = F.avg_pool2d(conv4_1_1x1_increase_bn, kernel_size=(14L, 14L), stride=(1L, 1L), padding=(0L,), ceil_mode=False)
conv4_1_1x1_down = self.conv4_1_1x1_down(conv4_1_global_pool)
conv4_1_1x1_down_relu = F.relu(conv4_1_1x1_down)
conv4_1_1x1_up = self.conv4_1_1x1_up(conv4_1_1x1_down_relu)
conv4_1_prob = F.sigmoid(conv4_1_1x1_up)
conv4_1_1x1_increase_bn_scale = conv4_1_prob * conv4_1_1x1_increase_bn
conv4_1 = conv4_1_1x1_increase_bn_scale + conv4_1_1x1_proj_bn
conv4_1_relu = F.relu(conv4_1)
conv4_2_1x1_reduce = self.conv4_2_1x1_reduce(conv4_1_relu)
conv4_2_1x1_reduce_bn = self.conv4_2_1x1_reduce_bn(conv4_2_1x1_reduce)
conv4_2_1x1_reduce_relu = F.relu(conv4_2_1x1_reduce_bn)
conv4_2_3x3_pad = F.pad(conv4_2_1x1_reduce_relu, (1L, 1L, 1L, 1L))
conv4_2_3x3 = self.conv4_2_3x3(conv4_2_3x3_pad)
conv4_2_3x3_bn = self.conv4_2_3x3_bn(conv4_2_3x3)
conv4_2_3x3_relu = F.relu(conv4_2_3x3_bn)
conv4_2_1x1_increase = self.conv4_2_1x1_increase(conv4_2_3x3_relu)
conv4_2_1x1_increase_bn = self.conv4_2_1x1_increase_bn(conv4_2_1x1_increase)
conv4_2_global_pool = F.avg_pool2d(conv4_2_1x1_increase_bn, kernel_size=(14L, 14L), stride=(1L, 1L), padding=(0L,), ceil_mode=False)
conv4_2_1x1_down = self.conv4_2_1x1_down(conv4_2_global_pool)
conv4_2_1x1_down_relu = F.relu(conv4_2_1x1_down)
conv4_2_1x1_up = self.conv4_2_1x1_up(conv4_2_1x1_down_relu)
conv4_2_prob = F.sigmoid(conv4_2_1x1_up)
conv4_2_1x1_increase_bn_scale = conv4_2_prob * conv4_2_1x1_increase_bn
conv4_2 = conv4_2_1x1_increase_bn_scale + conv4_1_relu
conv4_2_relu = F.relu(conv4_2)
conv4_3_1x1_reduce = self.conv4_3_1x1_reduce(conv4_2_relu)
conv4_3_1x1_reduce_bn = self.conv4_3_1x1_reduce_bn(conv4_3_1x1_reduce)
conv4_3_1x1_reduce_relu = F.relu(conv4_3_1x1_reduce_bn)
conv4_3_3x3_pad = F.pad(conv4_3_1x1_reduce_relu, (1L, 1L, 1L, 1L))
conv4_3_3x3 = self.conv4_3_3x3(conv4_3_3x3_pad)
conv4_3_3x3_bn = self.conv4_3_3x3_bn(conv4_3_3x3)
conv4_3_3x3_relu = F.relu(conv4_3_3x3_bn)
conv4_3_1x1_increase = self.conv4_3_1x1_increase(conv4_3_3x3_relu)
conv4_3_1x1_increase_bn = self.conv4_3_1x1_increase_bn(conv4_3_1x1_increase)
conv4_3_global_pool = F.avg_pool2d(conv4_3_1x1_increase_bn, kernel_size=(14L, 14L), stride=(1L, 1L), padding=(0L,), ceil_mode=False)
conv4_3_1x1_down = self.conv4_3_1x1_down(conv4_3_global_pool)
conv4_3_1x1_down_relu = F.relu(conv4_3_1x1_down)
conv4_3_1x1_up = self.conv4_3_1x1_up(conv4_3_1x1_down_relu)
conv4_3_prob = F.sigmoid(conv4_3_1x1_up)
conv4_3_1x1_increase_bn_scale = conv4_3_prob * conv4_3_1x1_increase_bn
conv4_3 = conv4_3_1x1_increase_bn_scale + conv4_2_relu
conv4_3_relu = F.relu(conv4_3)
conv4_4_1x1_reduce = self.conv4_4_1x1_reduce(conv4_3_relu)
conv4_4_1x1_reduce_bn = self.conv4_4_1x1_reduce_bn(conv4_4_1x1_reduce)
conv4_4_1x1_reduce_relu = F.relu(conv4_4_1x1_reduce_bn)
conv4_4_3x3_pad = F.pad(conv4_4_1x1_reduce_relu, (1L, 1L, 1L, 1L))
conv4_4_3x3 = self.conv4_4_3x3(conv4_4_3x3_pad)
conv4_4_3x3_bn = self.conv4_4_3x3_bn(conv4_4_3x3)
conv4_4_3x3_relu = F.relu(conv4_4_3x3_bn)
conv4_4_1x1_increase = self.conv4_4_1x1_increase(conv4_4_3x3_relu)
conv4_4_1x1_increase_bn = self.conv4_4_1x1_increase_bn(conv4_4_1x1_increase)
conv4_4_global_pool = F.avg_pool2d(conv4_4_1x1_increase_bn, kernel_size=(14L, 14L), stride=(1L, 1L), padding=(0L,), ceil_mode=False)
conv4_4_1x1_down = self.conv4_4_1x1_down(conv4_4_global_pool)
conv4_4_1x1_down_relu = F.relu(conv4_4_1x1_down)
conv4_4_1x1_up = self.conv4_4_1x1_up(conv4_4_1x1_down_relu)
conv4_4_prob = F.sigmoid(conv4_4_1x1_up)
conv4_4_1x1_increase_bn_scale = conv4_4_prob * conv4_4_1x1_increase_bn
conv4_4 = conv4_4_1x1_increase_bn_scale + conv4_3_relu
conv4_4_relu = F.relu(conv4_4)
conv4_5_1x1_reduce = self.conv4_5_1x1_reduce(conv4_4_relu)
conv4_5_1x1_reduce_bn = self.conv4_5_1x1_reduce_bn(conv4_5_1x1_reduce)
conv4_5_1x1_reduce_relu = F.relu(conv4_5_1x1_reduce_bn)
conv4_5_3x3_pad = F.pad(conv4_5_1x1_reduce_relu, (1L, 1L, 1L, 1L))
conv4_5_3x3 = self.conv4_5_3x3(conv4_5_3x3_pad)
conv4_5_3x3_bn = self.conv4_5_3x3_bn(conv4_5_3x3)
conv4_5_3x3_relu = F.relu(conv4_5_3x3_bn)
conv4_5_1x1_increase = self.conv4_5_1x1_increase(conv4_5_3x3_relu)
conv4_5_1x1_increase_bn = self.conv4_5_1x1_increase_bn(conv4_5_1x1_increase)
conv4_5_global_pool = F.avg_pool2d(conv4_5_1x1_increase_bn, kernel_size=(14L, 14L), stride=(1L, 1L), padding=(0L,), ceil_mode=False)
conv4_5_1x1_down = self.conv4_5_1x1_down(conv4_5_global_pool)
conv4_5_1x1_down_relu = F.relu(conv4_5_1x1_down)
conv4_5_1x1_up = self.conv4_5_1x1_up(conv4_5_1x1_down_relu)
conv4_5_prob = F.sigmoid(conv4_5_1x1_up)
conv4_5_1x1_increase_bn_scale = conv4_5_prob * conv4_5_1x1_increase_bn
conv4_5 = conv4_5_1x1_increase_bn_scale + conv4_4_relu
conv4_5_relu = F.relu(conv4_5)
conv4_6_1x1_reduce = self.conv4_6_1x1_reduce(conv4_5_relu)
conv4_6_1x1_reduce_bn = self.conv4_6_1x1_reduce_bn(conv4_6_1x1_reduce)
conv4_6_1x1_reduce_relu = F.relu(conv4_6_1x1_reduce_bn)
conv4_6_3x3_pad = F.pad(conv4_6_1x1_reduce_relu, (1L, 1L, 1L, 1L))
conv4_6_3x3 = self.conv4_6_3x3(conv4_6_3x3_pad)
conv4_6_3x3_bn = self.conv4_6_3x3_bn(conv4_6_3x3)
conv4_6_3x3_relu = F.relu(conv4_6_3x3_bn)
conv4_6_1x1_increase = self.conv4_6_1x1_increase(conv4_6_3x3_relu)
conv4_6_1x1_increase_bn = self.conv4_6_1x1_increase_bn(conv4_6_1x1_increase)
conv4_6_global_pool = F.avg_pool2d(conv4_6_1x1_increase_bn, kernel_size=(14L, 14L), stride=(1L, 1L), padding=(0L,), ceil_mode=False)
conv4_6_1x1_down = self.conv4_6_1x1_down(conv4_6_global_pool)
conv4_6_1x1_down_relu = F.relu(conv4_6_1x1_down)
conv4_6_1x1_up = self.conv4_6_1x1_up(conv4_6_1x1_down_relu)
conv4_6_prob = F.sigmoid(conv4_6_1x1_up)
conv4_6_1x1_increase_bn_scale = conv4_6_prob * conv4_6_1x1_increase_bn
conv4_6 = conv4_6_1x1_increase_bn_scale + conv4_5_relu
conv4_6_relu = F.relu(conv4_6)
conv5_1_1x1_proj = self.conv5_1_1x1_proj(conv4_6_relu)
conv5_1_1x1_reduce = self.conv5_1_1x1_reduce(conv4_6_relu)
conv5_1_1x1_proj_bn = self.conv5_1_1x1_proj_bn(conv5_1_1x1_proj)
conv5_1_1x1_reduce_bn = self.conv5_1_1x1_reduce_bn(conv5_1_1x1_reduce)
conv5_1_1x1_reduce_relu = F.relu(conv5_1_1x1_reduce_bn)
conv5_1_3x3_pad = F.pad(conv5_1_1x1_reduce_relu, (1L, 1L, 1L, 1L))
conv5_1_3x3 = self.conv5_1_3x3(conv5_1_3x3_pad)
conv5_1_3x3_bn = self.conv5_1_3x3_bn(conv5_1_3x3)
conv5_1_3x3_relu = F.relu(conv5_1_3x3_bn)
conv5_1_1x1_increase = self.conv5_1_1x1_increase(conv5_1_3x3_relu)
conv5_1_1x1_increase_bn = self.conv5_1_1x1_increase_bn(conv5_1_1x1_increase)
conv5_1_global_pool = F.avg_pool2d(conv5_1_1x1_increase_bn, kernel_size=(7L, 7L), stride=(1L, 1L), padding=(0L,), ceil_mode=False)
conv5_1_1x1_down = self.conv5_1_1x1_down(conv5_1_global_pool)
conv5_1_1x1_down_relu = F.relu(conv5_1_1x1_down)
conv5_1_1x1_up = self.conv5_1_1x1_up(conv5_1_1x1_down_relu)
conv5_1_prob = F.sigmoid(conv5_1_1x1_up)
conv5_1_1x1_increase_bn_scale = conv5_1_prob * conv5_1_1x1_increase_bn
conv5_1 = conv5_1_1x1_increase_bn_scale + conv5_1_1x1_proj_bn
conv5_1_relu = F.relu(conv5_1)
conv5_2_1x1_reduce = self.conv5_2_1x1_reduce(conv5_1_relu)
conv5_2_1x1_reduce_bn = self.conv5_2_1x1_reduce_bn(conv5_2_1x1_reduce)
conv5_2_1x1_reduce_relu = F.relu(conv5_2_1x1_reduce_bn)
conv5_2_3x3_pad = F.pad(conv5_2_1x1_reduce_relu, (1L, 1L, 1L, 1L))
conv5_2_3x3 = self.conv5_2_3x3(conv5_2_3x3_pad)
conv5_2_3x3_bn = self.conv5_2_3x3_bn(conv5_2_3x3)
conv5_2_3x3_relu = F.relu(conv5_2_3x3_bn)
conv5_2_1x1_increase = self.conv5_2_1x1_increase(conv5_2_3x3_relu)
conv5_2_1x1_increase_bn = self.conv5_2_1x1_increase_bn(conv5_2_1x1_increase)
conv5_2_global_pool = F.avg_pool2d(conv5_2_1x1_increase_bn, kernel_size=(7L, 7L), stride=(1L, 1L), padding=(0L,), ceil_mode=False)
conv5_2_1x1_down = self.conv5_2_1x1_down(conv5_2_global_pool)
conv5_2_1x1_down_relu = F.relu(conv5_2_1x1_down)
conv5_2_1x1_up = self.conv5_2_1x1_up(conv5_2_1x1_down_relu)
conv5_2_prob = F.sigmoid(conv5_2_1x1_up)
conv5_2_1x1_increase_bn_scale = conv5_2_prob * conv5_2_1x1_increase_bn
conv5_2 = conv5_2_1x1_increase_bn_scale + conv5_1_relu
conv5_2_relu = F.relu(conv5_2)
conv5_3_1x1_reduce = self.conv5_3_1x1_reduce(conv5_2_relu)
conv5_3_1x1_reduce_bn = self.conv5_3_1x1_reduce_bn(conv5_3_1x1_reduce)
conv5_3_1x1_reduce_relu = F.relu(conv5_3_1x1_reduce_bn)
conv5_3_3x3_pad = F.pad(conv5_3_1x1_reduce_relu, (1L, 1L, 1L, 1L))
conv5_3_3x3 = self.conv5_3_3x3(conv5_3_3x3_pad)
conv5_3_3x3_bn = self.conv5_3_3x3_bn(conv5_3_3x3)
conv5_3_3x3_relu = F.relu(conv5_3_3x3_bn)
conv5_3_1x1_increase = self.conv5_3_1x1_increase(conv5_3_3x3_relu)
conv5_3_1x1_increase_bn = self.conv5_3_1x1_increase_bn(conv5_3_1x1_increase)
conv5_3_global_pool = F.avg_pool2d(conv5_3_1x1_increase_bn, kernel_size=(7L, 7L), stride=(1L, 1L), padding=(0L,), ceil_mode=False)
conv5_3_1x1_down = self.conv5_3_1x1_down(conv5_3_global_pool)
conv5_3_1x1_down_relu = F.relu(conv5_3_1x1_down)
conv5_3_1x1_up = self.conv5_3_1x1_up(conv5_3_1x1_down_relu)
conv5_3_prob = F.sigmoid(conv5_3_1x1_up)
conv5_3_1x1_increase_bn_scale = conv5_3_prob * conv5_3_1x1_increase_bn
conv5_3 = conv5_3_1x1_increase_bn_scale + conv5_2_relu
conv5_3_relu = F.relu(conv5_3)
pool5_7x7_s1 = F.avg_pool2d(conv5_3_relu, kernel_size=(7L, 7L), stride=(1L, 1L), padding=(0L,), ceil_mode=False)
feat_extract = self.feat_extract(pool5_7x7_s1)
return feat_extract
@staticmethod
| |
# src/Placeholders.py
# Copyright 2020, Battelle Energy Alliance, LLC
# ALL RIGHTS RESERVED
"""
Evaluated signal values for use in HERON
"""
import os
import sys
import abc
import copy
import _utils as hutils
from base import Base
FRAMEWORK_PATH = hutils.get_raven_loc()
sys.path.append(FRAMEWORK_PATH)
from utils import InputData, InputTypes, utils, xmlUtils
sys.path.pop()
sys.path.append(os.path.join(FRAMEWORK_PATH, os.pardir, 'scripts'))
from externalROMloader import ravenROMexternal
sys.path.pop()
class Placeholder(Base):
"""
Objects that hold a place in the HERON workflow
but don't hold value until converted into the RAVEN workflow.
"""
def __init__(self, **kwargs):
"""
Constructor.
@ In, kwargs, dict, passthrough args
@ Out, None
"""
Base.__init__(self, **kwargs)
self.name = None # identifier
self._source = None # name of file? the signal should come from
self._var_names = None # LIST of names of output variable from CSV or ARMA
self._type = None # source type, such as CSV, pickle, function ...
self._target_file = None # source file to take data from
self._workingDir = kwargs['loc'] # where is the HERON input file?
@classmethod
@abc.abstractmethod
def get_input_specs(cls):
"""
Collects input specifications for this class.
@ In, None
@ Out, specs, InputData, specs
"""
pass
def read_input(self, xml):
"""
Sets settings from input file
@ In, xml, xml.etree.ElementTree.Element, input from user
@ Out, None
"""
specs = self.get_input_specs()()
specs.parseNode(xml)
self.name = specs.parameterValues['name']
self._source = specs.value
# check source exists
if self._source.startswith('%HERON%'):
# magic word for "relative to HERON root"
heron_path = hutils.get_heron_loc()
self._target_file = os.path.abspath(self._source.replace('%HERON%', heron_path))
else:
# check path relative to the HERON input file
rel_interp = os.path.abspath(os.path.join(self._workingDir, self._source))
if os.path.isfile(rel_interp):
self._target_file = rel_interp
else:
# check absolute path
abs_interp = os.path.abspath(self._source)
if os.path.isfile(abs_interp):
self._target_file = abs_interp
else:
# let relative path trigger the error
self._target_file = rel_interp
# check source
if not os.path.isfile(self._target_file):
self.raiseAnError(IOError, f'File not found for <DataGenerator><{self._type}> named "{self.name}".' +
f'\nLooked in: "{self._target_file}"' +
f'\nGiven location: "{self._source}"')
return specs
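# Note added for illustration (not part of the original file; the path below is
# hypothetical): a source such as <ARMA name="price">%HERON%/path/to/arma.pk</ARMA>
# resolves %HERON% against the HERON installation root, while a plain path is tried
# first relative to the directory of the HERON input file and then as an absolute path.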
def checkValid(self, case, components, sources):
"""
Check validity of placeholder given rest of system
@ In, case, HERON.Case, case
@ In, components, list(HERON.Component), components
@ In, sources, list(HERON.Placeholder), sources
@ Out, None
"""
pass # overwrite to check
def print_me(self, tabs=0, tab=' '):
"""
Prints info about self
@ In, tabs, int, number of tabs to prepend
@ In, tab, str, format for tabs
@ Out, None
"""
pre = tab*tabs
print(pre+'DataGenerator:')
print(pre+' name:', self.name)
print(pre+' source:', self._source)
print(pre+' variables:', self._var_names)
def is_type(self, typ):
"""
Checks for matching type
@ In, typ, str, type to check against
@ Out, is_type, bool, True if matching request
"""
# maybe it's not anything we know about
if typ not in ['ARMA', 'Function', 'ROM']:
return False
return eval('isinstance(self, {})'.format(typ))
def get_variable(self):
"""
Returns the variable(s) in use for this placeholder.
@ In, None
@ Out, var_names, list, variable names
"""
return self._var_names
class ARMA(Placeholder):
"""
Placeholder for signals coming from the ARMA
"""
@classmethod
def get_input_specs(cls):
"""
Collects input specifications for this class.
@ In, None
@ Out, specs, InputData, specs
"""
specs = InputData.parameterInputFactory('ARMA', contentType=InputTypes.StringType, ordered=False, baseNode=None,
descr=r"""This data source is a source of synthetically-generated histories trained by RAVEN.
The RAVEN ARMA ROM should be trained and serialized before using it in HERON. The text
of this node indicates the location of the serialized ROM. This location is usually relative
with respect to the HERON XML input file; however, a full absolute path can be used,
or the path can be prepended with ``\%HERON\%'' to be relative to the installation
directory of HERON.""")
specs.addParam('name', param_type=InputTypes.StringType, required=True,
descr=r"""identifier for this data source in HERON and in the HERON input file. """)
specs.addParam('variable', param_type=InputTypes.StringListType, required=True,
descr=r"""provides the names of the variables from the synthetic history generators that will
be used in this analysis.""")
# TODO someday read this directly off the model instead of asking the user!
specs.addParam('evalMode', param_type=InputTypes.StringType, required=False,
descr=r"""desired sampling mode for the ARMA. See the RAVEN manual for options. \default{clustered}""")
return specs
def __init__(self, **kwargs):
"""
Constructor.
@ In, kwargs, dict, passthrough args
@ Out, None
"""
Placeholder.__init__(self, **kwargs)
self._type = 'ARMA'
self._var_names = None # variables from the ARMA to use
self.eval_mode = None # ARMA evaluation style (clustered, full, truncated)
self.needs_multiyear = None # if not None, then this is a 1-year ARMA that needs multiyearing
self.limit_interp = None # if not None, gives the years to limit this interpolated ROM to
def read_input(self, xml):
"""
Sets settings from input file
@ In, xml, xml.etree.ElementTree.Element, input from user
@ Out, None
"""
specs = Placeholder.read_input(self, xml)
self._var_names = specs.parameterValues['variable']
self.eval_mode = specs.parameterValues.get('evalMode', 'clustered')
# check that the source ARMA exists
def checkValid(self, case, components, sources):
"""
Check validity of placeholder given rest of system
@ In, case, HERON.Case, case
@ In, components, list(HERON.Component), components
@ In, sources, list(HERON.Placeholder), sources
@ Out, None
"""
print(f'Checking ROM at "{self._target_file}" ...')
structure = hutils.get_synthhist_structure(self._target_file)
interpolated = 'macro' in structure
clustered = bool(structure['clusters'])
# segmented = bool(structure['segments']) # TODO
print(f'For DataGenerator <{self._type}> "{self.name}", detected: ' +
f'{"" if interpolated else "NOT"} interpolated, ' +
f'{"" if clustered else "NOT"} clustered.')
# expect that project life == num macro years
project_life = hutils.get_project_lifetime(case, components) - 1 # one less for construction year
if interpolated:
# if interpolated, needs more checking
interp_years = structure['macro']['num']
if interp_years >= project_life:
print(f' -> "{self.name}" interpolates {interp_years} macro steps, and project life is {project_life}, so histories will be trunctated.')
self.limit_interp = project_life
else:
raise RuntimeError(f' -> "{self.name}" interpolates {interp_years} macro steps, but project life is {project_life}!')
else:
# if single year, we can use multiyear, so no problem
print(f' -> "{self.name}" will be extended to project life ({project_life}) macro steps using <Multicycle>.')
self.needs_multiyear = project_life
class Function(Placeholder):
"""
Placeholder for values that are evaluated on the fly
"""
@classmethod
def get_input_specs(cls):
"""
Collects input specifications for this class.
@ In, None
@ Out, specs, InputData, specs
"""
specs = InputData.parameterInputFactory('Function', contentType=InputTypes.StringType,
ordered=False, baseNode=None,
descr=r"""This data source is a custom Python function to provide derived values.
Python functions have access to the variables within the dispatcher. The text
of this node indicates the location of the python file. This location is usually relative
with respect to the HERON XML input file; however, a full absolute path can be used,
or the path can be prepended with ``\%HERON\%'' to be relative to the installation
directory of HERON.""")
specs.addParam('name', param_type=InputTypes.StringType, required=True,
descr=r"""identifier for this data source in HERON and in the HERON input file. """)
return specs
def __init__(self, **kwargs):
"""
Constructor.
@ In, kwargs, dict, passthrough args
@ Out, None
"""
Placeholder.__init__(self, **kwargs)
self._type = 'Function'
self._module = None
self._module_methods = {}
def __getstate__(self):
"""
Serialization.
@ In, None
@ Out, d, dict, object contents
"""
# d = super(self, __getstate__) TODO only if super has one ...
d = copy.deepcopy(dict((k, v) for k, v in self.__dict__.items() if k not in ['_module']))
return d
def __setstate__(self, d):
"""
Deserialization.
@ In, d, dict, object contents
@ Out, None
"""
self.__dict__ = d
load_string, _ = utils.identifyIfExternalModelExists(self, self._target_file, '')
module = utils.importFromPath(load_string, True)
if not module:
raise IOError(f'Module "{self._source}" for function "{self.name}" was not found!')
self._module = module
self._set_callables(module)
def read_input(self, xml):
"""
Sets settings from input file
@ In, xml, xml.etree.ElementTree.Element, input from user
@ Out, None
"""
Placeholder.read_input(self, xml)
# load module
load_string, _ = utils.identifyIfExternalModelExists(self, self._target_file, '')
module = utils.importFromPath(load_string, True)
if not module:
raise IOError(f'Module "{self._source}" for function "{self.name}" was not found!')
self._module = module
# TODO do we need to set the var_names? self._var_names = _var_names
self._set_callables(module)
def _set_callables(self, module):
"""
Build a dict of callable methods with the right format
@ In, module, python Module, module to load methods from
@ Out, None
"""
for name, member in module.__dict__.items():
# check all conditions for unacceptable formats; if none of those are true, then it's a good method
## check callable as a function
if not callable(member):
continue
self._module_methods[name] = member
def evaluate(self, method, request, data_dict):
"""
Evaluates requested method in stored module.
@ In, method, str, method name
@ In, request, dict, requested | |
#
#Authors: <NAME>,<NAME>
#
#NOTES: This class finds and returns dependencies of all packages or one
#particular package. When run with the -V option it finds dependencies at file level
#
#
import os, sys, re,string
from os.path import walk
import getopt,warnings
os.system('rm -rf depresult')
vinfo = sys.version_info
pythonv = "python%d.%d"%(vinfo[0], vinfo[1])
######################################################################
# COMMAND LINE OPTIONS
######################################################################
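# Illustrative invocation sketch (not part of the original script; the package
# name is hypothetical and the -p/-V command line parsing lives elsewhere):
#   checker = DEPENDENCYCHECKER()
#   checker.rundependencychecker('MolKit', V=True)   # file-level dependencies
#   checker.rundependencychecker(None)               # all installed packages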
class DEPENDENCYCHECKER:
########################################################################
# Finding packages
########################################################################
def rundependencychecker(self,pack,v=True,V=False,loc=None):
cwd = os.getcwd()
import string
packages = {}
pn =[]
for i in sys.path:
s =i.split('/')
if s[-1]=="MGLToolsPckgs" or s[-1]== 'site-packages':
#if s[-1]=='FDepPackages' or s[-1]=='RDepPackages' or s[-1]=='FSharedPackages' or s[-1]=='RSharedPackages' or s[-1]== 'site-packages':
pn.append(i)
for p in pn:
os.chdir(p)
files = os.listdir(p)
for f in files:
if not os.path.isdir(f):
continue
pwdir = os.path.join(p, f)
if os.path.exists( os.path.join( pwdir, '__init__.py')):
if not packages.has_key(f):
packages[f] = pwdir
elif os.path.exists( os.path.join( p, '.pth') ):
if not packages.has_key(f):
packages[f] = pwdir
os.chdir(cwd)
################################################################
# packages in site-packages
###################################################################
pack_site=packages.keys()
##########################################################################
# Finding list of import statements used in all packages
###########################################################################
Total_packs = []
if V == True:
if pack!=None:
packn = string.split(pack,'.')
pack =packn[0]
exec('packages={"%s":"%s"}'%(pack,packages[pack]))
if packn[-1]!='py':
pack_file =packn[-1]
if packn[-1]=='py':
pack_file =packn[-2]
else:
pack_file=None
for pack in packages:
files = []
pat = re.compile('import')
print "please wait ...."
for root, dirs, files in os.walk(packages[pack]):
# remove directories not to visit
for rem in ['CVS', 'regression', 'Tutorial', 'test','Doc','doc']:
if rem in dirs:
dirs.remove(rem)
# look for files that contain the string 'import'
for fi in files:
if fi[-3:]!='.py':
continue
if fi[-3:]=='.py':
#finds pattern "import" match in that particular file
if pack_file!=pack:
if pack_file!=None:
if fi !=pack_file+'.py':
continue
else:
candidates = []
f = open( os.path.join(root, fi) )
data = f.readlines()
f.close()
found = 0
for line in data:
match = pat.search(line)
if match:
candidates.append( (root, fi, line) )
#finds pattern "import" for packages given with option p at file level
if pack_file==pack:
candidates = []
f = open( os.path.join(root, fi) )
data = f.readlines()
f.close()
found = 0
for line in data:
match = pat.search(line)
if match:
candidates.append( (root, fi, line) )
#finds pattern "import" match for all packages at file level
else:
candidates = []
f = open( os.path.join(root, fi) )
data = f.readlines()
f.close()
found = 0
for line in data:
match = pat.search(line)
if match:
candidates.append( (root, fi, line) )
#######################################
#finding dependencies
#######################################
result= []
import string
if len(candidates)>0:
for candidate_num in candidates:
p, f, imp = candidate_num
path =string.split(p,'site-packages')[-1]
implist =[]
fromlist=[]
y =string.split(imp)
#omitting commented imports
if '.' not in imp and y[0]=="import":
len_space = len(imp.split(' '))
len_comma=len(imp.split(','))
if (len_space -1) > len_comma:
continue
# 'as' in import statement
if "as" in y:
for a in y:
if a=='as':
aind = y.index(a)
if '.' not in y[aind-1] :
implist.append(y[aind-1])
continue
else:
newa = y[aind-1].split('.')
implist.append(newa[0])
continue
if '#' in y:
continue
#if first word is import in the list
if y[0]=='import':
for i in range(1,len(y)):
if y[i][-1]==";":
y[i]=y[i][:-1]
if y[i] not in implist:
implist.append(y[i])
break
if 'as' in y:
break
if y[i][-1]==',':
y[i]=y[i][:-1]
if ',' in y[i]:
srg = string.split(y[i],',')
for j in srg:
if j not in implist:
implist.append(j)
continue
elif len(y[i])<=2:
continue
#if import statement is like a.q
elif len(string.split(y[i],'.'))!=1:
sr = string.split(y[i],'.')
if sr[0] not in implist:
#if module doesn't start with __
#append to list
if sr[0][0]!='__':
implist.append(sr[0])
#if import statement without '.'
else:
if y[i] not in implist:
#print y[i]
if y[i][0]!='__':
implist.append(y[i])
#import statement without ',' at the end
else:
if len(y[i])==1:
continue
elif ',' in y[i]:
srg = string.split(y[i],',')
for j in srg:
if j not in implist:
implist.append(j)
continue
#import statement as a.b.c.d
elif len(string.split(y[i],'.'))>1:
sr = string.split(y[i],'.')
if sr[0] not in implist:
if sr[0][0]!='__':
implist.append(sr[0])
continue
#import statement without '.'
elif y[i] not in implist:
if y[i][0]!='__':
implist.append(y[i])
continue
for im in implist:
#try importing module in implist
try:
exec('import %s'%im)
if im == 'Pmw':
if im not in result:
if im!=pack:
result.append(im)
continue
else:
continue
#if module.__file__ exists check in
#site-packages and append to result
exec('fi = %s.__file__'%im)
fil = os.path.abspath('%s'%fi)
if os.path.exists(fil):
file = string.split(str(fil),'/')
if file[-2] in pack_site:
if file[-2] not in result:
if file[-2] !=pack:
result.append(file[-2])
elif file[-2]=='Numeric':
if 'Numeric' not in result:
if 'Numeric'!=pack:
result.append('Numeric')
elif file[-2] not in ['lib-dynload', pythonv,'lib-tk']:
if im not in result:
if im!=pack:
result.append(im)
except:
if im in ['sys','gc','thread','exceptions']:
continue
else:
if im not in result:
result.append(im)
#if first word is from in list
if y[0]=='from':
#if from statement is like a.b.c
if len(string.split(y[1],'.'))!=1:
sr = string.split(y[1],'.')
if sr[0] not in fromlist:
fromlist.append(sr[0])
else:
if y[1]!=pack:
if y[1] not in fromlist:
if y[1][0]!='__':
fromlist.append(y[1])
for i in fromlist:
#checks importing module
try:
exec('import %s'%i)
if i == 'Pmw':
if i not in result:
if i !=pack:
result.append(i)
continue
else:
continue
#if __file__ exists check in site-packages
#and append to result
exec('fi = %s.__file__'%i)
fil = os.path.abspath('%s'%fi)
if os.path.exists(fil):
file = string.split(str(fil),'/')
if file[-2] in pack_site:
if file[-2] not in result:
if file[-2] !=pack:
result.append(file[-2])
elif file[-2]=='Numeric':
if 'Numeric' not in result:
if 'Numeric'!=pack:
result.append('Numeric')
elif file[-2] not in ['lib-dynload', pythonv,'lib-tk']:
if i not in result:
if i!=pack :
result.append(i)
except:
if i in ['sys','gc','thread','exceptions']:
continue
else:
if i not in result:
result.append(i)
listdf=[]
for r,d,lf in os.walk(packages[pack]):
for rem in ['CVS', 'regression', 'Tutorial', 'test','Doc','doc']:
if rem in d:
d.remove(rem)
#when files in pack are imported
listd = os.listdir(r)
for ld in listd:
if ld.endswith('.py')==True or ld.endswith('.so')==True:
for res in result:
if res == ld[:-3]:
if res in result:
result.remove(res)
for files in lf:
for res in result:
pat1 = re.compile('"%s"' %res)
pat2 = re.compile("%s.py" %res)
fptr=open("%s/%s" %(r,files))
lines = fptr.readlines()
for line in lines:
match1 = pat1.search(line)
match2 = pat2.search(line)
if match1 or match2:
if res in result:
ind = result.index(res)
if result[ind] not in pack_site:
del result[ind]
continue
#In case of Pmv multires,pdb2qr etc
if res in files:
if res in result:
ind = result.index(res)
if result[ind] not in pack_site:
del result[ind]
for res in result:
if res[:3] in ['tmp','TMP','win']:
result.remove(res)
continue
exec('l = len("%s")'%f)
if l>60:
exec('md = string.split("%s",",")[0]'%f)
exec('f = string.split("%s","/")[-1][:-1]'%md)
Total_packs.append('result_%s %s %s %s' %(pack,path,f,result))
#return Total_packs
else:
Total_packs.append('result_%s %s %s %s' %(pack,path,f,result))
print "result_%s %s %s %s" %(pack,path,f,result)
if Total_packs:
return Total_packs
else:
if pack != None:
#print pack
pack_list = pack.split(',')
if len(pack_list)>=1:
packs = {}
for p in pack_list:
packs[p]=packages[p]
packages = packs
print "please wait ......."
for pack in packages:
files = []
pat = re.compile('import')
candidates = []
for root, dirs, files in os.walk(packages[pack]):
# remove directories not to visit
for rem in ['CVS', 'regression', 'Tutorial', 'test','Doc','doc']:
if rem in dirs:
dirs.remove(rem)
# look for files that contain the string 'import'
for fi in files:
if fi[-3:]!='.py':
continue
#finding pattern "import" match
if fi[-3:]=='.py':
f = open( os.path.join(root, fi) )
data = f.readlines()
f.close()
found = 0
for line in data:
match = pat.search(line)
if match:
candidates.append( (root, fi, line) )
##############################
#finding dependencies
##############################
result= []
import string
for candidate_num in candidates:
#print candidate_num
p, f, imp = candidate_num
implist =[]
fromlist=[]
y =string.split(imp)
#omitting commented imports
if '.' not in imp and y[0]=="import":
len_space = len(imp.split(' '))
len_comma=len(imp.split(','))
if (len_space -1) > len_comma:
continue
if "as" in y:
for a in y:
if a=='as':
aind = y.index(a)
if '.' not in y[aind-1] :
if y[aind-1] not in implist:
implist.append(y[aind-1])
continue
else:
newa = y[aind-1].split('.')
if newa[0] not in implist:
implist.append(newa[0])
continue
if '#' in y:
continue
#if first word is import in the list
if y[0]=='import':
for i in range(1,len(y)):
if y[i][-1]==";":
y[i]=y[i][:-1]
if y[i] not in implist:
implist.append(y[i])
break
if "as" in y:
break
if y[i][-1]==',':
y[i]=y[i][:-1]
if ',' in y[i]:
| |
def int_hamming_distance(a, b):
"""
Element-wise Hamming distance between integer tensors.
:param a: first tensor
:type a: torch.Tensor
:param b: second tensor
:type b: torch.Tensor
:return: element-wise Hamming distance
:rtype: torch.Tensor
"""
if not a.is_contiguous():
a = a.contiguous()
if not b.is_contiguous():
b = b.contiguous()
check_type(a, b)
cuda = is_cuda(a)
assert is_cuda(b) is cuda
assert len(a.shape) == len(b.shape), (a.shape, b.shape)
for d in range(len(a.shape)):
assert a.shape[d] == b.shape[d], (a.shape, b.shape, d)
if cuda:
dist = torch.cuda.IntTensor(a.shape).fill_(0)
else:
dist = torch.IntTensor(a.shape).fill_(0)
n = a.nelement()
shape = list(dist.shape)
grid, block = cupy.grid_block(shape)
type = str(a.dtype).replace('torch.', '')
if cuda:
cupy.cunnex('cupy_%shammingdistance' % type)(
grid=tuple(grid),
block=tuple(block),
args=[n,
a.data_ptr(),
b.data_ptr(),
dist.data_ptr()],
stream=cupy.Stream
)
else:
_n = cffi.ffi.cast('int', n)
_dist = cffi.ffi.cast('int*', dist.data_ptr())
if type == 'int32':
_a = cffi.ffi.cast('int*', a.data_ptr())
_b = cffi.ffi.cast('int*', b.data_ptr())
cffi.lib.cffi_int32hammingdistance(_n, _a, _b, _dist)
elif type == 'int16':
_a = cffi.ffi.cast('short*', a.data_ptr())
_b = cffi.ffi.cast('short*', b.data_ptr())
cffi.lib.cffi_int16hammingdistance(_n, _a, _b, _dist)
elif type == 'int8':
_a = cffi.ffi.cast('char*', a.data_ptr())
_b = cffi.ffi.cast('char*', b.data_ptr())
cffi.lib.cffi_int8hammingdistance(_n, _a, _b, _dist)
elif type == 'uint8':
_a = cffi.ffi.cast('unsigned char*', a.data_ptr())
_b = cffi.ffi.cast('unsigned char*', b.data_ptr())
cffi.lib.cffi_uint8hammingdistance(_n, _a, _b, _dist)
else:
raise NotImplementedError
return dist
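# Note added for illustration (not part of the original module): for matching
# integer tensors the result counts differing bits per element, e.g. the int8
# values 0b1010 and 0b0011 differ in two bit positions, so their distance is 2.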
# 0.5^9
INT32_FAST_RANDOM_FLIP_0001953125 = '&&&&&&&&'
# 0.5^8 * 0.75
INT32_FAST_RANDOM_FLIP_0002929688 = '|&&&&&&&&'
# 0.5^8
INT32_FAST_RANDOM_FLIP_000390625 = '&&&&&&&'
# 0.5^7 * 0.75
INT32_FAST_RANDOM_FLIP_0005859375 = '|&&&&&&&'
# 0.5^7
INT32_FAST_RANDOM_FLIP_00078125 = '&&&&&&'
# 0.5^6 * 0.75
INT32_FAST_RANDOM_FLIP_001171875 = '|&&&&&&'
# 0.5^6
INT32_FAST_RANDOM_FLIP_0015625 = '&&&&&'
# 0.5^5 * 0.75
INT32_FAST_RANDOM_FLIP_00234375 = '|&&&&&'
# 0.5^5
INT32_FAST_RANDOM_FLIP_003125 = '&&&&'
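# Illustrative helper added for documentation (not part of the original module):
# it reproduces the per-bit flip probability that int_fast_random_flip realises
# for a given pattern string. Starting from a uniform random word (each bit set
# with probability 0.5), every '&' halves the probability and every '|' maps
# p to 0.5 + 0.5 * p, e.g. '|&&&&&&' gives 0.75 * 0.5**6 = 0.01171875.
def _pattern_flip_probability(pattern):
    p = 0.5
    for c in pattern:
        if c == '&':
            p *= 0.5
        elif c == '|':
            p = 0.5 + 0.5 * p
    return p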
def int_fast_random_flip(input, prob=INT32_FAST_RANDOM_FLIP_001171875, protected_bits=[0]*32):
"""
Fast version of random int32 bit flips supporting only specific flip probabilities.
Inspired by https://stackoverflow.com/questions/35795110/fast-way-to-generate-pseudo-random-bits-with-a-given-probability-of-0-or-1-for-e/35811904#35811904.
Protected bits are enforced by converting protected_bits to an integer mask and AND-ing the random flip mask with it before applying XOR for the bit flips.
Important: underestimates probabilities slightly!
:param input: input tensor
:type input: torch.Tensor
:param prob: flip-probability pattern per bit (one of the INT32_FAST_RANDOM_FLIP_* constants)
:type prob: str
:param protected_bits: list of length 32, indicating whether a bit is protected from flips (1) or may be flipped (0)
:type protected_bits: [int]
:return: input with random bit flips
:rtype: torch.Tensor
"""
# assert (input.is_contiguous() == True)
if not input.is_contiguous():
input = input.contiguous()
assert (input.dtype == torch.int32)
assert prob in [
INT32_FAST_RANDOM_FLIP_0001953125,
INT32_FAST_RANDOM_FLIP_0002929688,
INT32_FAST_RANDOM_FLIP_000390625,
INT32_FAST_RANDOM_FLIP_0005859375,
INT32_FAST_RANDOM_FLIP_00078125,
INT32_FAST_RANDOM_FLIP_001171875,
INT32_FAST_RANDOM_FLIP_0015625,
INT32_FAST_RANDOM_FLIP_00234375,
INT32_FAST_RANDOM_FLIP_003125,
]
def generator(pattern, size, cuda=False):
if cuda:
r = torch.cuda.IntTensor(*size).random_(torch.iinfo(torch.int32).min, torch.iinfo(torch.int32).max)
else:
r = torch.IntTensor(*size).random_(torch.iinfo(torch.int32).min, torch.iinfo(torch.int32).max)
for i in range(len(pattern)):
if cuda:
a = torch.cuda.IntTensor(*size).random_(torch.iinfo(torch.int32).min, torch.iinfo(torch.int32).max)
else:
a = torch.IntTensor(*size).random_(torch.iinfo(torch.int32).min, torch.iinfo(torch.int32).max)
if pattern[i] == '&':
r = int_and(r, a)
elif pattern[i] == '|':
r = int_or(r, a)
return r
bits = ''
for protected_bit in protected_bits:
if protected_bit == 1:
bits += '0'
else:
bits += '1'
protected = int(bits, 2)
size = list(input.shape)
protected = torch.ones(size, dtype=torch.int32)*protected
if is_cuda(input):
protected = protected.cuda()
random = generator(prob, size, cuda=is_cuda(input))
random = int_and(random, protected)
output = int_xor(input, random)
return output
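# Hedged illustration (added by the editor, not part of the original module): the
# pattern constants above encode per-bit flip probabilities. The generator starts
# from one uniform random mask (each bit is 1 with probability 0.5); every '&' with
# a fresh mask halves that probability, and every '|' maps p to 1 - (1 - p) * 0.5,
# which is where the 0.75 factors in the comments come from.
def _pattern_probability(pattern):
    p = 0.5  # probability that a bit of the initial random mask is 1
    for op in pattern:
        if op == '&':
            p *= 0.5
        elif op == '|':
            p = 1.0 - (1.0 - p) * 0.5
    return p
assert abs(_pattern_probability(INT32_FAST_RANDOM_FLIP_003125) - 0.5 ** 5) < 1e-12
assert abs(_pattern_probability(INT32_FAST_RANDOM_FLIP_001171875) - 0.75 * 0.5 ** 6) < 1e-12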
def int_flip(input, mask, precision=None):
"""
Flip bits in input according to mask
:param input: input tensor
:type input: torch.Tensor
:param mask: boolean mask
:type mask: torch.Tensor
:return: input with bits flipped according to mask
:rtype: torch.Tensor
"""
if not input.is_contiguous():
input = input.contiguous()
if not mask.is_contiguous():
mask = mask.contiguous()
inferred_precision = check_type(input)
if precision is None:
precision = inferred_precision
cuda = is_cuda(input)
assert (mask.dtype == torch.bool)
assert is_cuda(mask) is cuda
assert len(input.shape) + 1 == len(mask.shape), (input.shape, mask.shape)
for d in range(len(input.shape)):
assert input.shape[d] == mask.shape[d], (input.shape, mask.shape, d)
assert mask.shape[-1] == precision, 'precision does not match, using inferred precision: %s' % (inferred_precision == precision)
output = input.new_zeros(input.shape)
n = output.nelement()
shape = list(output.shape)
grid, block = cupy.grid_block(shape)
type = str(input.dtype).replace('torch.', '')
if cuda:
cupy.cunnex('cupy_%sflip' % type)(
# https://stackoverflow.com/questions/9985912/how-do-i-choose-grid-and-block-dimensions-for-cuda-kernels
grid=tuple(grid),
block=tuple(block),
args=[n,
mask.data_ptr(),
input.data_ptr(),
output.data_ptr()],
stream=cupy.Stream
)
else:
_n = cffi.ffi.cast('int', n)
_mask = cffi.ffi.cast('bool*', mask.data_ptr())
if type == 'int32':
_input = cffi.ffi.cast('int*', input.data_ptr())
_output = cffi.ffi.cast('int*', output.data_ptr())
cffi.lib.cffi_int32flip(_n, _mask, _input, _output)
elif type == 'int16':
_input = cffi.ffi.cast('short*', input.data_ptr())
_output = cffi.ffi.cast('short*', output.data_ptr())
cffi.lib.cffi_int16flip(_n, _mask, _input, _output)
elif type == 'int8':
_input = cffi.ffi.cast('char*', input.data_ptr())
_output = cffi.ffi.cast('char*', output.data_ptr())
cffi.lib.cffi_int8flip(_n, _mask, _input, _output)
elif type == 'uint8':
_input = cffi.ffi.cast('unsigned char*', input.data_ptr())
_output = cffi.ffi.cast('unsigned char*', output.data_ptr())
cffi.lib.cffi_uint8flip(_n, _mask, _input, _output)
else:
raise NotImplementedError
return output
def int_set_zero(input, m, precision=None):
"""
Set the m LSBs to zero.
:param input: input tensor
:type input: torch.Tensor
:param m: number of LSBs
:type m: int
:return: input with the m LSBs set to zero
:rtype: torch.Tensor
"""
if not input.is_contiguous():
input = input.contiguous()
inferred_precision = check_type(input)
if precision is None:
precision = inferred_precision
cuda = is_cuda(input)
assert m <= precision
output = input.new_zeros(input.shape)
n = output.nelement()
shape = list(output.shape)
grid, block = cupy.grid_block(shape)
type = str(input.dtype).replace('torch.', '')
if cuda:
cupy.cunnex('cupy_%ssetzero' % type)(
# https://stackoverflow.com/questions/9985912/how-do-i-choose-grid-and-block-dimensions-for-cuda-kernels
grid=tuple(grid),
block=tuple(block),
args=[n,
m,
input.data_ptr(),
output.data_ptr()],
stream=cupy.Stream
)
else:
_n = cffi.ffi.cast('int', n)
_m = cffi.ffi.cast('int', m)
if type == 'int32':
_input = cffi.ffi.cast('int*', input.data_ptr())
_output = cffi.ffi.cast('int*', output.data_ptr())
cffi.lib.cffi_int32setzero(_n, _m, _input, _output)
elif type == 'int16':
_input = cffi.ffi.cast('short*', input.data_ptr())
_output = cffi.ffi.cast('short*', output.data_ptr())
cffi.lib.cffi_int16setzero(_n, _m, _input, _output)
elif type == 'int8':
_input = cffi.ffi.cast('char*', input.data_ptr())
_output = cffi.ffi.cast('char*', output.data_ptr())
cffi.lib.cffi_int8setzero(_n, _m, _input, _output)
elif type == 'uint8':
_input = cffi.ffi.cast('unsigned char*', input.data_ptr())
_output = cffi.ffi.cast('unsigned char*', output.data_ptr())
cffi.lib.cffi_uint8setzero(_n, _m, _input, _output)
else:
raise NotImplementedError
return output
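# Hedged illustration (added by the editor, not part of the original module): setting
# the m least significant bits of a fixed-width integer to zero is equivalent to
# ANDing with the complement of (2**m - 1); the kernels above are assumed to do this
# per element.
def _zero_m_lsbs(x, m, bits=32):
    mask = ((1 << bits) - 1) ^ ((1 << m) - 1)  # ones everywhere except the m lowest bits
    return x & mask
assert _zero_m_lsbs(0b10111111, 4) == 0b10110000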
def int_set(input, set1, set0, precision=None):
"""
Set bits in input according to the set1 and set0 masks.
:param input: input tensor
:type input: torch.Tensor
:param set1: boolean mask of bits to set to 1
:type set1: torch.Tensor
:param set0: boolean mask of bits to set to 0
:type set0: torch.Tensor
:return: input with bits set according to set1 and set0
:rtype: torch.Tensor
"""
if not input.is_contiguous():
input = input.contiguous()
if not set1.is_contiguous():
set1 = set1.contiguous()
if not set0.is_contiguous():
set0 = set0.contiguous()
inferred_precision = check_type(input)
if precision is None:
precision = inferred_precision
cuda = is_cuda(input)
assert (set1.dtype == torch.bool)
assert is_cuda(set1) is cuda
assert (set0.dtype == torch.bool)
assert is_cuda(set0) is cuda
assert len(input.shape) + 1 == len(set1.shape), (input.shape, set1.shape)
assert len(input.shape) + 1 == len(set0.shape), (input.shape, set0.shape)
for d in range(len(input.shape)):
assert input.shape[d] == set1.shape[d], (input.shape, set1.shape, d)
assert input.shape[d] == set0.shape[d], (input.shape, set0.shape, d)
assert set1.shape[-1] == precision, 'precision does not match, using inferred precision: %s' % (inferred_precision == precision)
assert set0.shape[-1] == precision, 'precision does not match, using inferred precision: %s' % (inferred_precision == precision)
output = input.new_zeros(input.shape)
n = output.nelement()
shape = list(output.shape)
grid, block = cupy.grid_block(shape)
type = str(input.dtype).replace('torch.', '')
if cuda:
cupy.cunnex('cupy_%sset' % type)(
# https://stackoverflow.com/questions/9985912/how-do-i-choose-grid-and-block-dimensions-for-cuda-kernels
grid=tuple(grid),
block=tuple(block),
args=[n,
set1.data_ptr(),
set0.data_ptr(),
input.data_ptr(),
output.data_ptr()],
stream=cupy.Stream
)
else:
_n = cffi.ffi.cast('int', n)
_set1 = cffi.ffi.cast('bool*', set1.data_ptr())
_set0 = cffi.ffi.cast('bool*', set0.data_ptr())
if type == 'int32':
_input = cffi.ffi.cast('int*', input.data_ptr())
_output = cffi.ffi.cast('int*', output.data_ptr())
cffi.lib.cffi_int32set(_n, _set1, _set0, _input, _output)
elif type == 'int16':
_input = cffi.ffi.cast('short*', input.data_ptr())
_output = cffi.ffi.cast('short*', output.data_ptr())
cffi.lib.cffi_int16set(_n, _set1, _set0, _input, _output)
elif type == 'int8':
_input = cffi.ffi.cast('char*', input.data_ptr())
_output = cffi.ffi.cast('char*', output.data_ptr())
cffi.lib.cffi_int8set(_n, _set1, _set0, _input, _output)
elif type == 'uint8':
_input = cffi.ffi.cast('unsigned char*', input.data_ptr())
_output = cffi.ffi.cast('unsigned char*', output.data_ptr())
cffi.lib.cffi_uint8set(_n, _set1, _set0, _input, _output)
else:
raise NotImplementedError
return output
def int_random_flip(input, zero_prob=0.1, one_prob=0.1, protected_bits=[0]*32, rand=None, precision=None):
"""
Randomly flip bits in an int32 tensor with the given probability to flip zeros or ones.
Note that for a zero and one flip probability of 0.1, the fraction of actually changed values is roughly 0.075,
in contrast to roughly 0.092 for the cupy version.
:param input: input tensor
:type input: torch.Tensor
:param rand: optional tensor holding a random value per bit, shape is input.shape + [precision]
:type rand: torch.Tensor
:param zero_prob: probability to flip a zero
:type zero_prob: float
:param one_prob: probability to flip a one
:type one_prob: float
:param protected_bits: list of length 32, indicating whether a bit can be flipped (1) or not (0)
:type protected_bits: [int]
:return: input with random bit flips
:rtype: torch.Tensor
"""
if not input.is_contiguous():
input = input.contiguous()
inferred_precision = check_type(input)
if precision is None:
precision = inferred_precision
cuda = is_cuda(input)
if rand is None:
rand = torch.rand(list(input.shape) + [precision])
if cuda:
rand = rand.cuda()
if not rand.is_contiguous():
rand = rand.contiguous()
assert (rand.dtype == torch.float)
assert is_cuda(rand) is cuda
assert len(input.shape) + | |
in - 0 bytes out - OutObject<0,0>'),
('nn::am::service::IOverlayAppletProxy', 1000, '0 bytes in - 0 bytes out - OutObject<0,0>'),
('nn::am::service::IOverlayFunctions', 0, '0 bytes in - 0 bytes out'),
('nn::am::service::IOverlayFunctions', 1, '0 bytes in - 0 bytes out'),
('nn::am::service::IOverlayFunctions', 2, '0 bytes in - 8 bytes out - OutRaw<8,8,0>'),
('nn::am::service::IOverlayFunctions', 3, '8 bytes in - 0 bytes out - InRaw<8,8,0>'),
('nn::am::service::IOverlayFunctions', 4, '1 bytes in - 0 bytes out - InRaw<1,1,0>'),
('nn::am::service::IOverlayFunctions', 5, '4 bytes in - 0 bytes out - InRaw<4,4,0>'),
('nn::am::service::IOverlayFunctions', 6, '1 bytes in - 0 bytes out - InRaw<1,1,0>'),
('nn::am::service::IProcessWindingController', 0, '0 bytes in - 4 bytes out - OutRaw<4,1,0>'),
('nn::am::service::IProcessWindingController', 11, '0 bytes in - 0 bytes out - OutObject<0,0>'),
('nn::am::service::IProcessWindingController', 21, '0 bytes in - 0 bytes out - InObject<0,0>'),
('nn::am::service::IProcessWindingController', 22, '0 bytes in - 0 bytes out - OutObject<0,0>'),
('nn::am::service::IProcessWindingController', 23, '0 bytes in - 0 bytes out'),
('nn::am::service::IProcessWindingController', 30, '0 bytes in - 0 bytes out'),
('nn::am::service::IProcessWindingController', 40, '0 bytes in - 0 bytes out - InObject<0,0>'),
('nn::am::service::ISelfController', 0, '0 bytes in - 0 bytes out'),
('nn::am::service::ISelfController', 1, '0 bytes in - 0 bytes out'),
('nn::am::service::ISelfController', 2, '0 bytes in - 0 bytes out'),
('nn::am::service::ISelfController', 3, '0 bytes in - 0 bytes out'),
('nn::am::service::ISelfController', 4, '0 bytes in - 0 bytes out'),
('nn::am::service::ISelfController', 9, '0 bytes in - 0 bytes out - OutHandle<0,1>'),
('nn::am::service::ISelfController', 10, '4 bytes in - 0 bytes out - InRaw<4,4,0>'),
('nn::am::service::ISelfController', 11, '1 bytes in - 0 bytes out - InRaw<1,1,0>'),
('nn::am::service::ISelfController', 12, '1 bytes in - 0 bytes out - InRaw<1,1,0>'),
('nn::am::service::ISelfController', 13, '3 bytes in - 0 bytes out - InRaw<1,1,0>, InRaw<1,1,1>, InRaw<1,1,2>'),
('nn::am::service::ISelfController', 14, '1 bytes in - 0 bytes out - InRaw<1,1,0>'),
('nn::am::service::ISelfController', 15, '0x10 bytes in - 0 bytes out - InRaw<0x10,8,0>'),
('nn::am::service::ISelfController', 16, '1 bytes in - 0 bytes out - InRaw<1,1,0>'),
('nn::am::service::ISelfController', 17, '1 bytes in - 0 bytes out - InRaw<1,1,0>'),
('nn::am::service::ISelfController', 18, '1 bytes in - 0 bytes out - InRaw<1,1,0>'),
('nn::am::service::ISelfController', 19, '4 bytes in - 0 bytes out - InRaw<4,4,0>'),
('nn::am::service::ISelfController', 40, '0 bytes in - 8 bytes out - OutRaw<8,8,0>'),
('nn::am::service::ISelfController', 50, '1 bytes in - 0 bytes out - InRaw<1,1,0>'),
('nn::am::service::ISelfController', 51, '0 bytes in - 0 bytes out'),
('nn::am::service::ISelfController', 60, '0x10 bytes in - 0 bytes out - InRaw<4,4,0>, InRaw<4,4,4>, InRaw<4,4,8>, InRaw<4,4,0xC>'),
('nn::am::service::ISelfController', 61, '1 bytes in - 0 bytes out - InRaw<1,1,0>'),
('nn::am::service::ISelfController', 62, '4 bytes in - 0 bytes out - InRaw<4,4,0>'),
('nn::am::service::ISelfController', 63, '0 bytes in - 4 bytes out - OutRaw<4,4,0>'),
('nn::am::service::ISelfController', 64, '4 bytes in - 0 bytes out - InRaw<4,4,0>'),
('nn::am::service::ISelfController', 65, '0 bytes in - 0 bytes out'),
('nn::am::service::ISelfController', 66, '0 bytes in - 4 bytes out - OutRaw<4,4,0>'),
('nn::am::service::ISelfController', 67, '0 bytes in - 1 bytes out - OutRaw<1,1,0>'),
('nn::am::service::IStorageAccessor', 0, '0 bytes in - 8 bytes out - OutRaw<8,8,0>'),
('nn::am::service::IStorageAccessor', 10, '8 bytes in - 0 bytes out - InRaw<8,8,0>, Buffer<0,0x21,0>'),
('nn::am::service::IStorageAccessor', 11, '8 bytes in - 0 bytes out - InRaw<8,8,0>, Buffer<0,0x22,0>'),
('nn::am::service::ISystemAppletProxy', 0, '0 bytes in - 0 bytes out - OutObject<0,0>'),
('nn::am::service::ISystemAppletProxy', 1, '0 bytes in - 0 bytes out - OutObject<0,0>'),
('nn::am::service::ISystemAppletProxy', 2, '0 bytes in - 0 bytes out - OutObject<0,0>'),
('nn::am::service::ISystemAppletProxy', 3, '0 bytes in - 0 bytes out - OutObject<0,0>'),
('nn::am::service::ISystemAppletProxy', 4, '0 bytes in - 0 bytes out - OutObject<0,0>'),
('nn::am::service::ISystemAppletProxy', 10, '0 bytes in - 0 bytes out - OutObject<0,0>'),
('nn::am::service::ISystemAppletProxy', 11, '0 bytes in - 0 bytes out - OutObject<0,0>'),
('nn::am::service::ISystemAppletProxy', 20, '0 bytes in - 0 bytes out - OutObject<0,0>'),
('nn::am::service::ISystemAppletProxy', 21, '0 bytes in - 0 bytes out - OutObject<0,0>'),
('nn::am::service::ISystemAppletProxy', 22, '0 bytes in - 0 bytes out - OutObject<0,0>'),
('nn::am::service::ISystemAppletProxy', 1000, '0 bytes in - 0 bytes out - OutObject<0,0>'),
('nn::am::service::ITransferStorageAccessor', 0, '0 bytes in - 8 bytes out - OutRaw<8,8,0>'),
('nn::am::service::ITransferStorageAccessor', 1, '0 bytes in - 8 bytes out - OutHandle<0,1>, OutRaw<8,8,0>'),
('nn::am::service::IWindowController', 0, '4 bytes in - 0 bytes out - OutObject<0,0>, InRaw<4,4,0>'),
('nn::am::service::IWindowController', 1, '0 bytes in - 8 bytes out - OutRaw<8,8,0>'),
('nn::am::service::IWindowController', 10, '0 bytes in - 0 bytes out'),
('nn::am::service::IWindowController', 11, '0 bytes in - 0 bytes out'),
('nn::am::service::IWindowController', 12, '0 bytes in - 0 bytes out'),
('nn::aocsrv::detail::IAddOnContentManager', 0, '8 bytes in - 4 bytes out - OutRaw<4,4,0>, InRaw<8,8,0>'),
('nn::aocsrv::detail::IAddOnContentManager', 1, '0x10 bytes in - 4 bytes out - OutRaw<4,4,0>, Buffer<0,6,0>, InRaw<4,4,0>, InRaw<4,4,4>, InRaw<8,8,8>'),
('nn::aocsrv::detail::IAddOnContentManager', 2, '8 bytes in - 4 bytes out - takes pid - OutRaw<4,4,0>, InRaw<8,8,0>'),
('nn::aocsrv::detail::IAddOnContentManager', 3, '0x10 bytes in - 4 bytes out - takes pid - OutRaw<4,4,0>, Buffer<0,6,0>, InRaw<8,8,8>, InRaw<4,4,0>, InRaw<4,4,4>'),
('nn::aocsrv::detail::IAddOnContentManager', 4, '8 bytes in - 8 bytes out - OutRaw<8,8,0>, InRaw<8,8,0>'),
('nn::aocsrv::detail::IAddOnContentManager', 5, '8 bytes in - 8 bytes out - takes pid - OutRaw<8,8,0>, InRaw<8,8,0>'),
('nn::aocsrv::detail::IAddOnContentManager', 6, '0x10 bytes in - 0 bytes out - InRaw<4,4,0>, InRaw<8,8,8>'),
('nn::aocsrv::detail::IAddOnContentManager', 7, '0x10 bytes in - 0 bytes out - takes pid - InRaw<4,4,0>, InRaw<8,8,8>'),
('nn::apm::IManager', 0, '0 bytes in - 0 bytes out - OutObject<0,0>'),
('nn::apm::IManager', 1, '0 bytes in - 4 bytes out - OutRaw<4,4,0>'),
('nn::apm::IManagerPrivileged', 0, '0 bytes in - 0 bytes out - OutObject<0,0>'),
('nn::apm::ISession', 0, '8 bytes in - 0 bytes out - InRaw<4,4,0>, InRaw<4,4,4>'),
('nn::apm::ISession', 1, '4 bytes in - 4 bytes out - OutRaw<4,4,0>, InRaw<4,4,0>'),
('nn::apm::ISystemManager', 0, '4 bytes in - 0 bytes out - InRaw<4,4,0>'),
('nn::apm::ISystemManager', 1, '4 bytes in - 0 bytes out - OutHandle<0,1>, InRaw<4,4,0>'),
('nn::apm::ISystemManager', 2, '0 bytes in - 0x28 bytes out - OutRaw<0x28,8,0>'),
('nn::apm::ISystemManager', 3, '0 bytes in - 0x28 bytes out - OutRaw<0x28,8,0>'),
('nn::apm::ISystemManager', 4, '0 bytes in - 0 bytes out'),
('nn::arp::detail::IReader', 0, ''),
('nn::arp::detail::IReader', 1, ''),
('nn::arp::detail::IReader', 2, ''),
('nn::arp::detail::IReader', 3, ''),
('nn::arp::detail::IRegistrar', 0, '8 bytes in - 0 bytes out - InRaw<8,8,0>'),
('nn::arp::detail::IRegistrar', 1, '0x10 bytes in - 0 bytes out - InRaw<0x10,8,0>'),
('nn::arp::detail::IRegistrar', 2, ''),
('nn::arp::detail::IWriter', 0, '0 bytes in - 0 bytes out - OutObject<0,0>'),
('nn::arp::detail::IWriter', 1, '8 bytes in - 0 bytes out - InRaw<8,8,0>'),
('nn::audio::detail::IAudioDebugManager', 0, '0x10 bytes in - 0 bytes out - InHandle<0,1>, InRaw<8,8,8>, InRaw<4,4,0>'),
('nn::audio::detail::IAudioDebugManager', 1, '0 bytes in - 0 bytes out'),
('nn::audio::detail::IAudioDebugManager', 2, '0 bytes in - 0 bytes out'),
('nn::audio::detail::IAudioDebugManager', 3, '0 bytes in - 0 bytes out'),
('nn::audio::detail::IAudioDevice', 0, '0 bytes in - 4 bytes out - Buffer<0,6,0>, OutRaw<4,4,0>'),
('nn::audio::detail::IAudioDevice', 1, '4 bytes in - 0 bytes out - Buffer<0,5,0>, InRaw<4,4,0>'),
('nn::audio::detail::IAudioDevice', 2, '0 bytes in - 4 bytes out - Buffer<0,5,0>, OutRaw<4,4,0>'),
('nn::audio::detail::IAudioDevice', 3, '0 bytes in - 0 bytes out - Buffer<0,6,0>'),
('nn::audio::detail::IAudioDevice', 4, '0 bytes in - 0 bytes out - OutHandle<0,1>'),
('nn::audio::detail::IAudioDevice', 5, '0 bytes in - 4 bytes out - OutRaw<4,4,0>'),
('nn::audio::detail::IAudioDevice', 6, '0 bytes in - 4 bytes out - Buffer<0,0x22,0>, OutRaw<4,4,0>'),
('nn::audio::detail::IAudioDevice', 7, '4 bytes in - 0 bytes out - Buffer<0,0x21,0>, InRaw<4,4,0>'),
('nn::audio::detail::IAudioDevice', 8, '0 bytes in - 4 bytes out - Buffer<0,0x21,0>, OutRaw<4,4,0>'),
('nn::audio::detail::IAudioDevice', 10, '0 bytes in - 0 bytes out - Buffer<0,0x22,0>'),
('nn::audio::detail::IAudioDevice', 11, '0 bytes in - 0 bytes out - OutHandle<0,1>'),
('nn::audio::detail::IAudioDevice', 12, '0 bytes in - 0 bytes out - OutHandle<0,1>'),
('nn::audio::detail::IAudioIn', 0, '0 bytes in - 4 bytes out - OutRaw<4,4,0>'),
('nn::audio::detail::IAudioIn', 1, '0 bytes in - 0 bytes out'),
('nn::audio::detail::IAudioIn', 2, '0 bytes in - 0 bytes out'),
('nn::audio::detail::IAudioIn', 3, '8 bytes in - 0 bytes out - Buffer<0,5,0>, InRaw<8,8,0>'),
('nn::audio::detail::IAudioIn', 4, '0 bytes in - 0 bytes out - OutHandle<0,1>'),
('nn::audio::detail::IAudioIn', 5, '0 bytes in - 4 bytes out - Buffer<0,6,0>, OutRaw<4,4,0>'),
('nn::audio::detail::IAudioIn', 6, '8 bytes in - 1 bytes out - InRaw<8,8,0>, OutRaw<1,1,0>'),
('nn::audio::detail::IAudioIn', 7, '8 bytes in - 0 bytes out - Buffer<0,5,0>, InRaw<8,8,0>, InHandle<0,1>'),
('nn::audio::detail::IAudioIn', 8, '8 bytes in - 0 bytes out - Buffer<0,0x21,0>, InRaw<8,8,0>'),
('nn::audio::detail::IAudioIn', 9, '0 bytes in - 4 bytes out - Buffer<0,0x22,0>, OutRaw<4,4,0>'),
('nn::audio::detail::IAudioIn', 10, '8 bytes in - 0 bytes out - Buffer<0,0x21,0>, InRaw<8,8,0>, InHandle<0,1>'),
('nn::audio::detail::IAudioInManager', 0, '0 bytes in - 4 bytes out - Buffer<0,6,0>, OutRaw<4,4,0>'),
('nn::audio::detail::IAudioInManager', 1, '0x10 bytes in - 0x10 bytes out - takes pid - OutObject<0,0>, Buffer<0,5,0>, InRaw<8,4,0>, InHandle<0,1>, OutRaw<0x10,4,0>, Buffer<1,6,0>, InRaw<8,8,8>'),
('nn::audio::detail::IAudioInManager', 2, '0 bytes in - 4 bytes out - Buffer<0,0x22,0>, OutRaw<4,4,0>'),
('nn::audio::detail::IAudioInManager', 3, '0x10 bytes in - 0x10 bytes out - takes pid - OutObject<0,0>, Buffer<0,0x21,0>, InRaw<8,4,0>, InHandle<0,1>, OutRaw<0x10,4,0>, Buffer<1,0x22,0>, InRaw<8,8,8>'),
('nn::audio::detail::IAudioInManager', 4, '0 bytes in - 4 bytes out - Buffer<0,0x22,0>, OutRaw<4,4,0>'),
('nn::audio::detail::IAudioInManagerForApplet', 0, '0x10 bytes in - 0 bytes out - OutHandle<0,1>, InRaw<8,8,0>, InRaw<8,8,8>'),
('nn::audio::detail::IAudioInManagerForApplet', 1, '0x10 bytes in - 0 bytes out - OutHandle<0,1>, InRaw<8,8,0>, InRaw<8,8,8>'),
('nn::audio::detail::IAudioInManagerForApplet', 2, '8 bytes in - 4 bytes out - OutRaw<4,4,0>, InRaw<8,8,0>'),
('nn::audio::detail::IAudioInManagerForApplet', 3, '0x18 bytes in - 0 bytes out - InRaw<8,8,8>, InRaw<4,4,0>, InRaw<8,8,0x10>'),
('nn::audio::detail::IAudioInManagerForDebugger', 0, '8 bytes in - 0 bytes out - InRaw<8,8,0>'),
('nn::audio::detail::IAudioInManagerForDebugger', | |
<gh_stars>0
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import inspect
import json
from typing import Any, Dict, Optional, Type
from flask import current_app
from flask_babel import lazy_gettext as _
from marshmallow import EXCLUDE, fields, pre_load, Schema, validates_schema
from marshmallow.validate import Length, ValidationError
from marshmallow_enum import EnumField
from sqlalchemy import MetaData
from sqlalchemy.engine.url import make_url
from sqlalchemy.exc import ArgumentError
from superset import db
from superset.db_engine_specs import BaseEngineSpec, get_engine_specs
from superset.exceptions import CertificateException, SupersetSecurityException
from superset.models.core import ConfigurationMethod, Database, PASSWORD_MASK
from superset.security.analytics_db_safety import check_sqlalchemy_uri
from superset.utils.core import markdown, parse_ssl_cert
database_schemas_query_schema = {
"type": "object",
"properties": {"force": {"type": "boolean"}},
}
database_name_description = "A database name to identify this connection."
port_description = "Port number for the database connection."
cache_timeout_description = (
"Duration (in seconds) of the caching timeout for charts of this database. "
"A timeout of 0 indicates that the cache never expires. "
"Note this defaults to the global timeout if undefined."
)
expose_in_sqllab_description = "Expose this database to SQLLab"
allow_run_async_description = (
"Operate the database in asynchronous mode, meaning "
"that the queries are executed on remote workers as opposed "
"to on the web server itself. "
"This assumes that you have a Celery worker setup as well "
"as a results backend. Refer to the installation docs "
"for more information."
)
allow_file_upload_description = (
"Allow to upload CSV file data into this database"
"If selected, please set the schemas allowed for csv upload in Extra."
)
allow_ctas_description = "Allow CREATE TABLE AS option in SQL Lab"
allow_cvas_description = "Allow CREATE VIEW AS option in SQL Lab"
allow_dml_description = (
"Allow users to run non-SELECT statements "
"(UPDATE, DELETE, CREATE, ...) "
"in SQL Lab"
)
allow_multi_schema_metadata_fetch_description = (
"Allow SQL Lab to fetch a list of all tables and all views across "
"all database schemas. For large data warehouse with thousands of "
"tables, this can be expensive and put strain on the system."
) # pylint: disable=invalid-name
configuration_method_description = (
"Configuration_method is used on the frontend to "
"inform the backend whether to explode parameters "
"or to provide only a sqlalchemy_uri."
)
impersonate_user_description = (
"If Presto, all the queries in SQL Lab are going to be executed as the "
"currently logged on user who must have permission to run them.<br/>"
"If Hive and hive.server2.enable.doAs is enabled, will run the queries as "
"service account, but impersonate the currently logged on user "
"via hive.server2.proxy.user property."
)
force_ctas_schema_description = (
"When allowing CREATE TABLE AS option in SQL Lab, "
"this option forces the table to be created in this schema"
)
encrypted_extra_description = markdown(
"JSON string containing additional connection configuration.<br/>"
"This is used to provide connection information for systems like "
"Hive, Presto, and BigQuery, which do not conform to the username:password "
"syntax normally used by SQLAlchemy.",
True,
)
extra_description = markdown(
"JSON string containing extra configuration elements.<br/>"
"1. The ``engine_params`` object gets unpacked into the "
"[sqlalchemy.create_engine]"
"(https://docs.sqlalchemy.org/en/latest/core/engines.html#"
"sqlalchemy.create_engine) call, while the ``metadata_params`` "
"gets unpacked into the [sqlalchemy.MetaData]"
"(https://docs.sqlalchemy.org/en/rel_1_0/core/metadata.html"
"#sqlalchemy.schema.MetaData) call.<br/>"
"2. The ``metadata_cache_timeout`` is a cache timeout setting "
"in seconds for metadata fetch of this database. Specify it as "
'**"metadata_cache_timeout": {"schema_cache_timeout": 600, '
'"table_cache_timeout": 600}**. '
"If unset, cache will not be enabled for the functionality. "
"A timeout of 0 indicates that the cache never expires.<br/>"
"3. The ``schemas_allowed_for_file_upload`` is a comma separated list "
"of schemas that CSVs are allowed to upload to. "
'Specify it as **"schemas_allowed_for_file_upload": '
'["public", "csv_upload"]**. '
"If database flavor does not support schema or any schema is allowed "
"to be accessed, just leave the list empty<br/>"
"4. the ``version`` field is a string specifying the this db's version. "
"This should be used with Presto DBs so that the syntax is correct<br/>"
"5. The ``allows_virtual_table_explore`` field is a boolean specifying "
"whether or not the Explore button in SQL Lab results is shown.",
True,
)
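# Hedged example (added by the editor, not part of Superset): an "extra" value matching
# the description above might look like the following JSON string; the concrete values
# are illustrative only.
_EXAMPLE_EXTRA = json.dumps(
    {
        "engine_params": {"pool_size": 5},
        "metadata_params": {},
        "metadata_cache_timeout": {"schema_cache_timeout": 600, "table_cache_timeout": 600},
        "schemas_allowed_for_file_upload": ["public", "csv_upload"],
        "version": "0.280",
        "allows_virtual_table_explore": True,
    }
)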
get_export_ids_schema = {"type": "array", "items": {"type": "integer"}}
sqlalchemy_uri_description = markdown(
"Refer to the "
"[SqlAlchemy docs]"
"(https://docs.sqlalchemy.org/en/rel_1_2/core/engines.html#"
"database-urls) "
"for more information on how to structure your URI.",
True,
)
server_cert_description = markdown(
"Optional CA_BUNDLE contents to validate HTTPS requests. Only available "
"on certain database engines.",
True,
)
def sqlalchemy_uri_validator(value: str) -> str:
"""
Validate if it's a valid SQLAlchemy URI and refuse SQLite by default
"""
try:
uri = make_url(value.strip())
except (ArgumentError, AttributeError, ValueError) as ex:
raise ValidationError(
[
_(
"Invalid connection string, a valid string usually follows: "
"driver://user:password@database-host/database-name"
)
]
) from ex
if current_app.config.get("PREVENT_UNSAFE_DB_CONNECTIONS", True):
try:
check_sqlalchemy_uri(uri)
except SupersetSecurityException as ex:
raise ValidationError([str(ex)]) from ex
return value
def server_cert_validator(value: str) -> str:
"""
Validate the server certificate
"""
if value:
try:
parse_ssl_cert(value)
except CertificateException as ex:
raise ValidationError([_("Invalid certificate")]) from ex
return value
def encrypted_extra_validator(value: str) -> str:
"""
Validate that encrypted extra is a valid JSON string
"""
if value:
try:
json.loads(value)
except json.JSONDecodeError as ex:
raise ValidationError(
[_("Field cannot be decoded by JSON. %(msg)s", msg=str(ex))]
) from ex
return value
def extra_validator(value: str) -> str:
"""
Validate that extra is a valid JSON string, and that metadata_params
keys are on the call signature for SQLAlchemy Metadata
"""
if value:
try:
extra_ = json.loads(value)
except json.JSONDecodeError as ex:
raise ValidationError(
[_("Field cannot be decoded by JSON. %(msg)s", msg=str(ex))]
) from ex
else:
metadata_signature = inspect.signature(MetaData)
for key in extra_.get("metadata_params", {}):
if key not in metadata_signature.parameters:
raise ValidationError(
[
_(
"The metadata_params in Extra field "
"is not configured correctly. The key "
"%(key)s is invalid.",
key=key,
)
]
)
return value
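# Hedged usage sketch (added by the editor, illustration only): a well-formed value
# passes through unchanged, while an unknown metadata_params key raises ValidationError.
# extra_validator('{"metadata_params": {"schema": "main"}}')    # returns the string
# extra_validator('{"metadata_params": {"not_a_kwarg": 1}}')    # raises ValidationError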
class DatabaseParametersSchemaMixin: # pylint: disable=too-few-public-methods
"""
Allow SQLAlchemy URI to be passed as separate parameters.
This mixin is a first step in allowing the users to test, create and
edit databases without having to know how to write a SQLAlchemy URI.
Instead, each database defines the parameters that it takes (eg,
username, password, host, etc.) and the SQLAlchemy URI is built from
these parameters.
When using this mixin make sure that `sqlalchemy_uri` is not required.
"""
engine = fields.String(allow_none=True, description="SQLAlchemy engine to use")
parameters = fields.Dict(
keys=fields.String(),
values=fields.Raw(),
description="DB-specific parameters for configuration",
)
configuration_method = EnumField(
ConfigurationMethod,
by_value=True,
description=configuration_method_description,
missing=ConfigurationMethod.SQLALCHEMY_FORM,
)
# pylint: disable=no-self-use, unused-argument
@pre_load
def build_sqlalchemy_uri(
self, data: Dict[str, Any], **kwargs: Any
) -> Dict[str, Any]:
"""
Build SQLAlchemy URI from separate parameters.
This is used for databases that support being configured by individual
parameters (eg, username, password, host, etc.), instead of requiring
the constructed SQLAlchemy URI to be passed.
"""
parameters = data.pop("parameters", {})
# TODO(AAfghahi) standardize engine.
engine = (
data.pop("engine", None)
or parameters.pop("engine", None)
or data.pop("backend", None)
)
configuration_method = data.get("configuration_method")
if configuration_method == ConfigurationMethod.DYNAMIC_FORM:
engine_spec = get_engine_spec(engine)
if not hasattr(engine_spec, "build_sqlalchemy_uri") or not hasattr(
engine_spec, "parameters_schema"
):
raise ValidationError(
[
_(
'Engine spec "InvalidEngine" does not support '
"being configured via individual parameters."
)
]
)
# validate parameters
parameters = engine_spec.parameters_schema.load(parameters) # type: ignore
serialized_encrypted_extra = data.get("encrypted_extra") or "{}"
try:
encrypted_extra = json.loads(serialized_encrypted_extra)
except json.decoder.JSONDecodeError:
encrypted_extra = {}
data["sqlalchemy_uri"] = engine_spec.build_sqlalchemy_uri( # type: ignore
parameters, encrypted_extra
)
return data
def get_engine_spec(engine: Optional[str]) -> Type[BaseEngineSpec]:
if not engine:
raise ValidationError(
[
_(
"An engine must be specified when passing "
"individual parameters to a database."
)
]
)
engine_specs = get_engine_specs()
if engine not in engine_specs:
raise ValidationError(
[_('Engine "%(engine)s" is not a valid engine.', engine=engine,)]
)
return engine_specs[engine]
class DatabaseValidateParametersSchema(Schema):
class Meta: # pylint: disable=too-few-public-methods
unknown = EXCLUDE
engine = fields.String(required=True, description="SQLAlchemy engine to use")
parameters = fields.Dict(
keys=fields.String(),
values=fields.Raw(allow_none=True),
| |
<reponame>mayc2/PseudoKnot_research
# ==============================================================================
# ==============================================================================
# ht_sampling.py ===============================================================
#
# Parses output from SFold. Then carries out the Metropolis-Hastings algorithm
# on the structures SFold calculated. Prepares output in the form of a
# dot-bracket sequence denoting the structures and pseudoknots.
# ==============================================================================
# ==============================================================================
import math
import sys
import random
import os
# ==============================================================================
# Structure_SFold class ========================================================
# ==============================================================================
#
# A simple object that stores data for each structure parsed from the SFold
# output.
#
# Member variables:
# self.count - Structure number, a unique id
# self.boltzmann_weight - Boltzmann weight calculated for the structure
# self.energy - Energy calculated for the structure
# self.pairs - List of the indices of the base pairs that form the structure.
# The list is not separated for each pair; however, the matching bases
# appear next to each other, ie. if the base pairs are (1, 4) and (2, 3)
# then the list is [1, 4, 2, 3].
class Structure_SFold(object):
# ==========================================================================
# __init__ Constructor returns object ======================================
#
# Constructs the object from the parsed data that is received as a list of
# arguments.
def __init__(self, arg):
super(Structure_SFold, self).__init__()
self.count = int(arg[1])
self.boltzmann_weight = float(arg[3])
self.energy = float(arg[2])
self.pairs = []
# ==========================================================================
# add_pair returns void ====================================================
#
# Adds a pair from the parsed SFold output to the structure. Used to build
# the structure after the initial data is parsed.
def add_pair(self, arg):
super(Structure_SFold, self).__init__()
x = int(arg[0])-1
y = int(arg[1])-1
for n in range(int(arg[2])):
self.pairs.append(x)
self.pairs.append(y)
x += 1
y -= 1
# ==========================================================================
# __str__ convert to string returns String =================================
#
# Converts the class into a string for printing.
def __str__(self):
return (str(self.count) + ": weight - " + str(self.boltzmann_weight) + \
", energy - " + str(self.energy) + "\n" + str(self.pairs))
# ==============================================================================
# Functions for preparing data for Metropolis-Hastings =========================
# ==============================================================================
# ==============================================================================
# get_file returns (File, File)
#
# Gets the tuple of files to be read as input. Throws an error if there aren't 3
# arguments (ht_sampling.py, <SFold_output>, <Sequence_file>) to the program.
def get_file():
if len(sys.argv) != 3:
print("Incorrect number of arguments.")
sys.exit(1)
input_file = sys.argv[1]
seq_file = sys.argv[2]
return input_file, seq_file
# ==============================================================================
# parse_seq_file returns String
#
# Parses the contents of the sequence file and returns the contents of the
# sequence as a string.
def parse_seq_file(seq_file):
file = open(seq_file)
data = file.read().splitlines()
seq = ""
for i in range(len(data)):
if i > 0 and data[i] != "":
seq += data[i]
return seq
# ==============================================================================
# parse_sfold_file return List of Structure_SFold
#
# Parses the SFold output file, placing the data into a series of
# Structure_SFold objects.
def parse_sfold_file(input_file):
file = open(input_file)
data = file.read().splitlines()
for i in range(len(data)):
data[i] = data[i].split()
#a list containing the structures
structures = []
i = 3
current = -1
while i < len(data):
#Create new structure
if data[i][0] == "Structure":
current += 1
temp = Structure_SFold(data[i])
structures.append(temp)
else:
structures[current].add_pair(data[i])
i += 1
return structures
# ==============================================================================
# MetropolisHastings(list(Structure_SFold), String)
#
# Contains code for carrying out the Metropolis-Hastings algorithm and
# functions specific to it.
def MetropolisHastings(structures, seq):
# ==========================================================================
# Functions specific to MetropolisHastings =================================
# ==========================================================================
# ==========================================================================
# sample(Structure_SFold, list(Structure_SFold)) returns Structure_SFold
#
# Randomly picks a structure in the list given for sampling. If the
# structure chosen is the same as the argument a, it randomly chooses
# again, doing so until it has two different structures.
def sample(a, structures):
b = random.choice(structures)
if a == b:
b = sample(a,structures)
return b
# ==========================================================================
# generate_dictionaries(Structure_SFold, Structure_SFold)
# returns (dict{int:int}, dict{int:int})
#
# Creates a dictionary for both of the structures that maps each base index
# to the index of its matching base, ie. if the pairs for a structure are
# [1, 4, 2, 3] then the dictionary created is {1:4, 2:3, 3:2, 4:1}. This
# is done for both structures passed as arguments.
def generate_dictionaries(a,b):
pairsA = {}
pairsB = {}
for i in range(0,len(a.pairs),2):
pairsA[a.pairs[i]] = a.pairs[i+1]
pairsA[a.pairs[i+1]] = a.pairs[i]
for i in range(0,len(b.pairs),2):
pairsB[b.pairs[i]] = b.pairs[i+1]
pairsB[b.pairs[i+1]] = b.pairs[i]
return pairsA, pairsB
# ==========================================================================
# handle_ties(int, dict{int:int}, dict{int:int}, int) returns int
#
# Handles ties between pairs in the structures. This function is used
# multiple times when combining structures. Flips a coin that determines
# which dictionary (structure) to remove a pair of indices from and
# removes it. Increments t, the number of flips (a random choice of 0 or
# 1) needed to break ties and returns it.
def handle_ties(key,a,b,t):
t += 1
if random.randint(0,1) == 0:
other = b[key]
b.pop(key)
b.pop(other)
if a[key] in b:
t = handle_ties(a[key],a,b,t)
else:
other = a[key]
a.pop(key)
a.pop(other)
if b[key] in a:
t = handle_ties(b[key],b,a,t)
return t
# ==========================================================================
# helper(dict{int:int}, dict{int:int}, int) returns int
#
# Small helper function for removing duplicates when structures are
# combined. Calls handle_ties on each pair of indices that appears in
# both structures, which randomly choses which structure to remove the
# pair from. Returns t, the number of coin flips (a random choice of 0 or
# 1) needed to break ties.
def helper(a,b,t):
for key in a.keys():
if key in b:
t = handle_ties(key,a,b,t)
t = helper(a,b,t)
break
return t
# ==========================================================================
# remove_duplicates(dict{int:int}, dict{int:int}) returns int
#
# Checks if there are duplicates in the dictionaries of pairs. Returns t, the
# number of coin flips (a random choice of 0 or 1) needed to break ties.
# Calls handle_ties and helper to perform this task.
def remove_duplicates(a,b):
t = 0
for key in a.keys():
if key in b:
t = handle_ties(key,a,b,t)
t = helper(a,b,t)
break
return t
# ==========================================================================
# trim(dict{int:int}) returns void
#
# Removes the key entries that are also values from the dictionary, so that
# a pair only appears once in the set ie. if the dictionary is
# {1:2, 2:1, 3:4, 4:3} then it will be changed to {1:2, 3:4}.
def trim(a):
for key in a.keys():
if a[key] in a.keys():
if key < a[key]:
a.pop(a[key])
else:
a.pop(key)
trim(a)
break
# ==========================================================================
# backtrack(dict{int:int}, dict{int:int}) returns list(int)
#
# Combines two dictionaries into a list of the indices, merging them into
# an ordered list of pairs, ie if the two dictionaries are {1:3, 5:7} and
# {2:4, 6:9} then the combined list will be [1, 3, 2, 4, 5, 7, 6, 9]. The
# list follows the same format as the list of base pairs in the
# Structure_SFold class.
def backtrack(a,b):
trim(a)
trim(b)
keysA = list(a)
keysB = list(b)
keysA.sort()
keysB.sort()
answer = []
i = 0
j = 0
while(i < len(keysA) and j <len(keysB)):
if keysA[i] < keysB[j]:
answer.append(keysA[i])
answer.append(a[keysA[i]])
i += 1
else:
answer.append(keysB[j])
answer.append(b[keysB[j]])
j += 1
while i < len(keysA):
answer.append(keysA[i])
answer.append(a[keysA[i]])
i += 1
while j < len(keysB):
answer.append(keysB[j])
answer.append(b[keysB[j]])
j += 1
return answer
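# ==========================================================================
# Hedged worked trace (added by the editor, illustration only): with
# a = {1: 3, 5: 7} and b = {2: 4, 6: 9}, trim leaves both unchanged (neither
# has reverse duplicates), the sorted key lists are [1, 5] and [2, 6], and the
# merge yields [1, 3, 2, 4, 5, 7, 6, 9], the same pair-list format used by
# Structure_SFold.pairs.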
# ==========================================================================
# check(list(int)) returns void
#
# Checks that each index in the list is unique, prints a warning if
# duplicates are found.
def check(answer):
zest = set()
for item in answer:
if item in zest:
print(str(item) + "is already in the set.")
else:
zest.add(item)
# ==========================================================================
# combine(dict{int:int}, dict{int:int}) returns (list(int), int)
#
# Combines structure a and structure b. Removes the duplicate indices in
# each dictionary (structure) and then combines them into a single list
# containing all of the pairs of the two structures. Returns a tuple of
# the combined list and t, the number of coin flips (a random choice of 0
# or 1) needed to break ties.
def combine(a,b):
t = remove_duplicates(a,b)
answer = backtrack(a,b)
check(answer)
return answer, t
# ==========================================================================
# HotKnots(list(pairs)) returns float
#
# Calls HotKnots on the data given by the function. Structure comes in as a
# list of paired indices. A dot-bracket representation of the
# structure is created and is sent to HotKnots along with the ASCII
# representation of the sequence. HotKnots outputs energy calculations
# for the structure. The output of HotKnots is read and the free energy
# for the structure is returned.
def HotKnots(pairs):
dot_bracket = " \"" + BracketedStructureFromPairs(pairs, len(seq)) +"\""
cmd = '$HOTKNOTS/bin/computeEnergy -s ' + seq + dot_bracket + \
' > $RESEARCH/output_files/hotknots_out.txt'
relative_path = os.getcwd()
os.chdir(relative_path+"/HotKnots_v2.0/bin")
os.system(cmd)
os.chdir(relative_path)
energy_file = "output_files/hotknots_out.txt"
file = open(energy_file,'r',encoding='iso-8859-15')
data = file.read().splitlines()
line = data[len(data)-1]
line = line.split()
if line[1] == "the":
print("HotKnots failed")
free_energy = float(line[1])
free_energy_wo_dangling = float(line[2])
return free_energy
# ==========================================================================
# get_structure_energy(pairs) returns float
#
# Calls Nupack and return the resulting energy calculation. Structure comes
# in as a as a list of paired indecies. A dot-bracket representation of
# the structure is created and is put in an input file for Nupack. Nupac
# is run with the input file and the output from Nupack is read in,
# parsed, and the energy calculation is returned.
def get_structure_energy(pairs):
cmd = '$NUPACKHOME/bin/energy -pseudo $RESEARCH/input_files/nupack_in \
> $RESEARCH/output_files/nupack_out.txt'
dot_bracket = StructureFromPairs(pairs,len(seq))
if generate_inFile(dot_bracket) != 0:
print("ERROR: Failed to generate nupack_in.in file")
sys.exit(1)
os.system(cmd)
energy_file = "output_files/nupack_out.txt"
file = open(energy_file)
data = file.read().splitlines()
if len(data) <= 2:
print("length of Nupack file is 0")
sys.exit(1)
if (data[len(data)-2] == "% Energy (kcal/mol):"):
# print("Nupack Succeeded")
return float(data[len(data)-1])
else:
# print("Nupack failed, establishing large energy value (10**8)")
return float(10**8)
# ==========================================================================
# generate_inFile(String) returns int
#
# Writes the dot-bracket structure to the input file for Nupack. Returns 0
# if successful.
def generate_inFile(dot_bracket):
nupack_file = "input_files/nupack_in.in"
file = open(nupack_file, "w")
file.write(seq+"\n")
file.write(dot_bracket)
file.close()
return 0
# ==========================================================================
# should_switch_stacks(list(int), int, (int, int)) returns boolean
#
# Returns True if the pair crosses over a pair of indices in the pairs
# list. Indicates if the function constructing the dot-bracket format
# should switch stacks.
def should_switch_stacks(pairs, i, pair):
return (pairs[i] < pair[0] \
and (pairs[i+1] > pair[0] and pairs[i+1] < pair[1])) \
or ((pairs[i] > pair[0] and pairs[i] < pair[1]) \
and pairs[i+1] > pair[1])
# ==========================================================================
# BracketedStructureFromPairs(list(int), int) returns String
#
# Creates the dot-bracket formatted representation of the combined
# structure. Takes the list of base pairs and their length and adds them
# to one of two lists (stack0 and stack1). It places pairs into | |
import os
import logging
import tempfile
import rasterio
import whitebox
import yaml
import numpy as np
import geopandas as gpd
import cerf.package_data as pkg
from rasterio import features
from cerf.utils import suppress_callback
# instantiate whitebox toolset
wbt = whitebox.WhiteboxTools()
class Interconnection:
"""Calculate interconnection costs per grid cell in $ / yr using:
Interconnection Cost ($ / yr) = Distance to nearest suitable transmission line (km) *
Electric grid interconnection capital cost ($ / km) *
Annuity factor
+ (if gas-fired technology) Distance to nearest suitable gas pipeline (km) *
Gas interconnection capital cost ($ / km) *
Annuity factor
where, Annuity factor is (d(1 + d)**n) / ((1 + d)**n - 1)
where, d = real annual discount rate (%), n = asset lifetime (years)
:param technology_dict: Dictionary containing technology specific information from the config file
:type technology_dict: dict
:param technology_order: Order of technologies to process
:type technology_order: list
:param region_raster_file: Full path with file name and extension to the region raster file that
assigns a region ID to each raster grid cell
:type region_raster_file: str
:param region_abbrev_to_name_file: Full path with file name and extension to the region abbreviation to name
YAML reference file
:type region_abbrev_to_name_file: str
:param region_name_to_id_file: Full path with file name and extension to the region name to ID YAML
reference file
:type region_name_to_id_file: str
:param substation_file: Full path with file name and extension to the input substations shapefile.
If None, CERF will use the default data stored in the package.
:type substation_file: str
:param transmission_costs_dict: A dictionary containing the cost of connection per km to a substation
having a certain minimum voltage range. Default is to load from the
CERF data file 'costs_per_kv_substation.yml' by specifying 'None'
:type transmission_costs_dict: dict
:param transmission_costs_file: A YAML file containing the cost of connection per km to a substation
having a certain minimum voltage range. Default is to load from the
CERF data file 'costs_per_kv_substation.yml' by specifying 'None'
:type transmission_costs_file: str
:param pipeline_costs_dict: A dictionary containing the cost of connection per km to a gas pipeline.
Default is to load from the CERF data file 'costs_gas_pipeline.yml' by
specifying 'None'
:type pipeline_costs_dict: dict
:param pipeline_costs_file: A YAML file containing the cost of connection per km to a gas pipeline.
Default is to load from the CERF data file 'costs_gas_pipeline.yml' by
specifying 'None'
:type pipeline_costs_file: str
:param pipeline_file: Full path with file name and extension to the input pipelines shapefile.
If None, CERF will use the default data stored in the package.
:type pipeline_file: str
:param output_rasterized_file: Write rasterized transmission layer; if True, set 'output_dir' value
:type output_rasterized_file: bool
:param output_dist_file: Write distance raster; if True, set 'output_dir' value
:type output_dist_file: bool
:param output_alloc_file: Write allocation file; if True, set 'output_dir' value
:type output_alloc_file: bool
:param output_cost_file: Write cost file; if True, set 'output_dir' value
:type output_cost_file: bool
:param interconnection_cost_file: Full path with file name and extension to a preprocessed interconnection
cost NPY file that has been previously written. If None, IC will be
calculated.
:type interconnection_cost_file: str
:param output_dir: Full path to a directory to write outputs to if desired
:type output_dir: str
"""
def __init__(self, template_array, technology_dict, technology_order, region_raster_file,
region_abbrev_to_name_file, region_name_to_id_file, substation_file=None,
transmission_costs_dict=None, transmission_costs_file=None, pipeline_costs_dict=None,
pipeline_costs_file=None, pipeline_file=None, output_rasterized_file=False, output_dist_file=False,
output_alloc_file=False, output_cost_file=False, interconnection_cost_file=None, output_dir=None):
self.template_array = template_array
self.technology_dict = technology_dict
self.technology_order = technology_order
self.region_raster_file = region_raster_file
self.region_abbrev_to_name_file = region_abbrev_to_name_file
self.region_name_to_id_file = region_name_to_id_file
self.substation_file = substation_file
self.transmission_costs_dict = transmission_costs_dict
self.transmission_costs_file = transmission_costs_file
self.pipeline_costs_dict = pipeline_costs_dict
self.pipeline_costs_file = pipeline_costs_file
self.pipeline_file = pipeline_file
self.output_rasterized_file = output_rasterized_file
self.output_dist_file = output_dist_file
self.output_alloc_file = output_alloc_file
self.output_cost_file = output_cost_file
self.interconnection_cost_file = interconnection_cost_file
self.output_dir = output_dir
# calculate electricity transmission infrastructure costs
self.substation_costs = self.transmission_to_cost_raster(setting='substations')
# if there are any gas technologies present, calculate gas pipeline infrastructure costs
self.pipeline_costs = self.transmission_to_cost_raster(setting='pipelines')
@staticmethod
def calc_annuity_factor(discount_rate, lifetime):
"""Calculate annuity factor."""
fx = pow(1.0 + discount_rate, lifetime)
return discount_rate * fx / (fx - 1.0)
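# Hedged worked example (added by the editor, illustration only): with a real
# annual discount rate d = 0.05 and an asset lifetime n = 60 years,
# calc_annuity_factor(0.05, 60) = 0.05 * 1.05**60 / (1.05**60 - 1) ~= 0.0528,
# so a one-time interconnection capital cost is annualized to roughly 5.3%
# of that cost per year.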
def get_pipeline_costs(self):
"""Get the costs of gas pipeline interconnection per kilometer."""
if self.pipeline_costs_dict is not None:
logging.info(f"Using gas pipeline costs from user defined dictionary: {self.pipeline_costs_dict}")
return self.pipeline_costs_dict.get('gas_pipeline_cost')
if self.pipeline_costs_file is not None:
f = self.pipeline_costs_file
logging.info(f"Using gas pipeline costs from file: {f}")
else:
f = pkg.get_costs_gas_pipeline()
logging.info(f"Using gas pipeline costs from default file: {f}")
with open(f, 'r') as yml:
yaml_dict = yaml.load(yml, Loader=yaml.FullLoader)
return yaml_dict.get('gas_pipeline_cost')
def process_substations(self):
"""Process input substations from shapefile."""
# load cost dictionary from package data if none passed
if (self.transmission_costs_dict is None) and (self.transmission_costs_file is None):
default_kv_file = pkg.get_costs_per_kv_substation_file()
logging.info(f"Using default substation costs from file: {default_kv_file}")
self.transmission_costs_dict = pkg.costs_per_kv_substation()
elif self.transmission_costs_file is not None:
logging.info(f"Using substation costs from file: {self.transmission_costs_file}")
with open(self.transmission_costs_file, 'r') as yml:
self.transmission_costs_dict = yaml.load(yml, Loader=yaml.FullLoader)
if self.substation_file is None:
sub_file = pkg.get_substation_file()
logging.info(f"Using default substation file: {sub_file}")
return gpd.read_file(sub_file)
else:
logging.info(f"Using substation file: {self.substation_file}")
# load file
gdf = gpd.read_file(self.substation_file)
# detect existing raster value binning for rasterization
if '_rval_' in gdf.columns:
logging.info("Using current '_rval_' field found in substation file which is used in rasterization.")
logging.info("If '_rval_' field was included unintentionally, please remove from file and re-run.")
return gdf
else:
# make all column names lower case
gdf.columns = [i.lower() for i in gdf.columns]
# assign a field to rasterize by containing the cost of transmission per km
gdf['_rval_'] = 0
for i in self.transmission_costs_dict.keys():
gdf['_rval_'] = np.where((gdf['min_volt'] >= self.transmission_costs_dict[i]['min_voltage']) &
(gdf['min_volt'] <= self.transmission_costs_dict[i]['max_voltage']),
self.transmission_costs_dict[i]['thous_dollar_per_km'],
gdf['_rval_'])
return gdf
def process_pipelines(self):
"""Select natural gas pipelines data that have a length greater than 0.
:returns: A geodataframe containing the target pipelines
"""
if self.pipeline_file is None:
f = pkg.get_default_gas_pipelines()
logging.info(f"Using default gas pipeline file: {f}")
# read in default shapefile for pipelines
gdf = gpd.read_file(f)
# set field for rasterize
gdf['_rval_'] = self.get_pipeline_costs()
return gdf
else:
logging.info(f"Using gas pipeline file: {self.pipeline_file}")
# read in data and reproject
gdf = gpd.read_file(self.pipeline_file)
# only keep features with a length > 0
gdf = gdf.loc[gdf.geometry.length > 0].copy()
# set field for rasterize
gdf['_rval_'] = self.get_pipeline_costs()
return gdf
def transmission_to_cost_raster(self, setting):
"""Create a cost per grid cell in $/km from the input GeoDataFrame of transmission infrastructure having a cost
designation field as '_rval_'
:param setting: Either 'substations' or 'pipelines'
:type setting: str
:return: Array of transmission interconnection cost per grid cell
"""
# conversion factor for meters to km
m_to_km_factor = 0.001
if setting == 'substations':
infrastructure_gdf = self.process_substations()
elif setting == 'pipelines':
infrastructure_gdf = self.process_pipelines()
else:
raise ValueError(
f"Incorrect setting '{setting}' for transmission data. Must be 'substations' or 'pipelines'")
with rasterio.open(self.region_raster_file) as src:
# create 0 where land array
arr = (src.read(1) * 0).astype(rasterio.float64)
# update metadata datatype to float64
metadata = src.meta.copy()
metadata.update({'dtype': rasterio.float64})
# reproject transmission data if necessary
if infrastructure_gdf.crs != src.crs:
infrastructure_gdf = infrastructure_gdf.to_crs(src.crs)
# get shapes
shapes = ((geom, value) for geom, value in zip(infrastructure_gdf.geometry, infrastructure_gdf['_rval_']))
with tempfile.TemporaryDirectory() as tempdir:
# if write desired
if any((self.output_rasterized_file, self.output_dist_file, self.output_alloc_file, self.output_cost_file)):
if self.output_dir is None:
msg = "If writing rasters to file must specify 'output_dir'"
logging.error(msg)
raise NotADirectoryError(msg)
else:
out_rast = os.path.join(self.output_dir, f'cerf_transmission_raster_{setting}.tif')
out_dist = os.path.join(self.output_dir, f'cerf_transmission_distance_{setting}.tif')
out_alloc = os.path.join(self.output_dir, f'cerf_transmission_allocation_{setting}.tif')
out_costs = os.path.join(self.output_dir, f'cerf_transmission_costs_{setting}.tif')
else:
out_rast = os.path.join(tempdir, f'cerf_transmission_raster_{setting}.tif')
out_dist = os.path.join(tempdir, f'cerf_transmission_distance_{setting}.tif')
out_alloc = os.path.join(tempdir, f'cerf_transmission_allocation_{setting}.tif')
out_costs = os.path.join(tempdir, f'cerf_transmission_costs_{setting}.tif')
# rasterize transmission vector data and write to memory
with rasterio.open(out_rast, 'w', **metadata) as dataset:
# burn features into raster
burned = features.rasterize(shapes=shapes, fill=0, out=arr, transform=dataset.transform)
# write the outputs to file
dataset.write_band(1, burned)
# calculate Euclidean distance and write raster; result just stores the return value 0
dist_result = wbt.euclidean_distance(out_rast, out_dist, callback=suppress_callback)
# calculate Euclidean allocation and write raster
alloc_result = wbt.euclidean_allocation(out_rast, out_alloc, callback=suppress_callback)
with rasterio.open(out_dist) as dist:
dist_arr = dist.read(1)
with rasterio.open(out_alloc) as alloc:
alloc_arr = alloc.read(1)
with rasterio.open(out_costs, 'w', **metadata) as out:
# distance in km * the cost of the nearest substation; outputs thous$/km
cost_arr = (dist_arr * m_to_km_factor) * alloc_arr
out.write(cost_arr, 1)
return cost_arr
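# Hedged worked example (added by the editor, illustration only): a grid cell
# whose Euclidean distance to the nearest substation is 12,000 m and whose
# allocated '_rval_' cost is 50 (thousand $ per km) receives
# (12000 * 0.001) * 50 = 600 thousand $ of interconnection capital cost,
# which is later annualized with the annuity factor described above.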
def generate_interconnection_costs_array(self):
"""Calculate the costs of interconnection for each technology."""
# if a preprocessed file has been provided, load and return it
if self.interconnection_cost_file is | |
"""
This file contains Python programs related to lists. Questions are taken from GeeksforGeeks.
Some libraries are imported before all the programs; please check them before trying any program.
"""
#Creating some inputs for testing
L1 = [12, 35, 9, 56, 24]
L2 = [1, 2, 3]
L3 = [10,20,30,40,50]
L4 = [2,3,7,13,17,23]
L5 = [-1,1,-2,2,-3,3,-4,4]
L7 = [12,[], 35,[], 9,[], 56, 24]
L6 = [[2 ,1, 3],
[4, 5, 7],
[6, 9, 8]]
L8 = [1,2,3,2,3,1,6,7,6,7,4,5,8,8,9,6,9,8,9,4]
tuples = [(), ('ram','15','8'), (), ('laxman', 'sita'), ('krishna', 'akbar', '45'), ('',''),()]
#Import Libraries that can be required in the programs
import functools as fs
from typing import Counter
import numpy as np
import math
from numpy.core.fromnumeric import sort
"""
1) Basic program to interchange first and last elements in a list
"""
#Method 1 Basic Swap
def interchange(x):
temp = x[0]
x[0] = x[-1]
x[-1] = temp
return x
print(interchange(L1))
#Time taken: 0.0009970664978027344
#Method 2 Pythonic Swap
def interchange1(x):
x[0],x[-1] = x[-1],x[0]
return x
print(interchange1(L1))
#Time taken: 0.0009958744049072266
"""
2) Basic program to swap two elements in a list (inputs: the list and the two positions to swap; positions are 1-based, not 0-based indexes)
"""
def Swap2ListEle(x,a,b):
x[a],x[b] = x[b],x[a]
return x
a = int(input("Enter First Position: "))
b = int(input("Enter Second Position: "))
print(Swap2ListEle(L1, a - 1, b - 1))
#Time taken: 0.0010004043579101562
"""
3) Basic Ways to find length of list
"""
#First way
print(len(L1))
#Second Way naive one counting
count = 0
for i in L1:
count += 1
print(count)
#Third way by importing operator length_hint
from operator import length_hint
print(length_hint(L1))
"""
4) Basic Ways to check if element exists in list
"""
#Method 1: Naive one Iterating through all list
def checkEleNaive(x,a):
for i in x:
if i == a:
return "Element found in the list"
return "Element Not Found"
print(checkEleNaive(L1,9))
#Time taken: 0.0020017623901367188
#Method 2: Using in
def checkEleNaive1(x,a):
if a in x:
return "Element found in the list"
return "Element Not Found"
print(checkEleNaive1(L1,9))
#Time taken: 0.0009968280792236328
"""
5) Basic Different ways to clear a list in Python
"""
#Method 1 , using clear method
print("Before Clear: ",L1)
L1.clear()
print("After Clear: ",L1)
#Method 2, Multiplying all elements by 0 using map; note this only zeroes the elements, it does not empty the list
print(list(map(lambda x : x*0, L2)))
#Method 3, reinitializing
L3=[]
print(L3)
#Method 4, multiplying list with 0
L4 *= 0
print(L4)
#Method 5, using del function
del L1[:]
print(L1)
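# Not in the original: re-create the sample lists that were cleared or emptied above so the
# remaining sections still have data to work with.
L1 = [12, 35, 9, 56, 24]
L2 = [1, 2, 3]
L3 = [10,20,30,40,50]
L4 = [2,3,7,13,17,23]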
"""
6) Basic program Reversing a List
"""
#Method 1 , using reversed function
def reverse1(x):
return [i for i in reversed(x)]
print(reverse1(L1))
#Method 2 , using reverse
def reverse2(x):
x.reverse()
return x
print(reverse2(L2))
#Method 3, using slicing
def reverse3(x):
x1 = x[::-1]
return x1
print(reverse3(L3))
"""
7) Basic program to find sum of elements in list
"""
#Method 1 using for loop
def listsum(x):
sum = 0
for i in x:
sum += i
return sum
print("Sum of elements of array is ",listsum(L1))
#Method 2 Using inbuilt sum function
ans = sum(L1)
print("Sum is ",ans)
#Method 3 using lambda and reduce
print("Here the sum is ",fs.reduce(lambda x,y: x + y , L1))
"""
8) Basic Multiply all numbers in the list
"""
#Method 1, using reduce
def ListMultiplyReduce(x):
ans = fs.reduce(lambda x,y:x*y, x)
return ans
print(ListMultiplyReduce(L2))
#Time taken: 0.0009980201721191406
#Method 2 , using traversal
def ListMultiplyTraverse(x):
ans = 1
for i in x:
ans *= i
return ans
print(ListMultiplyTraverse(L1))
#Time taken: 0.000997304916381836
#Method 3, using numpy
def ListMultiplyNumpy(x):
ans = np.prod(x)
return ans
print(ListMultiplyNumpy(L1))
#Time taken: 0.0008940696716308594
#Method 4, using math library
def ListMultiplyMath(x):
ans = math.prod(x)
return ans
print(ListMultiplyMath(L1))
#Time taken: 0.0009989738464355469
"""
9) Basic program to find smallest number in a list
"""
#Method 1, using numpy min
def SmallElementList(x):
ans = np.min(x)
return ans
print(SmallElementList(L1))
#Time taken: 0.002989053726196289
#Method 2, traversing
def SmallElementsListTraverse(x):
min = x[0]
for i in range(len(x)):
if x[i] < min:
min = x[i]
return min
print(SmallElementsListTraverse(L1))
#Time taken: 0.002994537353515625
#Method 3, using sort() function
def SmallElementListSort(x):
x.sort()
return x[0]
print(SmallElementListSort(L1))
#Time taken: 0.0009975433349609375
#Method 4, using inbuilt min
print(min(L1))
#Time taken: 0.0020165443420410156
"""
10) Basic program to find largest number in a list
"""
#Method 1, using numpy max
def LargeElementList(x):
ans = np.max(x)
return ans
print(LargeElementList(L1))
#Time taken: 0.003988504409790039
#Method 2, traversing
def LargeElementsListTraverse(x):
max = x[0]
for i in range(len(x)):
if x[i] > max:
max = x[i]
return max
print(LargeElementsListTraverse(L1))
#Time taken: 0.0020012855529785156
#Method 3, using sort() function
def LargeElementListSort(x):
x.sort()
return x[-1]
print(LargeElementListSort(L1))
#Time taken: 0.000995635986328125
#Method 4, using inbuilt max
print(max(L1))
#Time taken: 0.0009975433349609375
"""
11) Basic program to find second largest number in a list
"""
#Method 1 , Using Sort
def SecondLargeElementListSort(x):
x.sort()
return x[-2]
print(SecondLargeElementListSort(L1))
#Time taken: 0.0010037422180175781
#Method 2, Using sorted
print(sorted(L1)[-2])
#Time taken: 0.0019876956939697266
#Method 3, removing max thus second largest becomes max by converting to set
def SecondLargeElementListRemove(x):
set1 = set(x)
set1.remove(max(set1))
return max(set1)
print(SecondLargeElementListRemove(L1))
#Time taken: 0.000997304916381836
#Method 4, removing max thus second largest becomes max
def SecondLargeElementListRemove1(x):
x.remove(max(x))
return max(x)
print(SecondLargeElementListRemove1(L3))
#Time taken: 0.0019948482513427734
"""
12) Basic program to find N largest elements from a list
"""
#Method 1, using sort
n1 = int(input("Tell a number you want number of largest numbers from list: "))
def NLargest(x):
x.sort(reverse = True)
return x[0:n1]
print(NLargest(L1))
#Method 2, using sorted
n1 = int(input("Tell a number you want number of largest numbers from list: "))
print(sorted(L1,reverse=True)[0:n1])
#Method 3, using sort but not reversing
n1 = int(input("Tell a number you want number of largest numbers from list: "))
L1.sort()
print(L1[-n1:])
#Method 4, using traversal, remove, append
n1 = int(input("Tell a number you want number of largest numbers from list: "))
def Nlargest1(x,n1):
newx = []
for i in range(n1):
max = 0
for i in range(len(x)):
if x[i] > max:
max = x[i]
x.remove(max)
newx.append(max)
return newx
print(Nlargest1(L1,n1))
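#Additional sketch (not part of the original): the standard library's heapq.nlargest gives the
#same result without fully sorting the list.
import heapq
print(heapq.nlargest(3, [12, 35, 9, 56, 24]))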
"""
13) Basic program to print even numbers in a list
"""
#Method 1, naive way 1 , creating new empty list and appending even elements
def EvenElementList1(x):
x1 =[]
for i in x:
if i % 2 == 0:
x1.append(i)
return x1
print(EvenElementList1(L1))
#Time taken: 0.0009970664978027344
#Method 2, naive way 2, printing the elements directly; the wrapping print shows None because the function has no return value
def EvenElementList3(x):
for i in x:
if i % 2 == 0:
print(i,end=" ")
print(EvenElementList3(L1))
#Time taken: 0.0019915103912353516
#Method 3, Naive way 3 using a while loop; the wrapping print shows None because the function has no return value
def EvenElementList2(x):
i = 0
while i < len(x):
if x[i] % 2 == 0:
print(x[i],end=" ")
i += 1
print(EvenElementList2(L1))
#Time taken: 0.0019927024841308594
#Method 4, Pythonic way using lambda
def EvenElementList4(x):
x1 = list(filter(lambda x: (x % 2 == 0), x))
return x1
print(EvenElementList4(L1))
#Time taken: 0.0009965896606445312
#Method 5, Pythonic way using list comprehension
def EvenElementList(x):
return [i for i in x if i % 2 == 0]
print(EvenElementList(L1))
#Time taken: 0.000997304916381836
"""
14) Basic program to print odd numbers in a List
"""
#Method 1, naive way 1 , creating new empty list and appending odd elements
def OddElementList1(x):
x1 =[]
for i in x:
if i % 2 != 0:
x1.append(i)
return x1
print(OddElementList1(L1))
#Time taken: 0.0029916763305664062
#Method 2, naive way 2, printing the elements directly; the wrapping print shows None because the function has no return value
def OddElementList3(x):
for i in x:
if i % 2 != 0:
print(i,end=" ")
print(OddElementList3(L1))
#Time taken: 0.0009980201721191406
#Method 3, Naive way 3 using a while loop; the wrapping print shows None because the function has no return value
def OddElementList2(x):
i = 0
while i < len(x):
if x[i] % 2 != 0:
print(x[i],end=" ")
i += 1
print(OddElementList2(L1))
#Time taken: 0.001972675323486328
#Method 4, Pythonic way using lambda
def OddElementList4(x):
x1 = list(filter(lambda x: (x % 2 != 0), x))
return x1
print(OddElementList4(L1))
#Time taken: 0.000993490219116211
#Method 5, Pythonic way using list comprehension
def OddElementList(x):
return [i for i in x if i % 2 != 0]
print(OddElementList(L1))
#Time taken: 0.0010035037994384766
"""
15) Basic program to print positive numbers in a list
"""
#Method 1 , List Comprehension
def PositiveList(x):
return [ i for i in x if i > 0]
print(PositiveList(L5))
#Time taken: 0.0009975433349609375
#Method 2, using filter
def PositiveList1(x):
return list(filter(lambda i : i > 0 , x))
print(PositiveList1(L5))
#Time taken: 0.0009953975677490234
#Method 3 , Naive Method using for loop
def PositiveList2(x):
for i in x:
        if i > 0:
print(i, end = " ")
print(PositiveList2(L5))
#Time taken: 0.0019981861114501953
#Method 4 , Naive Method using while loop
def PositiveList3(x):
i = 0
while i < len(x):
        if x[i] > 0:
print(x[i], end = " ")
i += 1
print(PositiveList3(L5))
#Time taken: 0.0019979476928710938
"""
16) Basic program to print negative numbers in a list
"""
#Method 1 , List Comprehension
def NegativeList(x):
    return [ i for i in x if i < 0]
print(NegativeList(L5))
#Time taken: 0.0009975433349609375
#Method 2, using filter
def NegativeList1(x):
    return list(filter(lambda i : i < 0 , x))
print(NegativeList1(L5))
#Time taken: 0.0009953975677490234
#Method 3 , Naive Method using for loop
def NegativeList2(x):
    for i in x:
        if i < 0:
            print(i, end = " ")
print(NegativeList2(L5))
#Time taken: 0.0019981861114501953
#Method 4 , Naive Method using while loop
def NegativeList3(x):
    i = 0
    while i < len(x):
        if x[i] < 0:
            print(x[i], end = " ")
        i += 1
print(NegativeList3(L5))
#Time taken: 0.0019979476928710938
"""
17) Basic program to print all positive numbers in a range
"""
n1 = int(input("Start: "))
n2 = int(input("End: | |
<reponame>zuoziji/transaction
# -*- coding: utf-8 -*-
"""
1) Fetch the NAV records for the given wind_code, convert them to return rates and write a csv file
2) Call fhs_garch.R, which produces the corresponding stress-test csv file
3) Read the stress-test file and run the overall portfolio analysis
"""
from rpy2 import robjects
from os import path
import matplotlib.pyplot as plt  # PyCharm needs an explicit plt.show() call to display plots
from itertools import product
import numpy as np
import pandas as pd
from datetime import datetime, date, timedelta
from fh_tools.fh_utils import get_cache_file_path
import json
from config_fh import get_db_engine, ANALYSIS_CACHE_FILE_NAME, get_db_session, get_redis, STR_FORMAT_DATE,\
STRESS_TESTING_SIMULATE_COUNT_FHS_GARCH
import logging
logger = logging.getLogger()
DATE_STR_FORMAT = '%Y-%m-%d'
ENABLE_CACHE = False
SQL_FUND_NAV_FRIDAY = """SELECT nav_date_friday, nav_acc
FROM fund_nav,
(
SELECT wind_code,
(nav_date + INTERVAL (4 - WEEKDAY(nav_date)) DAY) AS nav_date_friday,
MAX(nav_date) AS nav_date_max
FROM fund_nav
where wind_code = :wind_code1
GROUP BY wind_code, nav_date_friday
) as fund_nav_g
where fund_nav.wind_code = :wind_code2
and fund_nav.wind_code = fund_nav_g.wind_code
and fund_nav.nav_date = fund_nav_g.nav_date_max"""
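# The query above maps each nav_date to the Friday of its week and, for every fund-week, keeps
# the nav_acc of the latest nav_date within that week.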
QUANTILE_LIST = [.05, .15, .25, .50, .75, .85, .95]
def get_fund_nav_acc(wind_code):
"""
    Fetch NAV data for the specified fund; dates are mapped to each week's Friday, with at most one record per week
:param wind_code:
:return:
"""
with get_db_session() as session:
table = session.execute(SQL_FUND_NAV_FRIDAY, {'wind_code2': wind_code, 'wind_code1': wind_code})
date_nav_dic = dict(table.fetchall())
nav_series = pd.Series(date_nav_dic, name=wind_code)
return nav_series
def get_return_rate_csv(nav_s, input_file_path):
"""
    Convert the NAV series into return rates, write them to a csv file, and return the corresponding date index
    If the NAV series has no drawdown, return None and log a warning
    :param nav_s: NAV data series
    :param input_file_path: path of the csv file to generate
:return:
"""
# wind_code = nav_s.name
return_rate_s = nav_s.pct_change()
return_rate_s.dropna(inplace=True)
if ENABLE_CACHE and path.exists(input_file_path):
date_index = nav_s.index
else:
return_rate_s.name = 'x'
# nav_df = pd.DataFrame([nav_s, return_rate_s]).T
# print(type(nav_df))
# print(nav_df)
if any(return_rate_s < 0):
return_rate_s.to_csv(input_file_path, index=False, header=True)
date_index = return_rate_s.index
else:
        logger.warning('wind_code:%s will be ignored because it has no drawdown' % nav_s.name)
date_index = None
return date_index
def cal_fhs_garch(wind_code, simulate_count, nav_acc_s):
"""
    Fetch the sub-fund NAV, generate a csv file, call FHSGACH3Py in the R script for the calculation, and return the resulting data
    :param wind_code: fund code
    :return: stress-test DataFrame; the index is the date, each column is one simulated path
"""
nav_date_latest = nav_acc_s.index[-1]
input_file_name = '%s_%s.csv' % (wind_code, nav_date_latest.strftime(DATE_STR_FORMAT))
logger.debug("%s 将被 robjects.r['FHSGACH3Py'] 引用", input_file_name)
input_file_path = get_cache_file_path(ANALYSIS_CACHE_FILE_NAME, input_file_name)
date_index = get_return_rate_csv(nav_acc_s, input_file_path)
if date_index is None:
        logger.warning('%s has no data for the fhs_garch test', wind_code)
return None
output_file_name = '%s_simulate_%s.csv' % (wind_code, nav_date_latest.strftime(DATE_STR_FORMAT))
output_file_path = get_cache_file_path(ANALYSIS_CACHE_FILE_NAME, output_file_name)
if not ENABLE_CACHE or not path.exists(output_file_path):
        # for testing only: temporarily replace input_file_name with the sample csv file
# input_file_name = 'rr_example.csv'
logger.info('invoke fhs_garch.R for %s %d', wind_code, simulate_count)
r_file_path = get_cache_file_path('stress_testing', 'fhs_garch.R', create_if_not_found=False)
robjects.r.source(r_file_path)
robjects.r['FHSGACH3Py'](input_file_path, output_file_path, simulate_count)
    # read the csv file and re-index it with the date index
    simulate_df = pd.read_csv(output_file_path, index_col=0).reset_index().set_index(date_index)
# print('simulate_df.shape', simulate_df.shape)
return simulate_df
def cal_fof_fhs_garch(wind_code_list, simulate_count, mode='pad'):
"""
    Run the FHS-GARCH stress test on the specified portfolio of funds
    :param wind_code_list: list of fund codes
    :param simulate_count: number of stress-test simulations per sub-fund
    :return: stress-test data for each sub-fund, restricted to the intersection of the sub-funds' dates
"""
min_acceptable_data_len = 10
    # fetch the NAV data
nav_acc_s_dic = {}
date_set = None
for wind_code in wind_code_list:
nav_acc_s = get_fund_nav_acc(wind_code)
if nav_acc_s.shape[0] <= min_acceptable_data_len:
continue
if date_set is None:
date_set = set(nav_acc_s.index)
else:
date_set &= set(nav_acc_s.index)
nav_acc_s_dic[wind_code] = nav_acc_s
date_list = list(date_set)
    # generate the stress-test results
simulate_df_dic = {}
if mode == 'pad':
date_list.sort()
for wind_code in nav_acc_s_dic.keys():
nav_acc_s = nav_acc_s_dic[wind_code][date_list]
try:
simulate_df = cal_fhs_garch(wind_code, simulate_count, nav_acc_s)
if simulate_df is not None:
simulate_df_dic[wind_code] = simulate_df
except:
logger.exception('cal_fhs_garch for %s got exception:', wind_code)
elif len(date_list) == 0:
        logger.warning('%s: the intersection of historical NAV dates is empty; the portfolio stress test cannot run' % wind_code_list)
return simulate_df_dic
def recursive_composition(key_list, key_index, data_df_dic, data_s=None, label_str=[]):
"""
    Recursively walk key_list and, for each sub-fund, combine the corresponding DataFrame in data_df_dic into weighted sums
:param key_list:
:param key_index:
:param data_df_dic:
:param data_s:
:param label_str:
:return:
"""
key_value = key_list[key_index]
key_index_next = 0 if len(key_list) <= (key_index + 1) else key_index + 1
data_df = data_df_dic[key_value]
data_count = data_df.shape[1]
if key_index_next != 0:
data_s_list = []
else:
data_s_list = [''] * data_count
for n in range(data_count):
data_s_curr = data_df.iloc[:, n]
label_str_new = label_str.copy()
label_str_new.append(str(n))
if data_s is not None:
data_s_new = data_s + data_s_curr
else:
data_s_new = data_s_curr
if key_index_next != 0:
data_s_list_new = recursive_composition(key_list, key_index_next, data_df_dic, data_s_new, label_str_new)
data_s_list.extend(data_s_list_new)
else:
data_s_new.rename('_'.join(label_str_new))
data_s_list[n] = data_s_new
return data_s_list
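# Illustration (not part of the original): with two sub-funds 'a' and 'b' whose DataFrames each
# hold N simulated columns, recursive_composition(['a', 'b'], 0, {'a': df_a, 'b': df_b}) returns
# the N*N series obtained by adding every column of df_a to every column of df_b, i.e. every
# combination of simulated paths.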
def iter_composition(data_df_dic, simulate_count):
"""
    Iterate over every sub-fund in key_list and combine the corresponding DataFrame in data_df_dic into weighted sums
:param data_df_dic:
:param simulate_count:
:return:
"""
key_list = list(data_df_dic.keys())
key_count = len(key_list)
iter_result = product(range(simulate_count), repeat=key_count)
data_s_list = [""] * (simulate_count ** key_count)
# print("data_s_list len:", simulate_count ** key_count)
data_s_count = 0
for comp in iter_result:
data_s = None
for n_key in range(key_count):
key = key_list[n_key]
data_df = data_df_dic[key]
n = comp[n_key]
if data_s is None:
data_s = data_df.iloc[:, n]
else:
data_s += data_df.iloc[:, n]
data_s_list[data_s_count] = data_s
data_s_count += 1
return data_s_list
def fof_fhs_garch(wind_code_list, simulate_count):
"""
    Run the combined stress test on the sub-fund portfolio
    :param wind_code_list: list of sub-fund codes
    :param simulate_count: number of simulations per sub-fund; the parent fund ends up with simulate_count ** number_of_sub_funds paths
    :return: combined stress-test DataFrame and a dict of per-sub-fund stress-test DataFrames
"""
simulate_df_dic = cal_fof_fhs_garch(wind_code_list, simulate_count)
sub_fund_count = len(simulate_df_dic)
if simulate_df_dic is None or sub_fund_count == 0:
logger.warning('FHS GARCH has no data for composition with fund codes:\n%s', wind_code_list)
return None, None
simulate_pct_df_dic = {}
wind_code_new_list = list(simulate_df_dic.keys())
# wind_code_count = len(wind_code_new_list)
weight = 1.0 / sub_fund_count
# weighted_dic = {}
    # multiply the per-sub-fund simulation counts together to get the combined total
tot_comp_count = np.prod([df.shape[1] for df in simulate_df_dic.values()])
    # if the combined total exceeds one million, thin out the data
MAX_ALLOW_COUNT = 1000000
if tot_comp_count > MAX_ALLOW_COUNT:
base_div = (tot_comp_count / MAX_ALLOW_COUNT) ** (1.0 / sub_fund_count)
else:
base_div = 1
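    # worked example (not from the original): with 3 sub-funds of 1,000 paths each,
    # tot_comp_count = 1e9, so base_div = (1e9 / 1e6) ** (1 / 3) = 10 and each sub-fund is
    # trimmed to about 100 paths below, keeping the combined total near MAX_ALLOW_COUNT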
for wind_code in wind_code_new_list:
simulate_df = simulate_df_dic[wind_code]
# simulate_df.plot(legend=False)
# plt.show()
logger.info('fund %s : %s', wind_code, simulate_df.shape)
# print(simulate_df.head())
simulate_pct_df = simulate_df.pct_change()
simulate_pct_df.fillna(0, inplace=True)
# print("simulate_pct_df")
# print(simulate_pct_df.head())
if base_div > 1:
            # reduce the number of simulated paths
            willing_count = int(simulate_pct_df.shape[1] / base_div)
            simulate_pct_df = simulate_pct_df.iloc[:, :willing_count]
simulate_pct_df_dic[wind_code] = simulate_pct_df * weight
# weighted_dic[wind_code] = weighted
# time_1 = time.time()
    logger.debug('Combining %s', wind_code_new_list)
data_s_list = recursive_composition(wind_code_new_list, 0, simulate_pct_df_dic)
# time_2 = time.time()
# data_s_list = iter_composition(simulate_pct_df_dic, simulate_count)
# time_3 = time.time()
# print("recursive_composition cost:", time_2 - time_1)
# print("iter_composition cost:", time_3 - time_2)
# print(len(data_s_list))
data_df = pd.concat(data_s_list, axis=1)
# print(data_df.shape)
simulate_comp_df = (data_df + 1).cumprod()
# print("simulate_comp_df:", simulate_comp_df.shape)
# print(simulate_comp_df.head())
return simulate_comp_df, simulate_df_dic
def savefig_df(df, file_name):
file_path = get_cache_file_path(ANALYSIS_CACHE_FILE_NAME, file_name)
df.plot(legend=False)
plt.savefig(file_path)
return file_path
def plot_fof_fhs_garch(wind_code_list, simulate_count):
simulate_comp_df, simulate_df_dic = fof_fhs_garch(wind_code_list, simulate_count)
if simulate_comp_df is None:
return None
print("simulate_comp_df.shape:", simulate_comp_df.shape)
# plt.show()
fund_file_path_dic = {}
figure_time = datetime.now()
# for wind_code, simulate_df in simulate_df_dic.items():
# file_name = '%s_%s.png' % (wind_code, figure_time.strftime('%Y_%m_%d %H_%M_%S'))
# fof_file_path = savefig_df(simulate_df, file_name)
# fund_file_path_dic[wind_code] = fof_file_path
fof_file_name = 'fof_%s.png' % (figure_time.strftime('%Y_%m_%d %H_%M_%S'))
fof_file_path = savefig_df(simulate_comp_df, fof_file_name)
fund_file_path_dic['fof'] = fof_file_path
return fund_file_path_dic
def do_fhs_garch(wind_code_p_list=[]):
"""
    Background job: run the stress test for every FOF fund in the database
    If wind_code_p_list is not empty, only the parent funds listed in wind_code_p_list are processed
:return:
"""
# sql_str = """select id, ffp.wind_code_p, wind_code_s, date_adj, invest_scale
# from fof_fund_pct ffp,
# (
# select wind_code_p, max(date_adj) date_latest from fof_fund_pct group by wind_code_p
# ) ff_date_latest
# where ffp.wind_code_p = ff_date_latest.wind_code_p
# and ffp.date_adj = ff_date_latest.date_latest"""
sql_str = """select id, ffp.wind_code_p, ffp.wind_code_s, wind_code, date_adj, invest_scale
from fof_fund_pct ffp,
(
select wind_code_p, max(date_adj) date_latest
from fof_fund_pct group by wind_code_p
) ff_date_latest,
fund_essential_info ffm
where ffp.wind_code_p = ff_date_latest.wind_code_p
and ffp.wind_code_s = ffm.wind_code_s
and ffp.date_adj = ff_date_latest.date_latest"""
engine = get_db_engine()
fof_fund_df = pd.read_sql(sql_str, engine)
    # group by parent-fund code
wind_code_fund_dic = dict(list(fof_fund_df.groupby('wind_code_p')))
    # group by fund code, collecting all corresponding wind_code_s codes
wind_code_df_dic = dict(list(fof_fund_df[['wind_code', 'wind_code_s']].groupby('wind_code')))
wind_code_s_dic = {wind_code: set(df['wind_code_s'].values) for wind_code, df in wind_code_df_dic.items()}
date_to = date.today()
date_to_str = date_to.strftime(STR_FORMAT_DATE)
date_from = date_to - timedelta(days=365)
date_from_str = date_from.strftime(STR_FORMAT_DATE)
simulate_count = STRESS_TESTING_SIMULATE_COUNT_FHS_GARCH
r = get_redis()
if wind_code_p_list is not None and len(wind_code_p_list) > 0:
wind_code_p_set = set(wind_code_p_list)
else:
wind_code_p_set = None
for wind_code_p, fof_fund_sub_df in wind_code_fund_dic.items():
if wind_code_p_set is not None and wind_code_p not in wind_code_p_set:
            logger.debug('%s is not in the list, skipping', wind_code_p)
continue
wind_code_list = list(fof_fund_sub_df['wind_code']) # ['XT1410445.XT', 'J11039.OF']
wind_code_count = len(wind_code_list)
if wind_code_count <= 0:
continue
simulate_comp_df, simulate_df_dic = fof_fhs_garch(wind_code_list, simulate_count)
if simulate_comp_df is None:
logger.error('%s has no FHS GARCH test data. sub fund list: %s', wind_code_p, wind_code_list)
continue
for wind_code, simulate_df in simulate_df_dic.items():
time_line = simulate_df.index
time_line = [i.strftime('%Y-%m-%d') for i in time_line]
df = simulate_df.T.quantile(QUANTILE_LIST).T
result = {"time": time_line, "data": [{"name": i, "data": np.array(df[i]).tolist()} for i in df.columns]}
result['show_count'] = simulate_count
            # store the stress-test results for wind_code and each corresponding wind_code_s in redis
key = '%s_%s' % (wind_code, 'fhs_garch')
            logger.info('%s has been completed', key)
r.set(key, json.dumps(result))
for wind_code_s in wind_code_s_dic[wind_code]:
key = '%s_%s' % (wind_code_s, 'fhs_garch')
                logger.info('%s has been completed', key)
r.set(key, json.dumps(result))
time_line = simulate_comp_df.index
time_line = [i.strftime('%Y-%m-%d') for i in time_line]
df = simulate_comp_df.T.quantile(QUANTILE_LIST).T
result = {"time": time_line, "data": [{"name": i, "data": np.array(df[i]).tolist()} for i in df.columns],
'show_count': simulate_count}
key = '%s_%s' % (wind_code_p, 'fhs_garch')
        logger.info('%s has been completed', key)
r.set(key, json.dumps(result))
def do_fhs_garch_4_scheme(scheme_id):
"""
    Run the FHS-GARCH stress test for the portfolio defined by the given scheme_id
:param scheme_id:
:return:
"""
sql_str = "SELECT wind_code, invest_scale FROM scheme_fund_pct where scheme_id=%s"
engine = get_db_engine()
fund_pct_df = pd.read_sql(sql_str, engine, params=[str(scheme_id)])
simulate_count = STRESS_TESTING_SIMULATE_COUNT_FHS_GARCH
wind_code_list = list(fund_pct_df['wind_code']) # ['XT1410445.XT', 'J11039.OF']
wind_code_count = len(wind_code_list)
if wind_code_count <= 0:
logger.warning('scheme %s has no sub fund list', scheme_id)
return
    # run the FHS-GARCH stress test
simulate_comp_df, simulate_df_dic = fof_fhs_garch(wind_code_list, simulate_count)
if simulate_comp_df is None:
logger.error('scheme %s has no FHS GARCH test data. sub fund list: %s', scheme_id, wind_code_list)
return
logger.info('do_fhs_garch for %d on wind_code_p with %s', scheme_id, wind_code_list)
    # store the portfolio stress-test results to
<reponame>alitalia598/mssdk
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/5/4 15:58
Desc: fetch daily futures quotes from the exchange websites
"""
import datetime
import json
import re
import warnings
import zipfile
from io import BytesIO, StringIO
import numpy as np
import pandas as pd
import requests
from mssdk.futures import cons
from mssdk.futures.requests_fun import requests_link
calendar = cons.get_calendar()
def get_cffex_daily(date: str = "20100401") -> pd.DataFrame:
"""
    China Financial Futures Exchange (CFFEX) daily trading data
    http://www.cffex.com.cn/rtj/
    :param date: date, format YYYY-MM-DD, YYYYMMDD or a datetime.date object; defaults to today when empty
    :return: pandas.DataFrame
        CFFEX daily fields:
        symbol          contract code
        date            date
        open            open price
        high            high price
        low             low price
        close           close price
        volume          volume
        open_interest   open interest
        turnover        turnover
        settle          settlement price
        pre_settle      previous settlement price
        variety         product type
        or None (no trading data for the given date)
"""
day = cons.convert_date(date) if date is not None else datetime.date.today()
if day.strftime("%Y%m%d") not in calendar:
warnings.warn("%s非交易日" % day.strftime("%Y%m%d"))
return None
url = f"http://www.cffex.com.cn/sj/historysj/{date[:-2]}/zip/{date[:-2]}.zip"
r = requests.get(url)
try:
with zipfile.ZipFile(BytesIO(r.content)) as file:
with file.open(f"{date}_1.csv") as my_file:
data = my_file.read().decode("gb2312")
data_df = pd.read_csv(StringIO(data))
except:
return None
data_df = data_df[data_df["合约代码"] != "小计"]
data_df = data_df[data_df["合约代码"] != "合计"]
data_df = data_df[~data_df["合约代码"].str.contains("IO")]
data_df.reset_index(inplace=True, drop=True)
data_df["合约代码"] = data_df["合约代码"].str.strip()
symbol_list = data_df["合约代码"].to_list()
variety_list = [re.compile(r"[a-zA-Z_]+").findall(item)[0] for item in symbol_list]
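    # e.g. a contract code such as 'IF2106' yields the variety 'IF'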
if data_df.shape[1] == 15:
data_df.columns = ["symbol", "open", "high", "low", "volume", "turnover",
"open_interest", "_", "close", "settle", "pre_settle", "_", "_", "_", "_"]
else:
data_df.columns = ["symbol", "open", "high", "low", "volume", "turnover",
"open_interest", "_", "close", "settle", "pre_settle", "_", "_", "_"]
data_df["date"] = date
data_df["variety"] = variety_list
data_df = data_df[
["symbol", "date", "open", "high", "low", "close", "volume", "open_interest", "turnover", "settle",
"pre_settle", "variety"]]
return data_df
def get_ine_daily(date: str = "20200106") -> pd.DataFrame:
"""
    Shanghai International Energy Exchange (INE) daily price/volume data
    INE products: crude oil futures (listed 20180326); TSR 20 rubber futures (listed 20190812)
    trade_price: http://www.ine.cn/statements/daily/?paramid=kx
    trade_note: http://www.ine.cn/data/datanote.dat
    :param date: date, format YYYY-MM-DD, YYYYMMDD or a datetime.date object; defaults to the current trading day
    :type date: str or datetime.date
    :return: INE daily price/volume data
:rtype: pandas.DataFrame or None
"""
day = cons.convert_date(date) if date is not None else datetime.date.today()
if day.strftime("%Y%m%d") not in calendar:
warnings.warn(f"{day.strftime('%Y%m%d')}非交易日")
return None
url = f"http://www.ine.cn/data/dailydata/kx/kx{day.strftime('%Y%m%d')}.dat"
r = requests.get(url)
result_df = pd.DataFrame()
try:
data_json = r.json()
except:
return None
temp_df = pd.DataFrame(data_json["o_curinstrument"]).iloc[:-1, :]
temp_df = temp_df[temp_df["DELIVERYMONTH"] != "小计"]
temp_df = temp_df[~temp_df["PRODUCTNAME"].str.contains("总计")]
try:
result_df["symbol"] = temp_df["PRODUCTGROUPID"].str.upper().str.strip() + temp_df["DELIVERYMONTH"]
except:
result_df["symbol"] = temp_df["PRODUCTID"].str.upper().str.strip().str.split("_", expand=True).iloc[:, 0] + temp_df["DELIVERYMONTH"]
result_df["date"] = day.strftime("%Y%m%d")
result_df["open"] = temp_df["OPENPRICE"]
result_df["high"] = temp_df["HIGHESTPRICE"]
result_df["low"] = temp_df["LOWESTPRICE"]
result_df["close"] = temp_df["CLOSEPRICE"]
result_df["volume"] = temp_df["VOLUME"]
result_df["open_interest"] = temp_df["OPENINTEREST"]
result_df["turnover"] = 0
result_df["settle"] = temp_df["SETTLEMENTPRICE"]
result_df["pre_settle"] = temp_df["PRESETTLEMENTPRICE"]
try:
result_df["variety"] = temp_df["PRODUCTGROUPID"].str.upper().str.strip()
except:
result_df["variety"] = temp_df["PRODUCTID"].str.upper().str.strip().str.split("_", expand=True).iloc[:, 0]
result_df = result_df[result_df["symbol"] != "总计"]
result_df = result_df[~result_df["symbol"].str.contains("efp")]
return result_df
def get_czce_daily(date: str = "20050525") -> pd.DataFrame:
"""
    Zhengzhou Commodity Exchange (CZCE) daily price/volume data
    :param date: date, format YYYY-MM-DD, YYYYMMDD or a datetime.date object; defaults to the current trading day; the date needs to be later than 20100824
    :type date: str or datetime.date
    :return: CZCE daily price/volume data
:rtype: pandas.DataFrame or None
"""
day = cons.convert_date(date) if date is not None else datetime.date.today()
if day.strftime("%Y%m%d") not in calendar:
warnings.warn(f"{day.strftime('%Y%m%d')}非交易日")
return None
if day > datetime.date(2010, 8, 24):
if day > datetime.date(2015, 9, 19):
u = cons.CZCE_DAILY_URL_3
url = u % (day.strftime("%Y"), day.strftime("%Y%m%d"))
elif day < datetime.date(2015, 9, 19):
u = cons.CZCE_DAILY_URL_2
url = u % (day.strftime("%Y"), day.strftime("%Y%m%d"))
listed_columns = cons.CZCE_COLUMNS
output_columns = cons.OUTPUT_COLUMNS
try:
r = requests.get(url)
html = r.text
except requests.exceptions.HTTPError as reason:
if reason.response.status_code != 404:
print(
cons.CZCE_DAILY_URL_3
% (day.strftime("%Y"), day.strftime("%Y%m%d")),
reason,
)
return
if html.find("您的访问出错了") >= 0 or html.find("无期权每日行情交易记录") >= 0:
return
html = [
i.replace(" ", "").split("|")
for i in html.split("\n")[:-4]
if i[0][0] != "小"
]
if day > datetime.date(2015, 9, 19):
if html[1][0] not in ["品种月份", "品种代码", "合约代码"]:
return
dict_data = list()
day_const = int(day.strftime("%Y%m%d"))
for row in html[2:]:
m = cons.FUTURES_SYMBOL_PATTERN.match(row[0])
if not m:
continue
row_dict = {"date": day_const, "symbol": row[0], "variety": m.group(1)}
for i, field in enumerate(listed_columns):
if row[i + 1] == "\r" or row[i + 1] == '':
row_dict[field] = 0.0
elif field in [
"volume",
"open_interest",
"oi_chg",
"exercise_volume",
]:
row[i + 1] = row[i + 1].replace(",", "")
row_dict[field] = int(row[i + 1])
else:
row[i + 1] = row[i + 1].replace(",", "")
row_dict[field] = float(row[i + 1])
dict_data.append(row_dict)
return pd.DataFrame(dict_data)[output_columns]
elif day < datetime.date(2015, 9, 19):
dict_data = list()
day_const = int(day.strftime("%Y%m%d"))
for row in html[1:]:
row = row[0].split(",")
m = cons.FUTURES_SYMBOL_PATTERN.match(row[0])
if not m:
continue
row_dict = {"date": day_const, "symbol": row[0], "variety": m.group(1)}
for i, field in enumerate(listed_columns):
if row[i + 1] == "\r":
row_dict[field] = 0.0
elif field in [
"volume",
"open_interest",
"oi_chg",
"exercise_volume",
]:
row_dict[field] = int(float(row[i + 1]))
else:
row_dict[field] = float(row[i + 1])
dict_data.append(row_dict)
return pd.DataFrame(dict_data)[output_columns]
if day <= datetime.date(2010, 8, 24):
u = cons.CZCE_DAILY_URL_1
url = u % day.strftime("%Y%m%d")
listed_columns = cons.CZCE_COLUMNS_2
output_columns = cons.OUTPUT_COLUMNS
df = pd.read_html(url)[1].dropna(how="any")
dict_data = list()
day_const = int(day.strftime("%Y%m%d"))
for row in df.to_dict(orient="records"):
row = list(row.values())
m = cons.FUTURES_SYMBOL_PATTERN.match(row[0])
if not m:
continue
row_dict = {"date": day_const, "symbol": row[0], "variety": m.group(1)}
for i, field in enumerate(listed_columns):
if row[i + 1] == "\r":
row_dict[field] = 0.0
elif field in ["volume", "open_interest", "oi_chg", "exercise_volume"]:
row_dict[field] = int(row[i + 1])
else:
row_dict[field] = float(row[i + 1])
dict_data.append(row_dict)
return pd.DataFrame(dict_data)[output_columns]
def get_shfe_v_wap(date: str = "20131017") -> pd.DataFrame:
"""
    Fetch SHFE daily volume-weighted average price (VWAP) data
    Parameters
    ------
    date: date, format YYYY-MM-DD, YYYYMMDD or a datetime.date object; defaults to today when empty
    Return
    -------
    DataFrame
        SHFE daily VWAP data (DataFrame):
        symbol        contract code
        date          date
        time_range    VWAP window, either 09:00-10:15 or 09:00-15:00
        v_wap         volume-weighted average price
        or None (no data for the given date)
"""
day = cons.convert_date(date) if date is not None else datetime.date.today()
if day.strftime("%Y%m%d") not in calendar:
warnings.warn("%s非交易日" % day.strftime("%Y%m%d"))
return None
try:
json_data = json.loads(
requests_link(
cons.SHFE_V_WAP_URL % (day.strftime("%Y%m%d")),
headers=cons.headers,
encoding="utf-8",
).text
)
except:
return None
if len(json_data["o_currefprice"]) == 0:
return None
try:
df = pd.DataFrame(json_data["o_currefprice"])
df["INSTRUMENTID"] = df["INSTRUMENTID"].str.strip()
df[":B1"].astype("int16")
return df.rename(columns=cons.SHFE_V_WAP_COLUMNS)[
list(cons.SHFE_V_WAP_COLUMNS.values())
]
except:
return None
def get_shfe_daily(date: str = "20160104") -> pd.DataFrame:
"""
    Shanghai Futures Exchange (SHFE) daily price/volume data
    http://www.shfe.com.cn/statements/dataview.html?paramid=kx
    :param date: date, format YYYY-MM-DD, YYYYMMDD or a datetime.date object; defaults to the current trading day
    :type date: str or datetime.date
    :return: SHFE daily price/volume data
    :rtype: pandas.DataFrame or None
        SHFE daily fields (DataFrame):
        symbol          contract code
        date            date
        open            open price
        high            high price
        low             low price
        close           close price
        volume          volume
        open_interest   open interest
        turnover        turnover
        settle          settlement price
        pre_settle      previous settlement price
        variety         product type
        or None (no trading data for the given trading day)
"""
day = cons.convert_date(date) if date is not None else datetime.date.today()
if day.strftime("%Y%m%d") not in calendar:
warnings.warn("%s非交易日" % day.strftime("%Y%m%d"))
return None
try:
json_data = json.loads(
requests_link(
cons.SHFE_DAILY_URL % (day.strftime("%Y%m%d")),
headers=cons.shfe_headers,
).text
)
except requests.HTTPError as reason:
if reason.response != 404:
print(cons.SHFE_DAILY_URL % (day.strftime("%Y%m%d")), reason)
return
if len(json_data["o_curinstrument"]) == 0:
return
df = pd.DataFrame(
[
row
for row in json_data["o_curinstrument"]
if row["DELIVERYMONTH"] not in ["小计", "合计"] and row["DELIVERYMONTH"] != ""
]
)
try:
df["variety"] = df["PRODUCTGROUPID"].str.upper().str.strip()
except KeyError as e:
df["variety"] = df["PRODUCTID"].str.upper().str.split('_', expand=True).iloc[:, 0].str.strip()
df["symbol"] = df["variety"] + df["DELIVERYMONTH"]
df["date"] = day.strftime("%Y%m%d")
v_wap_df = get_shfe_v_wap(day)
if v_wap_df is not None:
df = pd.merge(
df,
v_wap_df[v_wap_df.time_range == "9:00-15:00"],
on=["date", "symbol"],
how="left",
)
df["turnover"] = df.v_wap * df.VOLUME
else:
df["VOLUME"] = df["VOLUME"].apply(lambda x: 0 if x == "" else x)
df["turnover"] = df["VOLUME"] * df["SETTLEMENTPRICE"]
df.rename(columns=cons.SHFE_COLUMNS, inplace=True)
df = df[~df["symbol"].str.contains("efp")]
return df[cons.OUTPUT_COLUMNS]
def get_dce_daily(date: str = "20030115") -> pd.DataFrame:
"""
    Dalian Commodity Exchange (DCE) daily trading data
    http://www.dce.com.cn/dalianshangpin/xqsj/tjsj26/rtj/rxq/index.html
    :param date: trading day, e.g., 20200416
    :type date: str
    :return: daily quotes for each product on the given trading day
:rtype: pandas.DataFrame
"""
day = cons.convert_date(date) if date is not None else datetime.date.today()
if day.strftime("%Y%m%d") not in calendar:
warnings.warn("%s非交易日" % day.strftime("%Y%m%d"))
return None
url = "http://www.dce.com.cn/publicweb/quotesdata/exportDayQuotesChData.html"
headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Content-Length": "86",
"Content-Type": "application/x-www-form-urlencoded",
"Host": "www.dce.com.cn",
"Origin": "http://www.dce.com.cn",
"Pragma": "no-cache",
"Referer": "http://www.dce.com.cn/publicweb/quotesdata/dayQuotesCh.html",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36",
}
params = {
"dayQuotes.variety": "all",
"dayQuotes.trade_type": "0",
"year": date[:4],
"month": str(int(date[4:6]) - 1),
"day": date[6:],
"exportFlag": "excel",
}
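    # note: the export endpoint apparently expects a zero-based month, hence str(int(date[4:6]) - 1) above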
r = requests.post(url, data=params, headers=headers)
data_df = pd.read_excel(BytesIO(r.content))
data_df = data_df[~data_df["商品名称"].str.contains("小计")]
data_df = data_df[~data_df["商品名称"].str.contains("总计")]
data_df["variety"] = data_df["商品名称"].map(lambda x: cons.DCE_MAP[x])
data_df["symbol"] = data_df["variety"] + data_df["交割月份"].astype(int).astype(str)
del data_df["商品名称"]
del data_df["交割月份"]
data_df.columns = ["open", "high", "low", "close",
"pre_settle", "settle", "_", "_",
"volume", "open_interest", "_", "turnover", "variety", "symbol"]
data_df["date"] = date
data_df = data_df[
["symbol", "date", "open", "high", "low", "close", "volume", "open_interest", "turnover", "settle",
"pre_settle", "variety"]]
data_df = data_df.applymap(lambda x: x.replace(",", ""))
data_df = data_df.astype({"open": "float",
"high": "float",
"low": "float",
"close": "float",
"volume": "float",
"open_interest": "float",
"turnover": "float",
"settle": "float",
"pre_settle": "float",
})
return data_df
def get_futures_daily(start_date: str = "20210421", end_date: str = "20210426", market: str = "INE", index_bar: bool = False) -> pd.DataFrame:
"""
    Exchange daily trading data
    :param start_date: start date, format YYYY-MM-DD, YYYYMMDD or a datetime.date object; defaults to today when empty
    :type start_date: str
    :param end_date: end date, format YYYY-MM-DD, YYYYMMDD or a datetime.date object; defaults to today when empty
    :type end_date: str
    :param market:
#!/usr/bin/python2.7
"""
Copyright (c) 2014, ICFLIX Media FZ LLC All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
Desc: Generate Nagios configuration from a given file, or from check_multi output.
"""
import logging
import logging.handlers
import json
import os.path
import re
import sys
import yaml
from nagios_to_yaml import NagiosToYaml
ICINGA_DIR = '/etc/icinga'
LOG_FORMAT = '%(asctime)s %(levelname)-10s %(message)s'
MACHINEDB_FILE = '/etc/icinga/machines.json'
NAGIOS_DEFS_FILE = '/etc/icinga/nagios.yml'
NAGIOS_TKEYS = [
'commands',
'contacts',
'contactgroups',
'datacenters',
'hostgroups',
'hosts',
'services'
]
STAGING_DOMAIN = 'icflix.io'
# 7 minutes
SVC_FRESHNESS_THRESHOLD = 420
# Make sure Nagios knows all Hosts in MachineDB
# -> <FQDN>.cfg must exist for each and every Host in MDB
# Re-Generate hostgroups
# Re-Generate contacts
# Re-Generate contactsgroups
# Re-Generate commands
class NagiosConfigGenerator(object):
"""Generate Nagios Configuration for *ONE* Host from given file."""
def __init__(self):
self.machine_db = None
self.nagios_db = None
self.load_machine_db()
self.load_nagios_definitions()
self.mdb_to_nagios()
def add_datacenter_to_nagios(self, dct_dict):
"""Add given Datacenter to Nagios. If given Datacenter is already
known, merge in attributes/values, but don't over-ride Nagios ones.
"""
nagios_dcs = self.nagios_db['datacenters']
dct_name = dct_dict.pop('host_name')
if dct_name not in nagios_dcs:
nagios_dcs[dct_name] = {}
nagios_dct = nagios_dcs[dct_name]
if 'hostgroups' not in nagios_dct:
nagios_dct['hostgroups'] = list()
nagios_dct['hostgroups'].append('datacenter')
if 'host' not in nagios_dct:
nagios_dct['host'] = {}
for attr in dct_dict.iterkeys():
if attr in nagios_dct['host']:
# Don't over-ride Nagios definitions
continue
nagios_dct['host'][attr] = dct_dict[attr]
def add_host_to_nagios(self, host_dict, is_lxc):
"""Add given Host to Nagios. If given Host is already known, merge in
values, but don't over-ride Nagios ones.
"""
nagios_hosts = self.nagios_db['hosts']
hostname = host_dict.pop('host_name')
if hostname not in nagios_hosts:
nagios_hosts[hostname] = {}
nagios_host = nagios_hosts[hostname]
if 'hostgroups' not in nagios_host:
nagios_host['hostgroups'] = list()
auto_hostgroup = self.get_auto_hostgroup(hostname)
nagios_host['hostgroups'].append(auto_hostgroup)
if is_lxc:
nagios_host['hostgroups'].append('lxc')
if ('_DOMAIN' in host_dict
and host_dict['_DOMAIN'] == STAGING_DOMAIN):
nagios_host['hostgroups'].append('stage')
if 'host' not in nagios_host:
nagios_host['host'] = {}
for attr in host_dict.iterkeys():
if attr in nagios_host['host']:
# Don't over-ride Nagios definitions
continue
nagios_host['host'][attr] = host_dict[attr]
def add_services_to_host(self, nagios_host, ext_svcs):
"""Add (external) service definition to Nagios."""
if 'services' not in nagios_host:
nagios_host['services'] = {}
nagios_svcs = nagios_host['services']
for svc_key in ext_svcs['services'].iterkeys():
if svc_key not in nagios_svcs:
nagios_svcs[svc_key] = {}
nagios_svc = nagios_svcs[svc_key]
for attr in ext_svcs['services'][svc_key].iterkeys():
if attr in nagios_svc:
continue
nagios_svc[attr] = ext_svcs['services'][svc_key][attr]
def ensure_host_definitions(self):
"""Ensure Nagios knows all Hosts defined in MDB. This is required in
        order to re-generate Hostgroups, because it could easily happen that Nagios
        wouldn't know about Host(s) referenced in hostgroups.
"""
for host_key in self.nagios_db['hosts'].iterkeys():
host_dict = self.nagios_db['hosts'][host_key]
host_dict['host']['host_name'] = host_key
self.ensure_host_definition(host_dict)
def ensure_host_definition(self, host_dict):
"""Ensure file with Host definition exists."""
if host_dict is None:
return (-1)
host_file = ('%s/objects/host_%s.cfg' %
(ICINGA_DIR, host_dict['host']['host_name']))
if os.path.exists(host_file):
#logging.debug("File '%s' exists.", host_file)
return 1
fhandle = open(host_file, 'w+')
self.write_definition(fhandle, 'host', host_dict['host'])
if 'services' not in host_dict:
host_dict['services'] = {}
dummy_svc = dict()
dummy_svc['active_checks_enabled'] = 1
dummy_svc['check_command'] = 'return-ok'
dummy_svc['check_interval'] = 20
dummy_svc['host_name'] = host_dict['host']['host_name']
dummy_svc['use'] = 'generic-service'
host_dict['services']['dummy-ok'] = dummy_svc
for service_key in host_dict['services'].iterkeys():
service_copy = host_dict['services'][service_key]
service_copy['service_description'] = service_key
self.write_definition(fhandle, 'service',
service_copy)
del service_copy
fhandle.close()
return 0
def finish_host_definition(self, host_dict, hostname):
"""Add/over-ride attributes in Host definition."""
if hostname not in self.nagios_db['hosts']:
return
if 'host' not in self.nagios_db['hosts'][hostname]:
return
for attr in self.nagios_db['hosts'][hostname]['host'].iterkeys():
host_dict[attr] = self.nagios_db['hosts'][hostname]['host'][attr]
def get_auto_hostgroup(self, hostname):
"""Determine automatic Nagios hostgroup."""
auto_hostgroup = hostname.split('.')[0]
auto_hostgroup = re.sub(r'(\d+$|\d+[a-z]+)$', r'', auto_hostgroup)
return auto_hostgroup
def get_host_dict(self, hostname, machine_ip, ssh_port, parents):
"""Create Nagios 'host' as a dictionary from given params.
Parents is expected to be either None or a list.
"""
host_dict = {}
host_dict['use'] = 'generic-host'
host_dict['host_name'] = hostname
host_dict['address'] = machine_ip
if parents is not None:
host_dict['parents'] = ','.join(parents)
if ssh_port is not None:
host_dict['_SSH_PORT'] = ssh_port
splitted = hostname.split('.')
host_dict['_SHORTNAME'] = '.'.join(splitted[:len(splitted)-2])
host_dict['_DOMAIN'] = '.'.join(splitted[len(splitted)-2:])
return host_dict
def get_padding(self, padding_len):
"""Return padding :)"""
padding = ''
while padding_len > 0:
padding += ' '
padding_len -= 1
return padding
def get_ssh_port(self, machine_obj, is_lxc):
"""Determine SSH port for given Machine."""
ssh_port = 22
if is_lxc == False:
return ssh_port
if 'ports' not in machine_obj:
# Ehm, this is a bit inconclusive, isn't it?
return ssh_port
for port_cfg in machine_obj['ports']:
# dict is expected here
if 'private_port' not in port_cfg:
continue
if int(port_cfg['private_port']) == 22:
ssh_port = int(port_cfg['public_port'])
return ssh_port
def load_machine_db(self):
"""Just loads machine DB from JSON."""
with open(MACHINEDB_FILE, 'r') as fhandle:
self.machine_db = json.load(fhandle)['machines']
def load_nagios_definitions(self):
"""Load Nagios definitions from YAML."""
with open(NAGIOS_DEFS_FILE, 'r') as fhandle:
            self.nagios_db = yaml.safe_load(fhandle)
# Make nagios_db sane
for top_key in NAGIOS_TKEYS:
if top_key in self.nagios_db:
continue
self.nagios_db[top_key] = {}
if 'passive' not in self.nagios_db['services']:
self.nagios_db['services']['passive'] = {}
if 'active' not in self.nagios_db['services']:
self.nagios_db['services']['active'] = {}
def import_config(self, services_cfg):
"""Import configuration file (sent) from remote Host."""
if not os.path.exists(services_cfg):
logging.error("Given file '%s' doesn't exist.", services_cfg)
return False
hostname = os.path.basename(services_cfg).replace('.cfg', '')
if hostname == '':
logging.error('I have empty hostname! :-(')
return False
nagios_host = None
for host_key in self.nagios_db['hosts'].iterkeys():
if hostname == host_key:
nagios_host = self.nagios_db['hosts'][host_key]
break
if nagios_host is None:
logging.error('Machine %s not found in Nagios/MDB.', hostname)
return False
logging.info('FQDN: %s', hostname)
logging.info('IP: %s', nagios_host['host']['address'])
logging.info('SSH: %s', nagios_host['host']['_SSH_PORT'])
logging.info('Hostgroups: %s', nagios_host['hostgroups'])
nag2yaml = NagiosToYaml()
nag2yaml.parse_nagios_config(services_cfg)
ext_services = nag2yaml.nagios_cfg
for extsvc_key in ext_services['services'].iterkeys():
ext_service = ext_services['services'][extsvc_key]
if 'stage' in nagios_host['hostgroups']:
ext_service['use'] = 'stage-service'
else:
ext_service['use'] = 'generic-service'
ext_service['check_freshness'] = 1
ext_service['active_checks_enabled'] = 0
ext_service['passive_checks_enabled'] = 1
ext_service['freshness_threshold'] = SVC_FRESHNESS_THRESHOLD
ext_service['check_command'] = 'check_dummy_4p!2 "check is stale"'
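            # freshness handling: with passive checks enabled and check_freshness set, Icinga runs
            # check_command whenever no passive result arrives within freshness_threshold seconds;
            # here that is check_dummy_4p!2, presumably reporting CRITICAL "check is stale"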
if extsvc_key not in self.nagios_db['services']['passive']:
continue
# Over-ride attributes from ['services']['passive']
svc_nagios = self.nagios_db['services']['passive'][extsvc_key]
for attr in svc_nagios.iterkeys():
ext_service[attr] = svc_nagios[attr]
self.add_services_to_host(nagios_host, ext_services)
host_file = '%s/objects/host_%s.cfg' % (ICINGA_DIR, hostname)
with open(host_file, 'w+') as fhandle:
host_copy = nagios_host['host'].copy()
host_copy['host_name'] = hostname
self.write_definition(fhandle, 'host', host_copy)
for svc_key in nagios_host['services'].iterkeys():
service_copy = nagios_host['services'][svc_key].copy()
service_copy['service_description'] = svc_key
self.write_definition(fhandle, 'service', service_copy)
return True
def mdb_to_nagios(self):
"""Sync Nagios YAML with MDB."""
for host_key in self.machine_db.iterkeys():
hostname = '%s.icflix.com' % (host_key)
mdb_host = self.machine_db[host_key]
if 'datacenter' in mdb_host and 'provider' in mdb_host:
dct_name = '%s.%s' % (mdb_host['datacenter'],
mdb_host['provider'])
dct_dict = self.get_host_dict(dct_name, 'localhost', None, None)
dct_dict['use'] = 'generic-datacenter'
dct_dict.pop('_SHORTNAME')
dct_dict.pop('_DOMAIN')
self.add_datacenter_to_nagios(dct_dict)
parents = [dct_name]
else:
parents = None
host_dict = self.get_host_dict(hostname, mdb_host['ip'], 22,
parents)
self.add_host_to_nagios(host_dict, False)
if 'lxc' not in mdb_host:
continue
for lxc_key in mdb_host['lxc'].iterkeys():
ssh_port = self.get_ssh_port(mdb_host['lxc'][lxc_key], True)
lxc_dict = self.get_host_dict(lxc_key, mdb_host['ip'],
ssh_port, [hostname])
self.add_host_to_nagios(lxc_dict, True)
def print_definition(self, definition_str, some_dict):
"""Print host definition."""
stuffing_len = 0
dict_keys = some_dict.keys()
dict_keys.sort()
# figure-out padding len
for attribute in dict_keys:
if len(attribute) > stuffing_len:
stuffing_len = len(attribute)
stuffing_len += 1
print 'define %s {' % (definition_str)
for attribute in dict_keys:
padding_len = stuffing_len - len(attribute)
padding = self.get_padding(padding_len)
print ' %s%s%s' % (attribute, padding, some_dict[attribute])
print '}\n'
def run(self, services_cfg):
""" Go, go, go!"""
if not self.import_config(services_cfg):
return False
self.ensure_host_definitions()
self.write_command_definitions()
self.write_contact_definitions()
self.write_contactgroup_definitions()
self.write_datacenter_definitions()
self.write_hostgroup_definitions()
self.write_service_definitions()
return True
def write_command_definitions(self):
"""Write definitions of all commands."""
if 'commands' not in self.nagios_db:
return
commands_file = '%s/objects/commands.cfg' % (ICINGA_DIR)
fhandle = open(commands_file, 'w+')
i = 0
for command in self.nagios_db['commands'].iterkeys():
cmd_dict = self.nagios_db['commands'][command]
cmd_dict['command_name'] = command
self.write_definition(fhandle, 'command', cmd_dict)
i += 1
fhandle.close()
logging.info("Written %i 'command' definitions.", i)
def write_contact_definitions(self):
"""Write definitions of all contacts."""
if 'contacts' not in self.nagios_db:
return
contacts_file = '%s/objects/contacts.cfg' % (ICINGA_DIR)
fhandle = open(contacts_file, 'w+')
i = 0
for contact in self.nagios_db['contacts'].iterkeys():
contact_dict = self.nagios_db['contacts'][contact]
contact_dict['contact_name'] = contact
self.write_definition(fhandle, 'contact', contact_dict)
i += 1
fhandle.close()
logging.info("Written %i 'contact' definitions.", i)
def write_contactgroup_definitions(self):
"""Write definitions of all contactgroups."""
cgroups_file = '%s/objects/contactgroups.cfg' % (ICINGA_DIR)
cgroups = self.nagios_db['contactgroups']
fhandle = open(cgroups_file, 'w+')
i = 0
for cgroup_key in cgroups.iterkeys():
cgroup_dict = cgroups[cgroup_key]
cgroup_dict['contactgroup_name'] = cgroup_key
self.write_definition(fhandle, 'contactgroup', cgroup_dict)
i += 1
fhandle.close()
logging.info("Written %i 'contactgroup' definitions.", i)
def write_datacenter_definitions(self):
"""Write definitions for all datacenters."""
dctrs_file = '%s/objects/datacenters.cfg' % (ICINGA_DIR)
dctrs = self.nagios_db['datacenters']
with open(dctrs_file, 'w+') as fhandle:
i = 0
for dctr_key in dctrs.iterkeys():
dct_dict = dctrs[dctr_key]['host'].copy()
dct_dict['host_name'] = dctr_key
self.write_definition(fhandle, 'host', dct_dict)
i += 1
logging.info("Written %i 'datacenter' definitions.", i)
def write_definition(self, fhandle, definition_str, some_dict):
"""Write Nagios definition into given file pointer."""
stuffing_len = 0
dict_keys = some_dict.keys()
dict_keys.sort()
# figure-out padding len
for attribute in dict_keys:
if len(attribute) > stuffing_len:
stuffing_len = len(attribute)
stuffing_len += 1
fhandle.write('define %s {\n' % (definition_str))
for attribute in dict_keys:
            padding_len = stuffing_len -
<reponame>TugberkArkose/MLScheduler
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.000170989,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202823,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.00200632,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.173515,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.300464,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.172325,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.646304,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.171204,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.2004,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.000379037,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00629004,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0455049,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0465187,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0458839,
'Execution Unit/Register Files/Runtime Dynamic': 0.0528087,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.110005,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.288677,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 1.59599,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00193859,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00193859,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.0016875,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000652706,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000668245,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00623292,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.018623,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0447196,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.84455,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.174744,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.151888,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.20323,
'Instruction Fetch Unit/Runtime Dynamic': 0.396208,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0712858,
'L2/Runtime Dynamic': 0.0204389,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.82801,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.794362,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0514689,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.051469,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.07204,
'Load Store Unit/Runtime Dynamic': 1.09966,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.126914,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.253828,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.045042,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0460744,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.176864,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0287604,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.413231,
'Memory Management Unit/Runtime Dynamic': 0.0748348,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 18.5219,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.00132171,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.00888848,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0897372,
'Renaming Unit/Int Front End RAT/Subthreshold | |
True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = location
self.tags = tags
self.etag = None
self.system_data = None
class ManagedCluster(Resource):
"""The manged cluster resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Azure resource identifier.
:vartype id: str
:ivar name: Azure resource name.
:vartype name: str
:ivar type: Azure resource type.
:vartype type: str
:param location: Required. Azure resource location.
:type location: str
:param tags: A set of tags. Azure resource tags.
:type tags: dict[str, str]
:ivar etag: Azure resource etag.
:vartype etag: str
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~service_fabric_managed_clusters_management_client.models.SystemData
:param sku: The sku of the managed cluster.
:type sku: ~service_fabric_managed_clusters_management_client.models.Sku
:param dns_name: The cluster dns name.
:type dns_name: str
:ivar fqdn: The fully qualified domain name associated with the public load balancer of the
cluster.
:vartype fqdn: str
:ivar ipv4_address: The IPv4 address associated with the public load balancer of the cluster.
:vartype ipv4_address: str
:ivar cluster_id: A service generated unique identifier for the cluster resource.
:vartype cluster_id: str
:ivar cluster_state: The current state of the cluster. Possible values include:
"WaitingForNodes", "Deploying", "BaselineUpgrade", "Upgrading", "UpgradeFailed", "Ready".
:vartype cluster_state: str or
~service_fabric_managed_clusters_management_client.models.ClusterState
:ivar cluster_certificate_thumbprints: List of thumbprints of the cluster certificates.
:vartype cluster_certificate_thumbprints: list[str]
:param client_connection_port: The port used for client connections to the cluster.
:type client_connection_port: int
:param http_gateway_connection_port: The port used for HTTP connections to the cluster.
:type http_gateway_connection_port: int
:param admin_user_name: VM admin user name.
:type admin_user_name: str
:param admin_password: <PASSWORD>.
:type admin_password: str
:param load_balancing_rules: Load balancing rules that are applied to the public load balancer
of the cluster.
:type load_balancing_rules:
list[~service_fabric_managed_clusters_management_client.models.LoadBalancingRule]
:param allow_rdp_access: Setting this to true enables RDP access to the VM. The default NSG
rule opens RDP port to internet which can be overridden with custom Network Security Rules. The
default value for this setting is false.
:type allow_rdp_access: bool
:param network_security_rules: Custom Network Security Rules that are applied to the virtual
network of the cluster.
:type network_security_rules:
list[~service_fabric_managed_clusters_management_client.models.NetworkSecurityRule]
:param clients: Client certificates that are allowed to manage the cluster.
:type clients:
list[~service_fabric_managed_clusters_management_client.models.ClientCertificate]
:param azure_active_directory: The AAD authentication settings of the cluster.
:type azure_active_directory:
~service_fabric_managed_clusters_management_client.models.AzureActiveDirectory
:param fabric_settings: The list of custom fabric settings to configure the cluster.
:type fabric_settings:
list[~service_fabric_managed_clusters_management_client.models.SettingsSectionDescription]
:ivar provisioning_state: The provisioning state of the managed cluster resource. Possible
values include: "None", "Creating", "Created", "Updating", "Succeeded", "Failed", "Canceled",
"Deleting", "Deleted", "Other".
:vartype provisioning_state: str or
~service_fabric_managed_clusters_management_client.models.ManagedResourceProvisioningState
:param cluster_code_version: The Service Fabric runtime version of the cluster. This property
can only be set by the user when **upgradeMode** is set to 'Manual'. To get the list of available
Service Fabric versions for new clusters use `ClusterVersion API <./ClusterVersion.md>`_. To
get the list of available version for existing clusters use **availableClusterVersions**.
:type cluster_code_version: str
:param cluster_upgrade_cadence: Indicates when new cluster runtime version upgrades will be
applied after they are released. The default is Wave0. Possible values include: "Wave0",
"Wave1", "Wave2".
:type cluster_upgrade_cadence: str or
~service_fabric_managed_clusters_management_client.models.ClusterUpgradeCadence
:param addon_features: List of add-on features to enable on the cluster.
:type addon_features: list[str or
~service_fabric_managed_clusters_management_client.models.ManagedClusterAddOnFeature]
:param enable_auto_os_upgrade: Setting this to true enables automatic OS upgrade for the node
types that are created using any platform OS image with version 'latest'. The default value for
this setting is false.
:type enable_auto_os_upgrade: bool
:param application_type_versions_cleanup_policy: The policy used to clean up unused versions.
:type application_type_versions_cleanup_policy:
~service_fabric_managed_clusters_management_client.models.ApplicationTypeVersionsCleanupPolicy
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'etag': {'readonly': True},
'system_data': {'readonly': True},
'fqdn': {'readonly': True},
'ipv4_address': {'readonly': True},
'cluster_id': {'readonly': True},
'cluster_state': {'readonly': True},
'cluster_certificate_thumbprints': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'sku': {'key': 'sku', 'type': 'Sku'},
'dns_name': {'key': 'properties.dnsName', 'type': 'str'},
'fqdn': {'key': 'properties.fqdn', 'type': 'str'},
'ipv4_address': {'key': 'properties.ipv4Address', 'type': 'str'},
'cluster_id': {'key': 'properties.clusterId', 'type': 'str'},
'cluster_state': {'key': 'properties.clusterState', 'type': 'str'},
'cluster_certificate_thumbprints': {'key': 'properties.clusterCertificateThumbprints', 'type': '[str]'},
'client_connection_port': {'key': 'properties.clientConnectionPort', 'type': 'int'},
'http_gateway_connection_port': {'key': 'properties.httpGatewayConnectionPort', 'type': 'int'},
'admin_user_name': {'key': 'properties.adminUserName', 'type': 'str'},
'admin_password': {'key': 'properties.adminPassword', 'type': 'str'},
'load_balancing_rules': {'key': 'properties.loadBalancingRules', 'type': '[LoadBalancingRule]'},
'allow_rdp_access': {'key': 'properties.allowRdpAccess', 'type': 'bool'},
'network_security_rules': {'key': 'properties.networkSecurityRules', 'type': '[NetworkSecurityRule]'},
'clients': {'key': 'properties.clients', 'type': '[ClientCertificate]'},
'azure_active_directory': {'key': 'properties.azureActiveDirectory', 'type': 'AzureActiveDirectory'},
'fabric_settings': {'key': 'properties.fabricSettings', 'type': '[SettingsSectionDescription]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'cluster_code_version': {'key': 'properties.clusterCodeVersion', 'type': 'str'},
'cluster_upgrade_cadence': {'key': 'properties.clusterUpgradeCadence', 'type': 'str'},
'addon_features': {'key': 'properties.addonFeatures', 'type': '[str]'},
'enable_auto_os_upgrade': {'key': 'properties.enableAutoOSUpgrade', 'type': 'bool'},
'application_type_versions_cleanup_policy': {'key': 'properties.applicationTypeVersionsCleanupPolicy', 'type': 'ApplicationTypeVersionsCleanupPolicy'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
sku: Optional["Sku"] = None,
dns_name: Optional[str] = None,
client_connection_port: Optional[int] = 19000,
http_gateway_connection_port: Optional[int] = 19080,
admin_user_name: Optional[str] = None,
admin_password: Optional[str] = None,
load_balancing_rules: Optional[List["LoadBalancingRule"]] = None,
allow_rdp_access: Optional[bool] = None,
network_security_rules: Optional[List["NetworkSecurityRule"]] = None,
clients: Optional[List["ClientCertificate"]] = None,
azure_active_directory: Optional["AzureActiveDirectory"] = None,
fabric_settings: Optional[List["SettingsSectionDescription"]] = None,
cluster_code_version: Optional[str] = None,
cluster_upgrade_cadence: Optional[Union[str, "ClusterUpgradeCadence"]] = None,
addon_features: Optional[List[Union[str, "ManagedClusterAddOnFeature"]]] = None,
enable_auto_os_upgrade: Optional[bool] = None,
application_type_versions_cleanup_policy: Optional["ApplicationTypeVersionsCleanupPolicy"] = None,
**kwargs
):
super(ManagedCluster, self).__init__(location=location, tags=tags, **kwargs)
self.sku = sku
self.dns_name = dns_name
self.fqdn = None
self.ipv4_address = None
self.cluster_id = None
self.cluster_state = None
self.cluster_certificate_thumbprints = None
self.client_connection_port = client_connection_port
self.http_gateway_connection_port = http_gateway_connection_port
self.admin_user_name = admin_user_name
self.admin_password = admin_password
self.load_balancing_rules = load_balancing_rules
self.allow_rdp_access = allow_rdp_access
self.network_security_rules = network_security_rules
self.clients = clients
self.azure_active_directory = azure_active_directory
self.fabric_settings = fabric_settings
self.provisioning_state = None
self.cluster_code_version = cluster_code_version
self.cluster_upgrade_cadence = cluster_upgrade_cadence
self.addon_features = addon_features
self.enable_auto_os_upgrade = enable_auto_os_upgrade
self.application_type_versions_cleanup_policy = application_type_versions_cleanup_policy
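# Illustrative sketch (not part of the generated models): populating a ManagedCluster
# before handing it to the management client; the client itself and its create/update
# operation live outside this module and are not shown. Every value is a placeholder.
def _example_build_managed_cluster():
    return ManagedCluster(
        location='eastus',                    # required
        tags={'environment': 'dev'},
        dns_name='mysfcluster',               # hypothetical DNS name
        admin_user_name='vmadmin',            # hypothetical VM admin user
        client_connection_port=19000,         # defaults shown explicitly
        http_gateway_connection_port=19080,
    )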
class ManagedClusterListResult(msrest.serialization.Model):
"""Managed Cluster list results.
:param value:
:type value: list[~service_fabric_managed_clusters_management_client.models.ManagedCluster]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ManagedCluster]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["ManagedCluster"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(ManagedClusterListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class ManagedClusterUpdateParameters(msrest.serialization.Model):
"""Managed cluster update request.
:param tags: A set of tags. Managed cluster update parameters.
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(ManagedClusterUpdateParameters, self).__init__(**kwargs)
self.tags = tags
class ManagedIdentity(msrest.serialization.Model):
"""Describes the managed identities for an Azure resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: The principal id of the managed identity. This property will only be
provided for a system assigned identity.
:vartype principal_id: str
:ivar tenant_id: The tenant id of the managed identity. This property will only be provided for
a system assigned identity.
:vartype tenant_id: str
:param type: The type of managed identity for the resource. Possible values include: "None",
"SystemAssigned", "UserAssigned", "SystemAssigned, UserAssigned".
:type type: str or
~service_fabric_managed_clusters_management_client.models.ManagedIdentityType
:param user_assigned_identities: The list of user identities associated with the resource. The
user identity dictionary key references will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
:type user_assigned_identities: dict[str,
~service_fabric_managed_clusters_management_client.models.UserAssignedIdentity]
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedIdentity}'},
}
def __init__(
self,
*,
type: Optional[Union[str, "ManagedIdentityType"]] = None,
user_assigned_identities: Optional[Dict[str, "UserAssignedIdentity"]] = None,
**kwargs
):
super(ManagedIdentity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = type
self.user_assigned_identities = user_assigned_identities
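# Illustrative sketch (not part of the generated models): a system-assigned managed
# identity. For user-assigned identities, user_assigned_identities maps ARM resource
# ids to UserAssignedIdentity objects defined elsewhere in this models module.
def _example_managed_identity():
    return ManagedIdentity(type='SystemAssigned')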
class ManagedProxyResource(msrest.serialization.Model):
"""The resource model definition for proxy-only | |
15.149, 15.150, 15.151, 15.152, 15.153, 15.154, 15.155, 15.156, 15.157,
15.158, 15.159, 15.160, 15.161, 15.162, 15.163, 15.164, 15.165, 15.166, 15.167, 15.168,
15.169, 15.170, 15.171, 15.172, 15.173, 15.174, 15.175, 15.176, 15.177, 15.178, 15.179,
15.180, 15.181, 15.182, 15.183, 15.184, 15.185, 15.186, 15.187, 15.188, 15.189, 15.190,
15.191, 15.192, 15.193, 15.194, 15.195, 15.196, 15.197, 15.198, 15.199, 15.200, 15.201,
15.202, 15.203, 15.204, 15.205, 15.206, 15.207, 15.208, 15.209, 15.210, 15.211, 15.212,
15.213, 15.214, 15.215, 15.216, 15.217, 15.218, 15.219, 15.220, 15.221, 15.222, 15.223,
15.224, 15.225, 15.226, 15.227, 15.228, 15.229, 15.230, 15.231, 15.232, 15.233, 15.234,
15.235, 15.236, 15.237, 15.238, 15.239, 15.240, 15.241, 15.242, 15.243, 15.244, 15.245,
15.246, 15.247, 15.248, 15.249, 15.250, 15.251, 15.252, 15.253, 15.254, 15.255, 15.256,
15.257, 15.258, 15.259, 15.260, 15.261, 15.262, 15.263, 15.264, 15.265, 15.266, 15.267,
15.268, 15.269, 15.270, 15.271, 15.272, 15.273, 15.274, 15.275, 15.276, 15.277, 15.278,
15.279, 15.280, 15.281, 15.282, 15.283, 15.284, 15.285, 15.286, 15.287, 15.288, 15.289,
15.290, 15.291, 15.292, 15.293, 15.294, 15.295, 15.296, 15.297, 15.298, 15.299, 15.300,
15.301, 15.302, 15.303, 15.304, 15.305, 15.306, 15.307, 15.308, 15.309, 15.310, 15.311,
15.312, 15.313, 15.314, 15.315, 15.316, 15.317, 15.318, 15.319, 15.320, 15.321, 15.322,
15.323, 15.324, 15.325, 15.326, 15.327, 15.328, 15.329, 15.330, 15.331, 15.332, 15.333,
15.334, 15.335, 15.336, 15.337, 15.338, 15.339, 15.340, 15.341, 15.342, 15.343, 15.344,
15.345, 15.346, 15.347, 15.348, 15.349, 15.350, 15.351, 15.352, 15.353, 15.354, 15.355,
15.356, 15.357, 15.358, 15.359, 15.360, 15.361, 15.362, 15.363, 15.364, 15.365, 15.366,
15.367, 15.368, 15.369, 15.370, 15.371, 15.372, 15.373, 15.374, 15.375, 15.376, 15.377,
15.378, 15.379, 15.380, 15.381, 15.382, 15.383, 15.384, 15.385, 15.386, 15.387, 15.388,
15.389, 15.390, 15.391, 15.392, 15.393, 15.394, 15.395, 15.396, 15.397, 15.398, 15.399,
15.400, 15.401, 15.402, 15.403, 15.404, 15.405, 15.406, 15.407, 15.408, 15.409, 15.410,
15.411, 15.412, 15.413, 15.414, 15.415, 15.416, 15.417, 15.418, 15.419, 15.420, 15.421,
15.422, 15.423, 15.424, 15.425, 15.426, 15.427, 15.428, 15.429, 15.430, 15.431, 15.432,
15.433, 15.434, 15.435, 15.436, 15.437, 15.438, 15.439, 15.440, 15.441, 15.442, 15.443,
15.444, 15.445, 15.446, 15.447, 15.448, 15.449, 15.450, 15.451, 15.452, 15.453, 15.454,
15.455, 15.456, 15.457, 15.458, 15.459, 15.460, 15.461, 15.462, 15.463, 15.464, 15.465,
15.466, 15.467, 15.468, 15.469, 15.470, 15.471, 15.472, 15.473, 15.474, 15.475, 15.476,
15.477, 15.478, 15.479, 15.480, 15.481, 15.482, 15.483, 15.484, 15.485, 15.486, 15.487,
15.488, 15.489, 15.490, 15.491, 15.492, 15.493, 15.494, 15.495, 15.496, 15.497, 15.498,
15.499, 15.500])
nuLnu = np.array([0.915, 0.956, 0.960, 0.938, 0.969, 0.969, 0.999, 0.915, 0.944, 0.930, 0.928,
0.935, 0.896, 0.985, 0.932, 0.931, 1.004, 0.957, 0.927, 0.972, 0.939, 1.018,
0.949, 0.947, 0.981, 0.964, 0.921, 0.972, 0.946, 1.014, 0.918, 0.940, 0.965,
0.992, 0.946, 0.926, 0.967, 0.958, 1.002, 0.925, 0.945, 0.937, 0.933, 0.932,
0.945, 0.908, 0.940, 0.917, 0.939, 0.929, 0.953, 0.984, 1.008, 0.983, 0.992,
0.975, 0.954, 0.973, 0.927, 0.955, 0.931, 0.943, 0.952, 0.941, 0.942, 0.932,
0.965, 0.984, 0.952, 0.959, 0.945, 0.962, 0.954, 0.977, 1.078, 1.040, 1.059,
1.073, 1.136, 1.198, 1.272, 1.457, 1.742, 2.781, 3.065, 3.514, 2.354, 1.792,
1.435, 1.195, 1.191, 1.114, 1.080, 1.076, 1.025, 1.012, 0.990, 0.997, 1.019,
0.998, 0.985, 0.945, 0.982, 0.957, 0.955, 0.929, 0.952, 0.935, 0.901, 0.914,
0.932, 0.921, 0.923, 0.941, 0.932, 0.941, 0.932, 0.973, 0.974, 0.955, 0.965,
0.960, 0.935, 0.888, 0.953, 0.944, 0.960, 0.957, 0.934, 0.966, 0.994, 0.977,
1.038, 1.059, 0.975, 0.985, 0.969, 0.953, 0.962, 0.918, 0.925, 0.920, 0.927,
0.923, 0.956, 0.918, 0.931, 0.925, 0.927, 0.904, 0.896, 0.925, 0.906, 0.908,
0.892, 0.916, 0.914, 0.935, 0.934, 0.955, 0.925, 0.917, 0.908, 0.898, 0.919,
0.912, 0.941, 0.932, 0.942, 0.919, 0.941, 0.953, 0.979, 0.979, 0.976, 0.996,
1.042, 1.033, 1.047, 1.021, 0.977, 1.006, 1.019, 0.988, 0.997, 0.984, 1.023,
0.973, 0.982, 1.031, 0.980, 0.966, 0.944, 0.939, 0.927, 0.951, 0.930, 0.960,
0.981, 0.981, 1.035, 1.102, 2.217, 1.243, 1.047, 1.048, 1.270, 1.192, 1.111,
1.163, 1.168, 1.228, 1.276, 1.389, 1.529, 1.844, 1.527, 1.356, 1.222, 1.148,
1.083, 1.059, 1.011, 1.001, 0.996, 1.002, 1.010, 0.996, 1.010, 1.000, 1.027,
1.084, 1.058, 1.063, 1.067, 1.097, 1.122, 1.113, 1.099, 1.082, 1.128, 1.133,
1.132, 1.132, 1.112, 1.076, 1.106, 1.102, 1.102, 1.058, 1.042, 1.036, 1.003,
0.995, 0.972, 0.973, 0.992, 1.016, 1.011, 1.021, 1.038, 1.089, 1.202, 1.179,
1.253, 1.168, 1.102, 1.035, 0.984, 0.979, 0.960, 0.926, 0.931, 0.930, 0.943,
0.928, 0.901, 0.897, 0.881, 0.899, 0.916, 0.905, 0.904, 0.893, 0.896, 0.918,
0.943, 0.957, 0.969, 0.986, 0.971, 0.931, 0.929, 0.902, 0.889, 0.886, 0.873,
0.887, 0.888, 0.883, 0.884, 0.903, 0.912, 0.930, 0.931, 0.915, 0.897, 0.858,
0.903, 0.909, 0.914, 0.928, 0.941, 0.939, 1.049, 0.955, 0.937, 0.909, 0.891,
0.902, 0.896, 0.894, 0.872, 0.898, 0.917, 0.915, 0.932, 0.949, 0.918, 0.921,
1.089, 0.944, 0.916, 0.915, 0.914, 0.912, 0.904, 0.909, 0.911, 0.910, 0.909,
0.915, 0.922, 0.912, 0.911, 0.908, 0.911, 0.912, 0.908, 0.904, 0.897, 0.893,
0.889, 0.892, 0.898, 0.913, 0.922, 0.933, 0.942, 0.941, 0.942, 0.946, 0.944,
0.937, 0.934, 0.936, 0.942, 1.000, 0.955, 0.938, 0.935, 0.919, 0.917, 0.919,
0.906, 0.901, 0.900, 0.917, 0.910, 0.905, 0.896, 0.898, 0.906, 0.918, 0.913,
0.919, 0.929, 0.922, 0.924, 0.922, 0.915, 0.919, 0.918, 0.925, 0.924, 0.933,
0.932, 0.939, 0.943, 0.926, 0.916, 0.911, 0.899, 0.894, 0.892, 0.892, 0.905,
0.895, 0.877, 0.863, 0.849, 0.839, 0.841, 0.843, 0.839, 0.836, 0.831, 0.829,
0.828, 0.825, 0.819, 0.817, 0.825, 0.831, 0.850, 0.865, 0.880, 0.891, 0.897,
0.899, 0.905, 0.911, 0.907, 0.907, 0.902, 0.901, 0.885, 0.878, 0.870, 0.876,
0.882, 0.895, 0.899, 0.913, 0.927, 0.934, 0.947, 0.965, 0.976, 1.004, 1.014,
1.049, 1.123, 1.239, 1.348, 1.368, 1.265, 1.140, 1.056, 1.013, 0.983, 0.973,
0.967, 0.957, 0.936, 0.927, 0.914, 0.904, 0.894, 0.884, 0.875, 0.870, 0.863,
0.854, 0.860, 0.861, 0.851, 0.844, 0.844, 0.855, 0.866, 0.882, 0.888, 0.892,
0.895, 0.893, 0.891, 0.887, 0.892, 0.894, 0.900, 0.901, 0.905, 0.909, 0.911,
0.914, 0.920, 0.923, 0.925, 0.925, 0.928, 0.931, 0.932, 0.933, 0.936, 0.938,
0.937, 0.936, 0.938, 0.943, 0.937, 0.925, 0.925, 0.932, 0.939, 0.944, 0.944,
0.958, 0.953, 0.931, 0.925, 0.913, 0.911, 0.909, 0.907, 0.903, 0.900, 0.902,
0.904, 0.903, 0.900, 0.904, 0.911, 0.918, 0.929, 0.937, 0.923, 0.911, 0.898,
0.897, 0.890, 0.883, 0.878, 0.866, 0.860, 0.850, 0.842, 0.839, 0.831, 0.832,
0.825, 0.820, 0.818, 0.826, 0.833, 0.832, 0.828, 0.829, 0.819, 0.820, 0.823,
0.827, 0.831, 0.836, 0.842, 0.842, 0.838, 0.835, 0.835, 0.839, 0.845, 0.849,
0.849, 0.848, 0.851, 0.855, 0.863, 0.867, 0.876, 0.883, 0.888, 0.885, 0.890,
0.895, 0.901, 0.897, 0.901, 0.894, 0.884, 0.870, 0.862, 0.858, 0.850, 0.845,
0.844, 0.843, 0.845, 0.837, 0.844, 0.845, 0.845, 0.848, 0.848, 0.844, 0.846,
0.839, 0.832, 0.839, 0.847, 0.857, 0.872, 0.881, 0.890, 0.898, 0.907, 0.933,
0.971, 1.007, 1.053, 1.119, 1.197, 1.245, 1.226, 1.178, 1.155, 1.121, 1.068,
1.019, 0.986, 0.965, 0.954, 0.955, 0.949, 0.949, 0.935, 0.914, 0.895, 0.881,
0.871, 0.863, 0.857, 0.862, 0.862, 0.860, 0.857, 0.857, 0.851, 0.854, 0.857,
0.866, 0.869, 0.862, 0.860, 0.859, 0.855, 0.857, 0.864, 0.872, 0.881, 0.894,
0.889, 0.876, 0.866, 0.867, 0.871, 0.866, 0.870, 0.866, 0.862, 0.856, 0.850,
0.835, 0.843, 0.837, 0.845, 0.840, 0.856, 0.866, 0.883, 0.905, 0.915, 0.922,
0.918, 0.919, 0.910, 0.916, 0.935, 0.955, 0.957, 0.951, 0.942, 0.928, 0.929,
0.922, 0.926, 0.919, 0.921, 0.915, 0.916, 0.918, 0.923, 0.928, 0.934, 0.947,
0.964, 0.987, 1.016, 1.067, 1.125, 1.196, 1.330, 1.489, 1.595, 1.580, 1.468,
1.351, 1.257, 1.175, 1.109, 1.038, 0.993, 0.947, 0.922, 0.906, 0.877, 0.867,
0.866, 0.852, 0.846, 0.846, 0.826, 0.820, 0.817, 0.808, 0.808, 0.792, 0.792,
0.797, 0.782, 0.781, 0.779, 0.780, 0.776, 0.783, 0.791, 0.793, 0.802, 0.818,
0.828, 0.840, 0.866, 0.890, 0.916, 0.960, 1.016, 1.069, 1.069, 1.070, 1.061,
1.035, 0.992, 0.959, 0.938, 0.908, 0.904, 0.898, 0.913, 0.905, 0.893, 0.901,
0.892, 0.906, 0.916, 0.932, 0.943, 0.973, 0.978, 0.972, 0.959, 0.935, 0.954,
0.951, 0.958, 1.009, 1.052, 1.076, 1.098, 1.097, 1.067, 1.044, 1.030, 1.008,
1.010, 1.017, 1.030, 1.057, 1.073, 1.118, 1.180, 1.245, 1.299, 1.310, 1.355,
1.387, 1.463, 1.550, 1.680, 1.900, 2.100, 2.187, 2.188, 2.164, 2.233, 2.371,
2.536, 2.856, 3.141, 3.665, 3.235, 2.608, 2.279, 1.928, 1.795, 1.521, 1.477,
1.307, 1.250, 1.183, 1.067, 0.985, 1.028, 1.045, 1.056, 1.000, 0.931, 0.886,
0.877, 0.856, 0.856, 0.882, 0.876, 0.816, 0.842, 0.833, 0.804, 0.823, 0.865,
0.867, 0.851, 0.937, | |
that will be pushed onto the name stack.
:type name: unsigned int
'''
pass
def glRasterPos(x, y, z, w):
'''Specify the raster position for pixel operations
:param w: Specify the x,y,z, and w object coordinates (if present) for the raster position. If function prototype ends in ‘v’ specifies a pointer to an array of two, three, or four elements, specifying x, y, z, and w coordinates, respectively.
:type w: x,
'''
pass
def glReadBuffer(mode):
'''Select a color buffer source for pixels.
:param mode: Specifies a color buffer.
:type mode: Enumerated constant
'''
pass
def glReadPixels(x, y, width, height, format, type, pixels):
'''Read a block of pixels from the frame buffer
:param y: Specify the window coordinates of the first pixel that is read from the frame buffer. This location is the lower left corner of a rectangular block of pixels.
:type y: x,
:param height: Specify the dimensions of the pixel rectangle. width and height of one correspond to a single pixel.
:type height: width,
:param format: Specifies the format of the pixel data.
:type format: Enumerated constant
:param type: Specifies the data type of the pixel data.
:type type: Enumerated constant
:param pixels: Returns the pixel data.
:type pixels: bgl.Buffer object
'''
pass
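# Illustrative sketch (not part of the generated stubs): reading back an RGBA block
# from the bound framebuffer into a bgl.Buffer. Assumes Blender's real bgl module and
# a live GL context; width/height are example values.
def _example_read_pixels(width=64, height=64):
    import bgl
    pixels = bgl.Buffer(bgl.GL_BYTE, width * height * 4)
    bgl.glReadPixels(0, 0, width, height, bgl.GL_RGBA, bgl.GL_UNSIGNED_BYTE, pixels)
    return pixels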
def glRect(x1, y1, x2, y2, v1, v2):
'''Draw a rectangle
:param y1: Specify one vertex of a rectangle
:type y1: x1,
:param y2: Specify the opposite vertex of the rectangle
:type y2: x2,
:param v2: Specifies a pointer to one vertex of a rectangle and the pointer to the opposite vertex of the rectangle
:type v2: v1,
'''
pass
def glRenderMode(mode):
'''Set rasterization mode
:param mode: Specifies the rasterization mode.
:type mode: Enumerated constant
'''
pass
def glRotate(angle, x, y, z):
'''Multiply the current matrix by a rotation matrix
:param angle: Specifies the angle of rotation in degrees.
:type angle: Depends on function prototype.
:param z: Specify the x, y, and z coordinates of a vector respectively.
:type z: x,
'''
pass
def glScale(x, y, z):
'''Multiply the current matrix by a general scaling matrix
:param z: Specify scale factors along the x, y, and z axes, respectively.
:type z: x,
'''
pass
def glScissor(x, y, width, height):
'''Define the scissor box
:param y: Specify the lower left corner of the scissor box. Initially (0, 0).
:type y: x,
:param height: Specify the width and height of the scissor box. When a GL context is first attached to a window, width and height are set to the dimensions of that window.
:type height: width
'''
pass
def glSelectBuffer(size, buffer):
'''Establish a buffer for selection mode values
:param size: Specifies the size of buffer
:type size: int
:param buffer: Returns the selection data
:type buffer: bgl.Buffer I{type GL_INT}
'''
pass
def glShadeModel(mode):
'''Select flat or smooth shading
:param mode: Specifies a symbolic value representing a shading technique.
:type mode: Enumerated constant
'''
pass
def glStencilFunc(func, ref, mask):
'''Set function and reference value for stencil testing
:param func: Specifies the test function.
:type func: Enumerated constant
:param ref: Specifies the reference value for the stencil test. ref is clamped to the range [0, 2^n - 1], where n is the number of bitplanes in the stencil buffer. The initial value is 0.
:type ref: int
:param mask: Specifies a mask that is ANDed with both the reference value and the stored stencil value when the test is done. The initial value is all 1’s.
:type mask: unsigned int
'''
pass
def glStencilMask(mask):
'''Control the writing of individual bits in the stencil planes
:param mask: Specifies a bit mask to enable and disable writing of individual bits in the stencil planes. Initially, the mask is all 1’s.
:type mask: unsigned int
'''
pass
def glStencilOp(fail, zfail, zpass):
'''Set stencil test actions
:param fail: Specifies the action to take when the stencil test fails. The initial value is GL_KEEP.
:type fail: Enumerated constant
:param zfail: Specifies the stencil action when the stencil test passes, but the depth test fails. zfail accepts the same symbolic constants as fail. The initial value is GL_KEEP.
:type zfail: Enumerated constant
:param zpass: Specifies the stencil action when both the stencil test and the depth test pass, or when the stencil test passes and either there is no depth buffer or depth testing is not enabled. zpass accepts the same symbolic constants as fail. The initial value is GL_KEEP.
:type zpass: Enumerated constant
'''
pass
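# Illustrative sketch (not part of the generated stubs): a common stencil configuration
# that writes 1 wherever geometry passes, for later masking. Assumes Blender's real bgl
# module and a live GL context.
def _example_stencil_mask_pass():
    import bgl
    bgl.glEnable(bgl.GL_STENCIL_TEST)
    bgl.glStencilFunc(bgl.GL_ALWAYS, 1, 0xFF)                  # always pass, reference = 1
    bgl.glStencilOp(bgl.GL_KEEP, bgl.GL_KEEP, bgl.GL_REPLACE)  # write ref where the tests pass
    bgl.glStencilMask(0xFF)                                    # enable writing to all stencil bits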
def glTexCoord(s, t, r, q, v):
'''Set the current texture coordinates
:param q: Specify s, t, r, and q texture coordinates. Not all parameters are present in all forms of the command.
:type q: s,
:param v: Specifies a pointer to an array of one, two, three, or four elements, which in turn specify the s, t, r, and q texture coordinates.
:type v: bgl.Buffer object. Depends on function prototype. (for ‘v’ prototypes only)
'''
pass
def glTexEnv(target, pname, param):
'''Set texture environment parameters
:param target: Specifies a texture environment. Must be GL_TEXTURE_ENV.
:type target: Enumerated constant
:param pname: Specifies the symbolic name of a single-valued texture environment parameter. Must be GL_TEXTURE_ENV_MODE.
:type pname: Enumerated constant
:param param: Specifies a single symbolic constant. If function prototype ends in ‘v’ specifies a pointer to a parameter array that contains either a single symbolic constant or an RGBA color
:type param: Depends on function prototype.
'''
pass
def glTexGen(coord, pname, param):
'''Control the generation of texture coordinates
:param coord: Specifies a texture coordinate.
:type coord: Enumerated constant
:param pname: Specifies the symbolic name of the texture- coordinate generation function.
:type pname: Enumerated constant
:param param: Specifies a single-valued texture generation parameter. If function prototype ends in ‘v’ specifies a pointer to an array of texture generation parameters. If pname is GL_TEXTURE_GEN_MODE, then the array must contain a single symbolic constant. Otherwise, params holds the coefficients for the texture-coordinate generation function specified by pname.
:type param: Depends on function prototype.
'''
pass
def glTexImage1D(target, level, internalformat, width, border, format, type,
pixels):
'''Specify a one-dimensional texture image
:param target: Specifies the target texture.
:type target: Enumerated constant
:param level: Specifies the level-of-detail number. Level 0 is the base image level. Level n is the nth mipmap reduction image.
:type level: int
:param internalformat: Specifies the number of color components in the texture.
:type internalformat: int
:param width: Specifies the width of the texture image. Must be 2^n + 2*(border) for some integer n. All implementations support texture images that are at least 64 texels wide. The height of the 1D texture image is 1.
:type width: int
:param border: Specifies the width of the border. Must be either 0 or 1.
:type border: int
:param format: Specifies the format of the pixel data.
:type format: Enumerated constant
:param type: Specifies the data type of the pixel data.
:type type: Enumerated constant
:param pixels: Specifies a pointer to the image data in memory.
:type pixels: bgl.Buffer object.
'''
pass
def glTexImage2D(target, level, internalformat, width, height, border, format,
type, pixels):
'''Specify a two-dimensional texture image
:param target: Specifies the target texture.
:type target: Enumerated constant
:param level: Specifies the level-of-detail number. Level 0 is the base image level. Level n is the nth mipmap reduction image.
:type level: int
:param internalformat: Specifies the number of color components in the texture.
:type internalformat: int
:param width: Specifies the width of the texture image. Must be 2^n + 2*(border) for some integer n. All implementations support texture images that are at least 64 texels wide.
:type width: int
| |
import datetime as dt
import operator
import random
import arrow
import blazeutils.strings
import pytz
import six
import sqlalchemy as sa
import wrapt
from blazeutils import tolist
from keg.db import db
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy_utils import ArrowType, EmailType
import keg_elements.db.columns as columns
import keg_elements.db.utils as dbutils
import keg_elements.decorators as decor
from keg_elements.extensions import lazy_gettext as _
might_commit = decor.keyword_optional('_commit', after=dbutils.session_commit, when_missing=True)
"""Decorator directing the wrapped method to commit db session upon completion.
A `_commit` bool kwarg is added to the wrapped method's definition, allowing a developer to
turn off the commit when calling.
Exceptions during commit are raised after the session is rolled back.
"""
might_flush = decor.keyword_optional('_flush', after=dbutils.session_flush)
"""Decorator directing the wrapped method to flush db session upon completion.
A `_flush` bool kwarg is added to the wrapped method's definition, allowing a developer to
turn off the flush when calling.
Exceptions during flush are raised after the session is rolled back.
"""
@wrapt.decorator
def kwargs_match_entity(wrapped, instance, args, kwargs):
"""
Asserts that the kwargs passed to the wrapped method match the columns/relationships
of the entity.
"""
if kwargs.get('_check_kwargs', True):
insp = sa.inspection.inspect(instance)
# Only allow kwargs that correspond to a column or relationship on the entity
allowed_keys = {col.key for col in insp.columns} | set(insp.relationships.keys())
# Ignore kwargs starting with "_"
kwarg_keys = set(key for key in kwargs if not key.startswith('_'))
extra_kwargs = kwarg_keys - allowed_keys
assert not extra_kwargs, _('Unknown column or relationship names in kwargs: {kwargs!r}',
kwargs=sorted(extra_kwargs))
return wrapped(*args, **kwargs)
class DefaultColsMixin(object):
"""Basic entity mixin providing int primary key and created/updated timestamps."""
id = sa.Column('id', sa.Integer, primary_key=True)
created_utc = sa.Column(ArrowType, nullable=False, default=arrow.utcnow,
server_default=dbutils.utcnow())
updated_utc = sa.Column(ArrowType, nullable=False, default=arrow.utcnow, onupdate=arrow.utcnow,
server_default=dbutils.utcnow())
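# Illustrative sketch (not part of the original module): combining this mixin with
# MethodsMixin (defined just below) on a hypothetical Keg entity. Declared inside a
# function so the example table is only registered when explicitly requested.
def _example_entity():
    class Thing(DefaultColsMixin, MethodsMixin, db.Model):
        __tablename__ = 'example_things'
        name = sa.Column(sa.Unicode(50), nullable=False)
    return Thing
# Typical use of the generated helpers (assumes an app context and a configured db):
#   Thing = _example_entity()
#   Thing.testing_create()                   # random data, flushed and committed
#   Thing.add(name='widget', _commit=False)  # keyword added by might_commit
#   Thing.get_by(name='widget')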
class MethodsMixin:
"""Entity mixin providing developer/testing-centered methods."""
def from_dict(self, data):
"""Update the instance with the passed information in `data`.
`from_dict` will also update relationships, whether 1:1, 1:N, or M:M.
.. note::
`from_dict` intentionally does not commit and for related entities
turns off auto flushing. This is to prevent premature flushes with
incomplete objects
"""
def update_related(key, value, prop):
"""Update the entity based on the type of relationship it is"""
related_class = prop.mapper.class_
with db.session.no_autoflush:
if prop.uselist:
new_list = list()
for row in value:
obj = related_class.add_or_edit(row, _commit=False)
new_list.append(obj)
setattr(self, key, new_list)
else:
setattr(self, key, related_class.add_or_edit(value, _commit=False))
mapper = self.__mapper__
entity_props = {attr.key: attr for attr in mapper.attrs}
for key, value in six.iteritems(data):
prop = entity_props.get(key)
is_column = isinstance(prop, sa.orm.properties.ColumnProperty)
is_relationship = isinstance(prop, sa.orm.properties.RelationshipProperty)
if prop is None:
continue
elif is_column:
setattr(self, key, value)
elif is_relationship:
update_related(key, value, prop)
else:
raise NotImplementedError(
_('Updating property types is not implemented.'))
def to_dict(self, exclude=frozenset(), hybrids=frozenset()):
"""Covert the object properties to a dictionary.
:param exclude: a list of columns to ignore
:param hybrids: a list of the hybrid properties to include in the dictionary.
:returns: a dictionary representation of the object
.. note:: By default hybrid properties are not included in the returned dict. To add a hybrid
property to the returned dict pass a list of the property names and they will be
included.
"""
data = dict((name, getattr(self, name))
for name in self.column_names()
if name not in exclude)
for hybrid in hybrids:
data[hybrid] = getattr(self, hybrid)
return data
@classmethod
def column_names(cls):
"""Return a set of column keys, which may not match attribute names."""
return {col.key for col in cls.__mapper__.columns}
@classmethod
def primary_keys(cls):
"""Helper to get the table's primary key columns."""
return cls.__table__.primary_key.columns
@might_commit
@might_flush
@classmethod
def add(cls, **kwargs):
"""Create a new persisted record constructed from the given kwargs.
:param _commit: enable/disable commit. Default True.
:param _flush: enable/disable flush. Default True.
:return: entity instance created and optionally persisted.
"""
obj = cls()
obj.from_dict(kwargs)
db.session.add(obj)
return obj
@might_commit
@might_flush
@classmethod
def delete(cls, oid):
"""Delete an object from the session
:param oid: the object identifier, normally the primary key
:rtype: bool
:return: The result of the operation
"""
obj = cls.query.get(oid)
if obj is None:
return False
db.session.delete(obj)
return True
@might_commit
@classmethod
def delete_cascaded(cls):
"""For testing, remove all records from the table. Extend for dependencies.
By default, this affects only this entity. By design, though, the entity should
override to call `delete_cascaded` on any entities that have foreign key
dependence on this entity. Key cascades may cover some of these cases, but
db cascades are not always desirable, and tests often need to easily clear
a number of tables to ensure good starting state.
"""
cls.query.delete(synchronize_session=False)
db.session.expire_all()
@might_commit
@might_flush
@classmethod
def edit(cls, oid=None, **kwargs):
"""Edit an object in session with the kwargs, and optionally flush or commit.
:param oid: the object identifier, normally the primary key
:param _commit: enable/disable commit. Default True.
:param _flush: enable/disable flush. Default True.
:return: entity instance edited and optionally flushed/committed
"""
try:
primary_keys = oid or [kwargs.get(x.name)
for x in cls.primary_keys()
if x is not None]
except KeyError:
raise AttributeError(_('No primary key was found in `oid` or `kwargs`'
' for which to retrieve the object to edit'))
obj = cls.query.get(primary_keys)
obj.from_dict(kwargs)
return obj
@classmethod
def get_by(cls, **kwargs):
"""Returns the instance of this class matching the given criteria or
None if there is no record matching the criteria.
If multiple records are returned, an exception is raised.
"""
return cls.query.filter_by(**kwargs).one_or_none()
@classmethod
def get_where(cls, *clauses):
"""
Returns the instance of this class matching the given clause(s) or None
if there is no record matching the criteria.
If multiple records are returned, an exception is raised.
"""
return cls.query.filter(*clauses).one_or_none()
@classmethod
def pairs(cls, key_field, value_field, order_by=(), query=None,
items=None):
"""Return a list of two item tuples
:param key_field: string representing the key
:param value_field: string representing the value
:param order_by: iterable of columns to order the query by
:param query: a base query from which to generate the pairs
:param items: a function which takes one record returned by query and
returns the tuple object
"""
items = items if items else operator.attrgetter(key_field, value_field)
query = query or cls.query
result = query.order_by(*order_by).all()
return [items(obj) for obj in result]
@kwargs_match_entity
@classmethod
def testing_create(cls, **kwargs):
"""Create an object for testing with default data appropriate for the field type
* Will automatically set most field types ignoring those passed in via kwargs.
* Subclasses that have foreign key relationships should setup those relationships before
calling this method. See `testing_set_related` for additional information.
Random data that is set on a column comes from one of these sources:
* `random_data_for_column` entity method provides randoms for most normal column types
* `randomdata` is given in column info as the name of an entity method to call for data::
class MyEntity(MethodsMixin, db.Model):
foo = sa.Column(sa.Unicode, info={'randomdata': 'foo_generator'})
@classmethod
def foo_generator(cls):
return 'bar'
* `random_magnitude` is given in column info to be treated as the +/- random range.
* `random_range` is given specifically as a low/high random range.
Special kwargs:
_numeric_defaults_range: a tuple of (HIGH, LOW) which controls the acceptable defaults of
the two number types. Both integer and numeric (float) fields are controlled by
this setting.
"""
numeric_range = kwargs.pop('_numeric_defaults_range', None)
insp = sa.inspection.inspect(cls)
skippable = lambda column: (column.key in kwargs # skip fields already in kwargs
or column.foreign_keys # skip foreign keys
or column.server_default # skip fields with server defaults
or column.default # skip fields with defaults
or column.primary_key # skip any primary key
)
for column in (col for col in insp.columns if not skippable(col)):
try:
kwargs[column.key] = cls.random_data_for_column(
column, numeric_range)
except ValueError:
pass
return cls.add(**kwargs)
@classmethod
def random_data_for_column(cls, column, numeric_range): # noqa: C901
"""Provides random testing data for a number of column types.
Raises a ValueError if the type is not handled. In that case, override as needed.
"""
if 'randomdata' in column.info:
if type(column.info['randomdata']) is str:
# assume randomdata the is name of a method on the class
callable = getattr(cls, column.info['randomdata'])
data = callable()
return data
return column.info['randomdata']()
default_range = (-100, 100) if numeric_range is None else numeric_range
if isinstance(column.type, sa.types.Enum):
return random.choice(column.type.enums)
elif isinstance(column.type, sa.types.Boolean):
return random.choice([True, False])
elif isinstance(column.type, sa.types.Integer):
if numeric_range is not None:
return random.randint(*default_range)
return dbutils.random_int(column, default_range)
elif isinstance(column.type, sa.types.Float):
return random.uniform(*default_range)
elif isinstance(column.type, sa.types.Numeric):
if numeric_range is not None or column.type.scale is None:
| |
#!/usr/bin/env python3
"""
RESOURCE MANAGEMENT - POLICIES MODULE
Area Resilience - Always keep a Leader in the area!
"""
import threading
import requests
import socket
from time import sleep
from random import randrange
from common.logs import LOG
from common.common import CPARAMS, URLS
from policies.leaderprotectionpolicies import LeaderProtectionPolicies
from requests.exceptions import ConnectTimeout as timeout
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__author__ = 'Universitat Politècnica de Catalunya'
class BackupEntry:
def __init__(self, deviceID, deviceIP, priority):
self.deviceID = deviceID
self.deviceIP = deviceIP
self.priority = priority
self.TTL = 5 / .1  # High only because the entry was just created
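# Illustrative sketch (not part of the original module): how a caller might drive the
# AreaResilience class defined just below. The device id and leader IP are placeholders,
# and the CIMI requester callable is optional.
def _example_run_area_resilience(cimi_requester=None):
    ar = AreaResilience(CIMIRequesterFunction=cimi_requester)
    ar.start('agent-0001')              # spawns the 'area_res' worker thread
    # later, if this agent is elected as backup by the current leader:
    # ar.promotedToBackup('192.168.1.10')
    # and on shutdown:
    # ar.stop()
    return ar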
class AreaResilience:
# SELECTION_PORT = 46051 # 29512 # Deprecated - Keepalive is now REST!
# LEADER_PORT = 46052 # 29513
# MAX_RETRY_ATTEMPTS = 5
#
# TIME_TO_WAIT_BACKUP_SELECTION = 3
# TIME_KEEPALIVE = 1
# TIME_KEEPER = .1
# MINIMUM_BACKUPS = 1
# MAX_TTL = 3 / .1 # 30 ticks ~ 3 secs
PRIORITY_ON_DEMOTION = -2
PRIORITY_ON_REELECTION = 0
PRIORITY_ON_FAILURE = -3
TAG = '\033[34m' + '[AR]: ' + '\033[0m'
def __init__(self, CIMIRequesterFunction=None, leaderprotectionpolicies_obj=LeaderProtectionPolicies()):
self._connected = False
self._imBackup = False
self._imLeader = False
self._imCapable = False
self._leaderFailed = False
self._backupSelected = False
self._startupCorrect = False
self._deviceID = ''
self._leaderIP = ''
self._backupPriority = -1
self._nextPriority = 1
self._lpp = leaderprotectionpolicies_obj
self.backupDatabase = []
self.backupDatabaseLock = threading.Lock()
self._CIMIRequesterFunction = CIMIRequesterFunction
self.th_proc = None
self.th_keep = None
self.isStarted = False
def __imLeader(self):
"""
:return:
"""
self._imLeader = self.__getCIMIData('leader', False)
return self._imLeader
def __imCapable(self):
"""
:return:
"""
# TODO: Capable by evaluation, not hardcoded
return True # By default, all agents will be capable to be leader.
def __getCIMIData(self, key, default=None):
"""
:return:
"""
if self._CIMIRequesterFunction is None:
value = default
else:
value = self._CIMIRequesterFunction(key, default)
return value
def imBackup(self):
return self._imBackup
def imLeader(self):
return self._imLeader
def getBackupDatabase(self):
with self.backupDatabaseLock:
ret = self.backupDatabase.copy()
return ret
def addBackup(self, deviceID, deviceIP, priority):
found = False
with self.backupDatabaseLock:
for backup in self.backupDatabase:
if backup.deviceID == deviceID:
LOG.debug(self.TAG + 'Backup {} found!'.format(deviceID))
found = True
break
if not found:
correct = self.__send_election_message(deviceIP)
if correct:
new_backup = BackupEntry(deviceID, deviceIP, priority)
with self.backupDatabaseLock:
self.backupDatabase.append(new_backup)
LOG.info('Backup {}[{}] added with priority {}'.format(deviceID, deviceIP, priority))
return correct
def deleteBackup(self, deviceID):
"""
:param deviceID:
:return:
"""
# 1- Get and delete backup from database
found = False
correct = False
with self.backupDatabaseLock:
for backup in self.backupDatabase:
if backup.deviceID == deviceID:
LOG.debug(self.TAG + 'Backup {} found!'.format(deviceID))
found = True
break
if found:
# Backup is in the database, delete him!
with self.backupDatabaseLock:
self.backupDatabase.remove(backup)
# And now... Let him know...
correct = self.__send_demotion_message(backup.deviceIP)
return correct
def start(self, deviceID): # TODO: Give deviceID at startup?
"""
:return:
"""
self._deviceID = deviceID
if self.isStarted:
LOG.warning(self.TAG + 'Procedure is already started...')
return False
else:
self.th_proc = threading.Thread(name='area_res', target=self.__common_flow, daemon=True)
self.th_proc.start()
self.isStarted = True
LOG.info(self.TAG + 'Module Started')
return True
def stop(self):
"""
Stop all the module activity
:return:
"""
if self.isStarted:
self._connected = False
if self.th_proc is not None:
while self.th_proc.is_alive():
LOG.debug(self.TAG + 'Waiting {} to resume activity...'.format(self.th_proc.name))
sleep(0.5)
if self.th_keep is not None:
while self.th_keep.is_alive():
LOG.debug(self.TAG + 'Waiting {} to resume activity...'.format(self.th_keep.name))
sleep(0.1)
LOG.info(self.TAG + 'All threads stopped. AreaResilience module is stopped.')
else:
LOG.info(self.TAG + 'Module is not started')
return
def promotedToBackup(self, leaderIP):
"""
The agent is promoted to be a backup
:return:
"""
# First check if Agent was electable
self._leaderIP = leaderIP
if self._imCapable:
LOG.info(self.TAG + 'Becoming backup due leader selection.')
# Then, check if AreaResilience thread is running
if self.th_proc is None:
pass
elif not self.th_proc.is_alive():
pass
elif self._imLeader or self._imBackup:
LOG.error('Agent is already a Backup/Leader. Cannot become a Backup.')
return False
else:
LOG.warning('Area Resilience still starting. Cannot promote in this state. Waiting...')
while self.th_proc.is_alive():
sleep(0.1)
LOG.debug('Successful waiting.')
LOG.debug('Module is ready for promotion.')
self.th_proc = threading.Thread(name='area_res', target=self.__backupLeader_flow, daemon=True)
self.th_proc.start()
self.isStarted = True
return True
else:
if not self._startupCorrect:
LOG.warning('Area Resilience still starting. Cannot promote in this state.')
else:
LOG.error('Agent not capable to be Backup/Leader')
return False
def __common_flow(self):
self._connected = True
if not self.__imLeader():
LOG.info(self.TAG + 'I\'m not a Leader.')
# I'm not a leader
if self.__imCapable():
LOG.info(self.TAG + 'I\'m capable to be Leader.')
self._imCapable = True
# Can be a backup
self.__preSelectionSetup()
LOG.info(self.TAG + 'Waiting to be selected.')
else:
# Can not be a backup
LOG.info(self.TAG + 'I\'m NOT capable to be Leader.')
self._startupCorrect = True
if self._imLeader:
# Starting as Leader
self.__backupLeader_flow()
return
def __backupLeader_flow(self):
if not self._connected:
LOG.error('Module stopped because _connected = False')
return
if not self._imLeader:
# I've been promoted as backup
LOG.info(self.TAG + 'I\'m selected to be a backup. Setting up')
self.__preBackupSetup()
self.__becomeBackup()
if not self._connected:
return
# Multiple backups support
if self._backupPriority > 0:
sleep_time = 1. + 10 * (self._backupPriority - 1)
LOG.info('Waiting {}s before leader takeover...'.format(sleep_time))
sleep(sleep_time)
if not self._connected:
return
LOG.debug('Checking if new Leader is up...')
new_leader = self.__getCIMIData('disc_leaderIP', default='')
LOG.debug('Stored Leader = [{}], Detected Leader = [{}]'.format(self._leaderIP, new_leader))
if new_leader == '' or new_leader == self._leaderIP:
LOG.warning('Leader not detected by Discovery')
elif self._leaderIP != new_leader:
LOG.info('Correct Leader takeover by a backup with more preference.')
try: # TODO: Clean solution
r = requests.get('{}agent'.format(
URLS.build_url_address(URLS.URL_POLICIES_ROLECHANGE, addr='127.0.0.1', port=CPARAMS.POLICIES_PORT)),
timeout=.5)
except:
pass
finally:
return
if not self._connected:
return
if self._imLeader or self._leaderFailed:
# I'm a leader
LOG.info(self.TAG + 'Leader setting up')
self.__becomeLeader()
self.__backupSelection()
return
def __becomeLeader(self): # TODO
"""
:return:
"""
# 1- Shutdown/Notify all the modules involved in the Agent to Leader transition.
if self._leaderFailed:
# Only if leader fails, triggers are needed, otherwise no action is required
try:
r = requests.get(URLS.build_url_address('{}leader'.format(URLS.URL_POLICIES_ROLECHANGE), portaddr=('127.0.0.1', '46050'))) #TODO Addr+Prt by CPARAMS; Parametrize
LOG.info(self.TAG + 'Trigger to AgentStart Switch done. {}'.format(r.json()))
self._imLeader = True
self._imBackup = False
except Exception as ex:
LOG.exception(self.TAG + '_becomeLeader trigger to AgentStart failed')
self.th_keep = threading.Thread(name='ar_keeper', target=self.__keeper, daemon=True)
self.th_keep.start()
def __getTopology(self): # TODO: Get actual CIMI data or Topology from environment
"""
:return:
"""
return self.__getCIMIData('topology', default=[]).copy()
def __backupSelection(self):
"""
:return:
"""
# TODO:
# 1- Check Backups
# 2- Enough backups?
# YES: End sleep(X)
# NO:
# 3- Get topology and select one agent
# If not capable: Go to 3
# 4- promote to Backup
# Success: Correct_backups++
# Go to 2
while self._connected:
correct_backups = 0
with self.backupDatabaseLock:
# Check backups
for backup in self.backupDatabase:
if backup.TTL >= 0:
correct_backups += 1
# Enough?
if correct_backups >= self._lpp.get(self._lpp.BACKUP_MINIMUM, default=1):
# Enough backups
LOG.debug('{} correct backup detected in Leader. Everything is OK.'.format(correct_backups))
else:
# Not enough
if not self._connected:
break
LOG.warning('{} backups detected are not enough. Electing new ones...'.format(correct_backups))
topology = self.__getTopology()
new_backups = []
while self._connected and correct_backups < self._lpp.get(self._lpp.BACKUP_MINIMUM, default=1) and len(topology) > 0:
device = topology[0]
topology.remove(device)
found = False
with self.backupDatabaseLock:
for backup in self.backupDatabase:
if backup.deviceID == device.get('deviceID'):
found = True
break
if not found:
# Todo: Evaluate if selected device is capable
correct = self.__send_election_message(device.get('deviceIP'))
if correct:
new_backup = BackupEntry(device.get('deviceID'), device.get('deviceIP'), self._nextPriority)
with self.backupDatabaseLock:
self.backupDatabase.append(new_backup)
LOG.info('Backup {}[{}] added with priority {}'.format(device.get('deviceID'), device.get('deviceIP'), self._nextPriority))
correct_backups += 1
self._nextPriority += 1
new_backups.append(new_backup)
if correct_backups >= self._lpp.get(self._lpp.BACKUP_MINIMUM, default=1):
# Now we have enough
LOG.info('{} correct backups detected in Leader. {} new backups added.'.format(correct_backups, len(new_backups)))
else:
LOG.warning('{} backups detected are not enough. Waiting for new election.'.format(correct_backups))
# Sleep
if self._connected:
sleep(self._lpp.get(self._lpp.TIME_TO_WAIT_BACKUP_SELECTION))
LOG.info('Leader stopped...')
def __preSelectionSetup(self):
"""
:return:
"""
return
def __preBackupSetup(self):
"""
:return:
"""
if self._imBackup:
pass # Do something here if necessary
def __becomeBackup(self):
"""
:return:
"""
# 1- Send the KeepAlive message to the leader.
# 2- Receive the reply (with preference number).
# If leader down, Backup becomes leader.
# Else repeat.
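# Illustrative reply payload (added; field names inferred from the parsing
# below, values are examples only):
#   {"deviceID": "<leader id>", "backupPriority": 2}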
attempt = 0
counter = 0
payload = {
'deviceID': self._deviceID
}
self._imBackup = True
while self._connected and attempt < self._lpp.get(self._lpp.MAX_RETRY_ATTEMPTS):
stopLoop = False
while self._connected and not stopLoop:
try:
# 1. Requests to Leader Keepalive endpoint
r = requests.post(URLS.build_url_address(URLS.URL_POLICIES_KEEPALIVE, portaddr=(self._leaderIP, CPARAMS.POLICIES_PORT)), json=payload, timeout=0.5)
LOG.debug(self.TAG + 'Keepalive sent [#{}]'.format(counter))
# 2. Process Reply
jreply = r.json()
if r.status_code == 200:
leaderID = jreply['deviceID'] # Todo: Use this
priority = jreply['backupPriority']
# 3. Update Preference
self._backupPriority = priority
LOG.debug(self.TAG + 'Reply received, Leader still
<filename>nitorch/nn/experimental/_affine.py
import torch
import torch.nn as tnn
from ..modules.base import Module
from ..modules.cnn import UNet, CNN
from ..modules.spatial import GridPull, GridExp, GridResize, AffineGrid, \
AffineExp, AffineLog, AffineClassic, \
AffineClassicInverse
from .. import check
from nitorch import spatial
from nitorch.core.linalg import matvec
from nitorch.core.utils import unsqueeze, channel2last, last2channel
from nitorch.core.py import make_list
class AffineMorph(Module):
"""Affine registration network.
This network builds on VoxelMorph, but replaces the U-Net by an
encoding CNN, and the dense spatial transformer by an affine spatial
transformer. Like VoxelMorph, this network encodes deformation on
their tangent space: here, the Lie algebra of a variety of affine
Lie groups is used. Affine transformation matrices are recovered
from their Lie algebra representation using matrix exponentiation.
* VoxelMorph:
target |-(unet)-> velocity -(exp)-> grid -(pull)-> warped_source
source |------------------------------------^
* AffineMorph:
target |-(cnn)-> lieparam -(exp)-> affine -(pull)-> warped_source
source |-------------------------------------^
"""
def __init__(self, dim, basis='CSO', encoder=None, stack=None,
kernel_size=3, interpolation='linear', bound='dct2', *,
_additional_input_channels=0, _additional_output_channels=0):
"""
Parameters
----------
dim : int
Dimensionality of the input (1|2|3)
basis : {'T', 'SO', 'SE', 'D', 'CSO', 'SL', 'GL+', 'Aff+'}, default='CSO'
Basis of a matrix Lie group:
* 'T' : Translations
* 'SO' : Special Orthogonal (rotations)
* 'SE' : Special Euclidean (translations + rotations)
* 'D' : Dilations (translations + isotropic scalings)
* 'CSO' : Conformal Special Orthogonal
(translations + rotations + isotropic scalings)
* 'SL' : Special Linear (rotations + isovolumic zooms + shears)
* 'GL+' : General Linear [det>0] (rotations + zooms + shears)
* 'Aff+': Affine [det>0] (translations + rotations + zooms + shears)
encoder : list[int], optional
Number of channels after each encoding layer of the CNN.
stack : list[int], optional
Number of channels after each fully-connected layer of the CNN.
kernel_size : int or list[int], default=3
Kernel size of the CNN.
interpolation : int or str, default='linear'
Interpolation order.
bound : bound_type, default='dct2'
Boundary conditions of the image.
"""
super().__init__()
exp = AffineExp(dim, basis=basis)
nb_prm = sum(
b.shape[0] for b in exp.basis) + _additional_output_channels
self.cnn = CNN(dim,
input_channels=2 + _additional_input_channels,
output_channels=nb_prm,
encoder=encoder,
stack=stack,
kernel_size=kernel_size,
activation=tnn.LeakyReLU(0.2),
final_activation=None)
self.exp = exp
self.grid = AffineGrid(shift=True)
self.pull = GridPull(interpolation=interpolation,
bound=bound,
extrapolate=False)
self.dim = dim
# register losses/metrics
self.tags = ['image', 'affine']
def forward(self, source, target, *, _loss=None, _metric=None):
"""
Parameters
----------
source : tensor (batch, channel, *spatial)
Source/moving image
target : tensor (batch, channel, *spatial)
Target/fixed image
_loss : dict, optional
If provided, all registered losses are computed and appended.
_metric : dict, optional
If provided, all registered metrics are computed and appended.
Returns
-------
deformed_source : tensor (batch, channel, *spatial)
Deformed source image
affine_prm : tensor (batch, nb_prm)
Affine Lie parameters
"""
# sanity checks
check.dim(self.dim, source, target)
check.shape(target, source, dims=[0], broadcast_ok=True)
check.shape(target, source, dims=range(2, self.dim + 2))
# chain operations
source_and_target = torch.cat((source, target), dim=1)
affine_prm = self.cnn(source_and_target)
affine_prm = affine_prm.reshape(affine_prm.shape[:2])
affine = []
for prm in affine_prm:
affine.append(self.exp(prm))
affine = torch.stack(affine, dim=0)
grid = self.grid(affine, shape=target.shape[2:])
deformed_source = self.pull(source, grid)
# compute loss and metrics
self.compute(_loss, _metric,
image=[deformed_source, target],
affine=[affine_prm])
return deformed_source, affine_prm
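# Added usage sketch (hedged): a minimal illustration of how the AffineMorph
# forward pass documented above might be driven. The tensor shapes, the 'CSO'
# basis and relying on the CNN's default encoder/stack are assumptions made
# purely for illustration; this is not part of the original module.
def _affine_morph_usage_sketch():
    net = AffineMorph(dim=3, basis='CSO')
    source = torch.randn(1, 1, 32, 32, 32)  # moving image (batch, channel, *spatial)
    target = torch.randn(1, 1, 32, 32, 32)  # fixed image, same spatial shape
    losses = {}  # registered losses are appended here by net.compute
    warped, affine_prm = net(source, target, _loss=losses)
    return warped.shape, affine_prm.shape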
class AffineMorphSemiSupervised(AffineMorph):
"""An AffineMorph network with a Categorical loss.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.tags += ['segmentation']
def forward(self, source, target, source_seg=None, target_seg=None,
*, _loss=None, _metric=None):
# sanity checks
check.dim(self.dim, source, target, source_seg, target_seg)
check.shape(target, source, dims=[0], broadcast_ok=True)
check.shape(target, source, dims=range(2, self.dim + 2))
check.shape(target_seg, source_seg, dims=[0], broadcast_ok=True)
check.shape(target_seg, source_seg, dims=range(2, self.dim + 2))
# chain operations
source_and_target = torch.cat((source, target), dim=1)
affine_prm = self.cnn(source_and_target)
affine_prm = affine_prm.reshape(affine_prm.shape[:2])
affine = []
for prm in affine_prm:
affine.append(self.exp(prm))
affine = torch.stack(affine, dim=0)
grid = self.grid(affine, shape=target.shape[2:])
deformed_source = self.pull(source, grid)
if source_seg is not None:
if source_seg.shape[2:] != source.shape[2:]:
grid = spatial.resize_grid(grid, shape=source_seg.shape[2:])
deformed_source_seg = self.pull(source_seg, grid)
else:
deformed_source_seg = None
# compute loss and metrics
self.compute(_loss, _metric,
image=[deformed_source, target],
affine=[affine_prm],
segmentation=[deformed_source_seg, target_seg])
if source_seg is None:
return deformed_source, affine_prm
else:
return deformed_source, deformed_source_seg, affine_prm
class AffineVoxelMorph(Module):
"""Affine + diffeo registration network.
"""
def __init__(self, dim, basis='CSO',
encoder=None, decoder=None, stack=None, kernel_size=3,
interpolation='linear', image_bound='dct2', grid_bound='dft',
downsample_velocity=2, *, _input_channels=2):
super().__init__()
resize_factor = make_list(downsample_velocity, dim)
resize_factor = [1 / f for f in resize_factor]
affexp = AffineExp(dim, basis=basis)
nb_prm = sum(b.shape[0] for b in affexp.basis)
self.cnn = CNN(dim,
input_channels=2,
output_channels=nb_prm,
encoder=encoder,
stack=stack,
kernel_size=kernel_size,
activation=tnn.LeakyReLU(0.2),
final_activation=None)
self.affexp = affexp
self.unet = UNet(dim,
input_channels=_input_channels,
output_channels=dim,
encoder=encoder,
decoder=decoder,
kernel_size=kernel_size,
activation=tnn.LeakyReLU(0.2))
self.resize = GridResize(interpolation=interpolation,
bound=grid_bound,
factor=resize_factor)
self.velexp = GridExp(interpolation=interpolation,
bound=grid_bound)
self.pull = GridPull(interpolation=interpolation,
bound=image_bound,
extrapolate=False)
self.dim = dim
# register losses/metrics
self.tags = ['image', 'velocity', 'affine', 'segmentation']
def exp(self, velocity, affine=None, displacement=False):
"""Generate a deformation grid from tangent parameters.
Parameters
----------
velocity : (batch, *spatial, nb_dim)
Stationary velocity field
affine : (batch, nb_prm)
Affine parameters
displacement : bool, default=False
Return a displacement field (voxel to shift) rather than
a transformation field (voxel to voxel).
Returns
-------
grid : (batch, *spatial, nb_dim)
Deformation grid (transformation or displacement).
"""
info = {'dtype': velocity.dtype, 'device': velocity.device}
# generate grid
shape = velocity.shape[1:-1]
velocity_small = self.resize(velocity, type='displacement')
grid = self.velexp(velocity_small)
grid = self.resize(grid, shape=shape, type='grid')
if affine is not None:
# exponentiate
affine_prm = affine
affine = []
for prm in affine_prm:
affine.append(self.affexp(prm))
affine = torch.stack(affine, dim=0)
# shift center of rotation
affine_shift = torch.cat((
torch.eye(self.dim, **info),
-torch.as_tensor(shape, **info)[:, None] / 2),
dim=1)
affine = spatial.affine_matmul(affine, affine_shift)
affine = spatial.affine_lmdiv(affine_shift, affine)
# compose
affine = unsqueeze(affine, dim=-3, ndim=self.dim)
lin = affine[..., :self.dim, :self.dim]
off = affine[..., :self.dim, -1]
grid = matvec(lin, grid) + off
if displacement:
grid = grid - spatial.identity_grid(grid.shape[1:-1], **info)
return grid
def forward(self, source, target, source_seg=None, target_seg=None,
*, _loss=None, _metric=None):
"""
Parameters
----------
source : tensor (batch, channel, *spatial)
Source/moving image
target : tensor (batch, channel, *spatial)
Target/fixed image
_loss : dict, optional
If provided, all registered losses are computed and appended.
_metric : dict, optional
If provided, all registered metrics are computed and appended.
Returns
-------
deformed_source : tensor (batch, channel, *spatial)
Deformed source image
affine_prm : tensor (batch, nb_prm)
Affine Lie parameters
"""
# sanity checks
check.dim(self.dim, source, target, source_seg, target_seg)
check.shape(target, source, dims=[0], broadcast_ok=True)
check.shape(target, source, dims=range(2, self.dim + 2))
check.shape(target_seg, source_seg, dims=[0], broadcast_ok=True)
check.shape(target_seg, source_seg, dims=range(2, self.dim + 2))
# chain operations
source_and_target = torch.cat((source, target), dim=1)
# generate affine
affine_prm = self.cnn(source_and_target)
affine_prm = affine_prm.reshape(affine_prm.shape[:2])
# generate velocity
velocity = self.unet(source_and_target)
velocity = channel2last(velocity)
# generate deformation grid
grid = self.exp(velocity, affine_prm)
# deform
deformed_source = self.pull(source, grid)
if source_seg is not None:
if source_seg.shape[2:] != source.shape[2:]:
grid = spatial.resize_grid(grid, shape=source_seg.shape[2:])
deformed_source_seg = self.pull(source_seg, grid)
else:
deformed_source_seg = None
# compute loss and metrics
self.compute(_loss, _metric,
image=[deformed_source, target],
velocity=[velocity],
segmentation=[deformed_source_seg, target_seg],
affine=[affine_prm])
if deformed_source_seg is None:
return deformed_source, velocity, affine_prm
else:
return deformed_source, deformed_source_seg, velocity, affine_prm
class DenseToAffine(Module):
"""Convert a dense displacement field to an affine matrix"""
def __init__(self, shift=True):
"""
Parameters
----------
shift : bool, default=True
Apply a shift so that the center of rotation is in the
center of the field of view.
"""
super().__init__()
self.shift = shift
def forward(self, grid, **overload):
"""
Parameters
----------
grid : (N, *spatial, dim)
Displacement grid
overload : dict
Returns
-------
aff : (N, dim+1, dim+1)
Affine matrix that is closest to grid in the least-squares sense
"""
shift = overload.get('shift', self.shift)
grid = torch.as_tensor(grid)
info = dict(dtype=grid.dtype, device=grid.device)
nb_dim = grid.shape[-1]
shape = grid.shape[1:-1]
if shift:
affine_shift = torch.cat((
torch.eye(nb_dim, **info),
-torch.as_tensor(shape, **info)[:, None] / 2),
dim=1)
affine_shift = spatial.as_euclidean(affine_shift)
# the forward model is:
# phi(x) = M\A*M*x
# where phi is a *transformation* field, M is the shift matrix
# and A is the affine matrix.
# We can decompose phi(x) = x + d(x), where d is a *displacement*
# field, yielding:
# d(x) = M\A*M*x - x = (M\A*M - I)*x := B*x
# If we write `d(x)` and `x` as large vox*(dim+1) matrices `D`
# and `G`, we have:
# D = G*B'
# Therefore, the least squares B is obtained as:
# B' = inv(G'*G) * (G'*D)
# Then, A is
# A = M*(B + I)/M
#
# Finally, we project
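# (Added, hedged sketch; the original implementation is truncated here and
# may differ.) With G the identity grid in homogeneous coordinates,
# flattened to (vox, dim+1), and D the displacement field flattened to
# (N, vox, dim), the least-squares B can be obtained as
#   B = torch.linalg.lstsq(G, D).solution.transpose(-1, -2)
# and A is then recovered by adding the identity and undoing the recentring
# with affine_shift, e.g. via spatial.affine_matmul / spatial.affine_lmdiv
# as in AffineVoxelMorph.exp above.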
hits=self.hits,
hits_index=self.hits_index,
last_completed_m26_frame_ids=self.last_completed_m26_frame_ids,
timing_offset=self.timing_offset,
build_all_events=build_all_events,
analyze_m26_header_ids=self.analyze_m26_header_ids,
plane_id_to_index=self.plane_id_to_index)
# Create a copy of the hits array that is returned
hits = self.hits[:self.hits_index + 1].copy()
self.hits_index -= (self.hits_index + 1)
return hits, telescope_data
@njit(locals={'trigger_data_index': numba.int64, 'telescope_data_index': numba.int64, 'trigger_status': numba.uint32, 'last_trigger_number': numba.int64, 'last_trigger_timestamp': numba.int64, 'n_missing_events': numba.uint32})
def _interpret_raw_data(raw_data, trigger_data, trigger_data_index, telescope_data, telescope_data_index, m26_frame_ids, m26_frame_length, m26_data_loss, m26_word_index, m26_timestamps, last_m26_timestamps, m26_n_words, m26_rows, m26_frame_status, last_completed_m26_frame_ids, event_number, trigger_number, trigger_timestamp, add_missing_events, build_all_events, analyze_m26_header_ids, plane_id_to_index):
''' This function interprets the Mimosa26 telescope raw data and creates temporary trigger and telescope data arrays.
The interpreter checks for trigger and Mimosa26 data errors.
Parameters:
-----------
raw_data : np.array
The array with the raw data words.
TBD
'''
# Loop over the raw data words
for raw_data_word in raw_data:
if is_mimosa_data(raw_data_word): # Check if word is from Mimosa26.
# Check to which plane the data belongs
plane_id = get_plane_number(raw_data_word) # The plane the current word belongs to (0 to 5)
for analyze_m26_header_id in analyze_m26_header_ids:
if plane_id == analyze_m26_header_id:
break
else:
continue # Do not interpret data of planes which should be skipped
plane_index = plane_id_to_index[plane_id]
# In the following, interpretation of the raw data words of the actual plane
# Check for data loss bit set by the M26 RX FSM
if is_data_loss(raw_data_word):
# Setting the data loss flag to true.
# The data loss bit is set by the M26 RX FSM.
# The bit is set only once after each data loss, i.e.,
# the first data word after the lost data words.
m26_data_loss[plane_index] = True
if is_frame_header(raw_data_word): # New frame for actual plane, M26 timestamp (LSB), frame header0
# Get Mimosa26 timestamp from raw data word (LSB)
last_m26_timestamps[plane_index] = m26_timestamps[plane_index]
m26_timestamps[plane_index] = (m26_timestamps[plane_index] & 0x7fffffffffff0000) | get_m26_timestamp_low(raw_data_word)
m26_word_index[plane_index] = 0
# Reset parameters after header
m26_frame_length[plane_index] = 0
m26_n_words[plane_index] = 0
# Set the status bits for previously incomplete frames
index = telescope_data_index
while index >= 0:
if telescope_data[index]['plane'] == plane_id:
if telescope_data[index]['frame_id'] > last_completed_m26_frame_ids[plane_index]:
telescope_data[index]['frame_status'] |= DATA_ERROR
else:
break
index -= 1
m26_data_loss[plane_index] = False
m26_frame_status[plane_index] = 0
elif m26_data_loss[plane_index] is True: # Trash data
# Nothing to do, do not trust data
continue
else: # Interpreting M26 raw data
m26_word_index[plane_index] += 1
if m26_word_index[plane_index] == 1: # Mimosa26 timestamp, M26 timestamp (MSB), frame header1
# Check for 32bit timestamp overflow
if m26_timestamps[plane_index] >= 0 and get_m26_timestamp_high(raw_data_word) < (m26_timestamps[plane_index] & 0x00000000ffff0000):
m26_frame_status[plane_index] |= TIMESTAMP_OVERFLOW
m26_timestamps[plane_index] = np.int64(2**32) + m26_timestamps[plane_index]
# Get Mimosa26 timestamp from raw data word (MSB)
m26_timestamps[plane_index] = get_m26_timestamp_high(raw_data_word) | (m26_timestamps[plane_index] & 0x7fffffff0000ffff)
elif m26_word_index[plane_index] == 2: # Mimosa26 frame ID
# Get Mimosa26 frame ID from raw data word (LSB)
m26_frame_ids[plane_index] = (m26_frame_ids[plane_index] & 0x7fffffffffff0000) | get_frame_id_low(raw_data_word)
elif m26_word_index[plane_index] == 3: # Mimosa26 frame ID
# Check for 32bit frame ID overflow
if m26_frame_ids[plane_index] >= 0 and get_frame_id_high(raw_data_word) < (m26_frame_ids[plane_index] & 0x00000000ffff0000):
m26_frame_status[plane_index] |= FRAME_ID_OVERFLOW
m26_frame_ids[plane_index] = np.int64(2**32) + m26_frame_ids[plane_index]
# Get Mimosa26 frame ID from raw data word (MSB)
m26_frame_ids[plane_index] = get_frame_id_high(raw_data_word) | (m26_frame_ids[plane_index] & 0x7fffffff0000ffff)
elif m26_word_index[plane_index] == 4: # Mimosa26 frame length
m26_frame_length[plane_index] = get_frame_length(raw_data_word)
if m26_frame_length[plane_index] > 570: # Defined in the Mimosa26 protocol, no more than 570 "useful" data words
m26_data_loss[plane_index] = True
continue
elif m26_word_index[plane_index] == 5: # Mimosa26 frame length, a second time
if m26_frame_length[plane_index] != get_frame_length(raw_data_word): # DO0 & DO1 should always have the same data length
m26_data_loss[plane_index] = True
continue
else:
m26_frame_length[plane_index] += get_frame_length(raw_data_word)
elif m26_word_index[plane_index] == 5 + m26_frame_length[plane_index] + 1: # Frame trailer0
if not is_frame_trailer0(raw_data_word):
m26_data_loss[plane_index] = True
continue
elif m26_word_index[plane_index] == 5 + m26_frame_length[plane_index] + 2: # Frame trailer1
if not is_frame_trailer1(raw_data_word, plane=plane_id):
m26_data_loss[plane_index] = True
continue
else:
last_completed_m26_frame_ids[plane_index] = m26_frame_ids[plane_index]
elif m26_word_index[plane_index] > 5 + m26_frame_length[plane_index] + 2: # Ignore any occurrence of additional raw data words
m26_data_loss[plane_index] = True
continue
else: # Column / Row words (actual data word with hits)
if m26_n_words[plane_index] == 0: # First word contains the row info and the number of data words for this row
if m26_word_index[plane_index] == 5 + m26_frame_length[plane_index]: # Always an even number of words, otherwise this fill word is used
# Ignore this fill word
continue
else:
m26_n_words[plane_index] = get_n_words(raw_data_word)
m26_rows[plane_index] = get_row(raw_data_word) # Get row from data word
if m26_rows[plane_index] >= 576: # Row overflow
m26_data_loss[plane_index] = True
continue
if has_overflow(raw_data_word):
m26_frame_status[plane_index] |= OVERFLOW_FLAG # set overflow bit
else:
m26_frame_status[plane_index] &= ~OVERFLOW_FLAG # unset overflow bit
else:
m26_n_words[plane_index] = m26_n_words[plane_index] - 1 # Count down the words
n_hits = get_n_hits(raw_data_word)
column = get_column(raw_data_word) # Get column from data word
if column >= 1152: # Column overflow
m26_data_loss[plane_index] = True
continue
for k in range(n_hits + 1):
if column + k >= 1152:
m26_data_loss[plane_index] = True
break
# Increase index
telescope_data_index += 1
# extend telescope data array if necessary
if telescope_data_index >= telescope_data.shape[0]:
# Remove old hit data from the array for each plane individually. This prevents the telescope data array from growing too large
# when the time until the next trigger is very long, since telescope data has to be buffered until the next trigger.
select = (telescope_data['plane'] == plane_id)
select &= (telescope_data['time_stamp'] < (m26_timestamps[plane_index] - MAX_BUFFER_TIME_SLIP * MIMOSA_FREQ * 10**6))
count_outdated = np.sum(select)
if count_outdated:
telescope_data = telescope_data[~select]
telescope_data_index = telescope_data_index - count_outdated
# extend telescope data array if necessary
telescope_data_tmp = np.zeros(shape=max(1, int(raw_data.shape[0] / 2)), dtype=telescope_data_dtype)
telescope_data = np.concatenate((telescope_data, telescope_data_tmp))
# Store hits
telescope_data[telescope_data_index]['plane'] = plane_id
telescope_data[telescope_data_index]['time_stamp'] = m26_timestamps[plane_index]
telescope_data[telescope_data_index]['frame_id'] = m26_frame_ids[plane_index]
telescope_data[telescope_data_index]['column'] = column + k
telescope_data[telescope_data_index]['row'] = m26_rows[plane_index]
telescope_data[telescope_data_index]['frame_status'] = m26_frame_status[plane_index]
elif is_trigger_word(raw_data_word): # Raw data word is TLU/trigger word
# Reset trigger status
trigger_status = 0
# Get latest telescope timestamp and set trigger timestamp
last_trigger_timestamp = trigger_timestamp
# Get largest M26 timestamp
for tmp_plane_index, _ in enumerate(analyze_m26_header_ids):
if last_m26_timestamps[tmp_plane_index] > trigger_timestamp:
trigger_timestamp = last_m26_timestamps[tmp_plane_index]
# Calculating 63bit timestamp from 15bit trigger timestamp
# and last telescope timestamp (frame header timestamp).
# Assumption: the telescope timestamp is updated more frequently than
# the 15bit trigger timestamp can overflow. A frame occurs
# every 4608 clock cycles (115.2 us).
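# Worked example (added, illustrative numbers): if trigger_timestamp was
# 0x12345 and the new raw word carries the 15-bit value 0x0123, the
# assignment below yields (0x12345 & 0x7fffffffffff8000) | 0x0123 = 0x10123;
# since that is not larger than the previous value, the overflow branch
# adds 2**15, giving 0x18123.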
# Get trigger timestamp from raw data word
trigger_timestamp = (0x7fffffffffff8000 & trigger_timestamp) | get_trigger_timestamp(raw_data_word)
# Check for 15bit trigger timestamp overflow
if last_trigger_timestamp >= 0 and trigger_timestamp <= last_trigger_timestamp:
trigger_status |= TRIGGER_TIMESTAMP_OVERFLOW
trigger_timestamp = np.int64(2**15) + trigger_timestamp
# Copy of trigger number
last_trigger_number = trigger_number
# Check for 16bit trigger number overflow
if trigger_number >= 0 and get_trigger_number(raw_data_word, trigger_data_format=2) <= (trigger_number & 0x000000000000ffff):
trigger_status |= TRIGGER_NUMBER_OVERFLOW
trigger_number = np.int64(2**16) + trigger_number
# Get trigger number from raw data word
if trigger_number < 0:
trigger_number = get_trigger_number(raw_data_word, trigger_data_format=2)
else:
trigger_number = (0x7fffffffffff0000 & trigger_number) | get_trigger_number(raw_data_word, trigger_data_format=2)
# Check validity of trigger number
# Trigger number has to increase by 1
if trigger_data_index >= 0:
# Check if trigger number has increased by 1
if last_trigger_number < 0:
n_missing_events = 0
else:
n_missing_events = trigger_number - (last_trigger_number + 1)
if n_missing_events != 0:
if n_missing_events > 0 and add_missing_events:
for i in range(n_missing_events):
# Increase index
trigger_data_index += 1
# extend trigger data array if necessary
if trigger_data_index >= trigger_data.shape[0]:
trigger_data_tmp = np.zeros(shape=max(1, int(raw_data.shape[0] / 6)), dtype=trigger_data_dtype)
trigger_data = np.concatenate((trigger_data, trigger_data_tmp))
# Increase event number
event_number += 1
# Store trigger data
trigger_data[trigger_data_index]['event_number'] = event_number # Event number
trigger_data[trigger_data_index]['trigger_time_stamp'] = -1 # Timestamp of TLU word
trigger_data[trigger_data_index]['trigger_number'] = trigger_data[trigger_data_index - 1]['trigger_number'] + 1 + i
trigger_data[trigger_data_index]['trigger_status'] = NO_TRIGGER_WORD_ERROR # Trigger status
else:
trigger_status |= TRIGGER_NUMBER_ERROR
# Increase index
trigger_data_index += 1
# extend trigger data array if necessary
if trigger_data_index >= trigger_data.shape[0]:
trigger_data_tmp = np.zeros(shape=max(1, int(raw_data.shape[0] / 6)), dtype=trigger_data_dtype)
trigger_data = np.concatenate((trigger_data, trigger_data_tmp))
# Increase event number
event_number += 1
# Store trigger data
trigger_data[trigger_data_index]['event_number'] = event_number # Event number
trigger_data[trigger_data_index]['trigger_number'] = trigger_number
trigger_data[trigger_data_index]['trigger_time_stamp'] = trigger_timestamp # Timestamp of TLU word
trigger_data[trigger_data_index]['trigger_status'] = trigger_status # Trigger status
else: # Raw data contains unknown word, neither M26 nor TLU word
for tmp_plane_index, _ in
import random
from modules import scorecard
from modules.commentary import scoreRun
from modules.scoreinput import playBall
from modules.batterchoice import batterChoice
from modules.bowlerchoice import fieldChoice
# First Innings - Main code
# team_1_array : Array containing name of all players in the first team
# team_2_array : Array containing name of all players in the second team
# innings : First or second innings, except for test cricket.
# bat_bowl_choice : Result of the toss ( bat / field )
# batting_score : The team score, usually initialized to zero.
# innings_data : Contains the details of the innings
# start_message : Message displayed before innings start.
# max_overs : Maximum number of overs available in the innings.
# max_wickets : Maximum number of wickets in the innings.
# is_test : Boolean: Is the game a test match or a limited-overs match?
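# Illustrative team array layout (added), inferred from the indices used in
# this function; index 14 is the "is human" flag, indices 12-13 are not
# referenced here:
#   [team_name, player1, ..., player11, <unused here>, <unused here>, is_human]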
def scoringInnings(team_1_array,
team_2_array,
innings,
bat_bowl_choice,
batting_score,
innings_data,
start_message,
max_overs,
max_wickets,
is_test):
""" Play the scoring innings
In limited-overs cricket, this is always the first innings
Arguments:
team_1_array : (List) Names of all players in the first team
team_2_array : (List) Names of all players in the second team
innings : (int) First or second innings, except for test cricket.
bat_bowl_choice : (string) Result of the toss ( bat / field )
batting_score : (int) The team score, usually initialized to zero.
innings_data : (object) Contains the details of the innings
start_message : (string) Message displayed before innings start.
max_overs : (int) Maximum number of overs available in the innings.
max_wickets : (int) Maximum number of wickets in the innings.
is_test : (Boolean) Is this a test match or a limited-overs match?
Returns:
int: The score of the team
Note:
During runtime, this function modifies the innings data object.
"""
# Regenerate team details
T1 = team_1_array[0]
T2 = team_2_array[0]
# Batters
A1 = team_1_array[1]
A2 = team_1_array[2]
A3 = team_1_array[3]
A4 = team_1_array[4]
A5 = team_1_array[5]
A6 = team_1_array[6]
A7 = team_1_array[7]
A8 = team_1_array[8]
A9 = team_1_array[9]
A10 = team_1_array[10]
A11 = team_1_array[11]
# Bowlers
B1 = team_2_array[1]
B2 = team_2_array[2]
B3 = team_2_array[3]
B4 = team_2_array[4]
B5 = team_2_array[5]
B6 = team_2_array[6]
B7 = team_2_array[7]
B8 = team_2_array[8]
B9 = team_2_array[9]
B10 = team_2_array[10]
B11 = team_2_array[11]
# Innings setup
innings_data["battingteam"] = T1
innings_data["bowlingteam"] = T2
print(start_message)
# Initialize team score = 0
batting_score = 0
# Score for each ball faced in each over of the first innings
ball_score = []
# Numerical score for each ball faced in an over, first innings
ball_score_integervalue = []
# List of all batters not out
batterlist = [A1, A3, A4, A5, A6, A7, A8, A9, A10, A11, A2]
# Choose the openers
player1 = batterChoice(batter_list=batterlist,
non_striker=0,
innings=innings,
user_choice_batfield=bat_bowl_choice,
is_batter_human=team_1_array[14])
player2 = batterChoice(batter_list=batterlist,
non_striker=player1,
innings=innings,
user_choice_batfield=bat_bowl_choice,
is_batter_human=team_1_array[14])
# Batter statistics
batter_stats = {
A1: [0, 0, 0, 0],
A2: [0, 0, 0, 0],
A3: [0, 0, 0, 0],
A4: [0, 0, 0, 0],
A5: [0, 0, 0, 0],
A6: [0, 0, 0, 0],
A7: [0, 0, 0, 0],
A8: [0, 0, 0, 0],
A9: [0, 0, 0, 0],
A10: [0, 0, 0, 0],
A11: [0, 0, 0, 0]
}
# Bowler statistics
bowler_stats = {
B1: [0, 0, 0, 0],
B2: [0, 0, 0, 0],
B3: [0, 0, 0, 0],
B4: [0, 0, 0, 0],
B5: [0, 0, 0, 0],
B6: [0, 0, 0, 0],
B7: [0, 0, 0, 0],
B8: [0, 0, 0, 0],
B9: [0, 0, 0, 0],
B10: [0, 0, 0, 0],
B11: [0, 0, 0, 0]
}
# List of all players in fielding side for bowler selection
bowlerlist = [B1, B2, B3, B4, B5, B6, B7, B8, B9, B10, B11]
bowlers_history = ['']
score = 0
wicket = 0
# Over increment variable: i
i = 1
# Declaring variable
isInningsDeclared = False
# First player is on strike
onstrike = player1
gameIsPlaying = True
while gameIsPlaying:
# i^th over
# for i in range(1, max_overs+1):
# End the innings if all overs are bowled
if i == max_overs + 1 and not is_test:
gameIsPlaying = False
# End the innings if the batting side is all out
elif wicket == max_wickets:
gameIsPlaying = False
# End the innings in test match if innings closed
elif isInningsDeclared:
gameIsPlaying = False
# Innings in progress
else:
over = i
print("Over", i)
# The fielding side selects the bowler
bowler = fieldChoice(bowlerlist,
innings,
bat_bowl_choice,
is_bowler_human=team_2_array[14])
# A bowler can't bowl for more than 20% of total overs
# No bowler is allowed consecutive overs
if (bowler_stats[bowler][0] >= (max_overs / 5)
or bowler == bowlers_history[i-1]):
bowlerlist.pop(bowlerlist.index(bowler))
bowler = random.choice(bowlerlist)
if bowler_stats[bowler][0] < (max_overs / 5) or is_test:
bowlerlist.append(bowlers_history[i-1])
# Each over has 6 balls
for j in range(1, 7):
# End the innings once the batting side is all out
if wicket == max_wickets:
gameIsPlaying = False
# End the innings in test match if innings closed
elif isInningsDeclared:
gameIsPlaying = False
# Over in progress
else:
print("Ball", j)
# Bat or bowl
ball_outcome = playBall(bowler=bowler,
batter=onstrike,
is_declare_permitted=is_test,
bowler_human=team_2_array[14],
batter_human=team_1_array[14])
# Outcome of the ball: 0, 1, 2, 3, 4, 5, 6, W.
# To declare test innings: Ball outcome: -1.
# Declaring innings works only in test cricket.
if ball_outcome == 'Declared':
run = 0
isInningsDeclared = True
# Batter scores runs
elif ball_outcome != 'W':
run = int(ball_outcome)
# Batter is out
else:
ball_outcome = 'W'
run = 0
# The team loses a wicket
wicket += 1
# The bowler claims a wicket
# (NOTE: run-out is not supported)
bowler_stats[bowler][3] += 1
# Add the outcome to the score
score = score + run
# The bowler bowled a ball.
# Add to bowler statistics.
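# Note (added): bowler_stats[...][0] stores overs in an overs.balls
# convention: each ball adds 0.1 (e.g. 2.3 = 2 overs and 3 balls) and on
# the sixth ball the value is rounded up to the next whole over, e.g.
# 2.5 -> int((25 + 5) / 10) = 3.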
if j == 6:
# The bowler bowled an over
bowler_stats[bowler][0] = int(
(((bowler_stats[bowler][0])*10)+5)/10)
else:
# The bowler did not complete the over.
bowler_stats[bowler][0] = (
((bowler_stats[bowler][0])*10)+1)/10
# The bowler concedes runs
bowler_stats[bowler][2] += run
# The batter scored the runs
batter_stats[onstrike][0] += run
# The batter faced a ball
batter_stats[onstrike][1] += 1
# Increment number of 4s if batter scores a four
if run == 4:
batter_stats[onstrike][2] += 1
# Increment number of 6s if batter scores a six
if run == 6:
batter_stats[onstrike][3] += 1
# Display the outcome and the commentary.
scoreRun(score=ball_outcome,
bowler=bowler,
batter=onstrike)
# When a wicket falls,
if ball_outcome == 'W' and wicket < max_wickets:
# The dismissed batter walks back
if onstrike == player1:
player1 = ''
elif player2 == onstrike:
player2 = ''
# Select the new batter.
if bat_bowl_choice == 'bat':
batterlist.append('')
if player1 == '':
c = bat_bowl_choice
c1 = team_1_array[14]
onstrike = batterChoice(batter_list=batterlist,
non_striker=player2,
innings=innings,
user_choice_batfield=c,
is_batter_human=c1)
player1 = onstrike
elif player2 == '':
c = bat_bowl_choice
c1 = team_1_array[14]
onstrike = batterChoice(batter_list=batterlist,
non_striker=player1,
innings=innings,
user_choice_batfield=c,
is_batter_human=c1)
player2 = onstrike
# Append the outcome to the over.
# Wicket counts as 'W'.
ball_score.append(ball_outcome)
# Generate the outcome metadata
if onstrike == player1:
nonstriker = player2
else:
nonstriker = player1
ball_stats = {
"bowler": bowler,
"batter": onstrike,
"nonstriker": nonstriker,
"over": i,
"ball": j,
"result": ball_outcome
}
# Append the outcome to the entire innings
innings_data["data"].append(ball_stats)
# Append the scored runs to the over.
# No run is scored as a batter is dismissed.
# Hence, wicket counts as 0
ball_score_integervalue.append(run)
# The batters cross for runs.
# If odd number of runs are scored,
# the batters interchange positions
if run % 2 != 0:
if onstrike == player1:
onstrike = player2
else:
onstrike = player1
# End if the over. This bowler just completed an over
bowlers_history.append(bowler)
# Maiden over bowled
if ball_score_integervalue == [0, 0, 0, 0, 0, 0]:
bowler_stats[bowler][1] += 1
# Display the over statistics
print("This over:", ball_score)
print("Batting:")
batter1_stats_string = (str(player1)
+ " : "
+ str(batter_stats[player1][0])
+ " ("
+ str(batter_stats[player1][1])
+ ")")
print(batter1_stats_string)
batter2_stats_string = (str(player2)
+ " : "
+ str(batter_stats[player2][0])
| |
import datetime as dt
import numbers
from functools import cached_property, partial
from typing import List, Sequence, Union
import numpy as np
import pandas as pd
from . import algo, constants, DateUtils, utils
from .config import get_db_interface
from .DBInterface import DBInterface
from .utils import TickerSelector
class FactorBase(object):
def __init__(self, factor_name: str = None):
super().__init__()
self._factor_name = factor_name
self.name = factor_name
def set_factor_name(self, name):
self.name = name
return self
def get_data(self, *args, **kwargs) -> Union[pd.Series, List]:
"""获取数据"""
s = self._get_data(*args, **kwargs)
if self.name and isinstance(s, pd.Series):
s.name = self.name
return s
def _get_data(self, *args, **kwargs) -> pd.Series:
"""获取数据"""
raise NotImplementedError()
def __and__(self, other):
def sub_get_data(self, **kwargs):
return self.f1.get_data(**kwargs) & self.f2.get_data(**kwargs)
Foo = type('', (BinaryFactor,), {'_get_data': sub_get_data})
return Foo(self, other)
def __invert__(self):
def sub_get_data(self, **kwargs):
return ~self.f.get_data(**kwargs)
Foo = type('', (UnaryFactor,), {'_get_data': sub_get_data})
return Foo(self)
def __add__(self, other):
if isinstance(other, (numbers.Number, np.number)):
def sub_get_data(self, **kwargs):
return self.f.get_data(**kwargs) + other
Foo = type('', (UnaryFactor,), {'_get_data': sub_get_data})
return Foo(self)
else:
def sub_get_data(self, **kwargs):
return self.f1.get_data(**kwargs) + self.f2.get_data(**kwargs)
Foo = type('', (BinaryFactor,), {'_get_data': sub_get_data})
return Foo(self, other)
def __sub__(self, other):
if isinstance(other, (numbers.Number, np.number)):
def sub_get_data(self, **kwargs):
return self.f.get_data(**kwargs) - other
Foo = type('', (UnaryFactor,), {'_get_data': sub_get_data})
return Foo(self)
else:
def sub_get_data(self, **kwargs):
return self.f1.get_data(**kwargs) - self.f2.get_data(**kwargs)
Foo = type('', (BinaryFactor,), {'_get_data': sub_get_data})
return Foo(self, other)
def __mul__(self, other):
if isinstance(other, (numbers.Number, np.number)):
def sub_get_data(self, **kwargs):
return self.f.get_data(**kwargs) * other
Foo = type('', (UnaryFactor,), {'_get_data': sub_get_data})
return Foo(self)
else:
def sub_get_data(self, **kwargs):
return self.f1.get_data(**kwargs) * self.f2.get_data(**kwargs)
Foo = type('', (BinaryFactor,), {'_get_data': sub_get_data})
return Foo(self, other)
def __truediv__(self, other):
if isinstance(other, (numbers.Number, np.number)):
def sub_get_data(self, **kwargs):
return self.f.get_data(**kwargs) / other
Foo = type('', (UnaryFactor,), {'_get_data': sub_get_data})
return Foo(self)
else:
def sub_get_data(self, **kwargs):
return self.f1.get_data(**kwargs) / self.f2.get_data(**kwargs)
Foo = type('', (BinaryFactor,), {'_get_data': sub_get_data})
return Foo(self, other)
def __gt__(self, other):
if isinstance(other, (numbers.Number, np.number)):
def sub_get_data(self, **kwargs):
return self.f.get_data(**kwargs) > other
Foo = type('', (UnaryFactor,), {'_get_data': sub_get_data})
return Foo(self)
else:
def sub_get_data(self, **kwargs):
return self.f1.get_data(**kwargs) > self.f2.get_data(**kwargs)
Foo = type('', (BinaryFactor,), {'_get_data': sub_get_data})
return Foo(self, other)
def __lt__(self, other):
if isinstance(other, (numbers.Number, np.number)):
def sub_get_data(self, **kwargs):
return self.f.get_data(**kwargs) < other
Foo = type('', (UnaryFactor,), {'_get_data': sub_get_data})
return Foo(self)
else:
def sub_get_data(self, **kwargs):
return self.f1.get_data(**kwargs) < self.f2.get_data(**kwargs)
Foo = type('', (BinaryFactor,), {'_get_data': sub_get_data})
return Foo(self, other)
def __ge__(self, other):
if isinstance(other, (numbers.Number, np.number)):
def sub_get_data(self, **kwargs):
return self.f.get_data(**kwargs) >= other
Foo = type('', (UnaryFactor,), {'_get_data': sub_get_data})
return Foo(self)
else:
def sub_get_data(self, **kwargs):
return self.f1.get_data(**kwargs) >= self.f2.get_data(**kwargs)
Foo = type('', (BinaryFactor,), {'_get_data': sub_get_data})
return Foo(self, other)
def __le__(self, other):
if isinstance(other, (numbers.Number, np.number)):
def sub_get_data(self, **kwargs):
return self.f.get_data(**kwargs) <= other
Foo = type('', (UnaryFactor,), {'_get_data': sub_get_data})
return Foo(self)
else:
def sub_get_data(self, **kwargs):
return self.f1.get_data(**kwargs) <= self.f2.get_data(**kwargs)
Foo = type('', (BinaryFactor,), {'_get_data': sub_get_data})
return Foo(self, other)
def __eq__(self, other):
if isinstance(other, (numbers.Number, np.number)):
def sub_get_data(self, **kwargs):
return self.f.get_data(**kwargs) == other
Foo = type('', (UnaryFactor,), {'_get_data': sub_get_data})
return Foo(self)
else:
def sub_get_data(self, **kwargs):
return self.f1.get_data(**kwargs) == self.f2.get_data(**kwargs)
Foo = type('', (BinaryFactor,), {'_get_data': sub_get_data})
return Foo(self, other)
def __ne__(self, other):
if isinstance(other, (numbers.Number, np.number)):
def sub_get_data(self, **kwargs):
return self.f.get_data(**kwargs) != other
Foo = type('', (UnaryFactor,), {'_get_data': sub_get_data})
return Foo(self)
else:
def sub_get_data(self, **kwargs):
return self.f1.get_data(**kwargs) != self.f2.get_data(**kwargs)
Foo = type('', (BinaryFactor,), {'_get_data': sub_get_data})
return Foo(self, other)
def __abs__(self):
def sub_get_data(self, **kwargs):
return self.f.get_data(**kwargs).abs()
Foo = type('', (UnaryFactor,), {'_get_data': sub_get_data})
return Foo(self)
def __neg__(self):
def sub_get_data(self, **kwargs):
return -self.f.get_data(**kwargs)
Foo = type('', (UnaryFactor,), {'_get_data': sub_get_data})
return Foo(self)
def max(self):
"""analogue to max for each ``ID``"""
def sub_get_data(self, **kwargs):
return self.f.get_data(**kwargs).unstack().max()
Foo = type('', (UnaryFactor,), {'_get_data': sub_get_data})
return Foo(self)
def log(self):
"""analogue to numpy.log"""
def sub_get_data(self, **kwargs):
return np.log(self.f.get_data(**kwargs))
Foo = type('', (UnaryFactor,), {'_get_data': sub_get_data})
return Foo(self)
def pct_change(self):
"""analogue to pd.pct_change for each ``ID``"""
def sub_get_data(self, **kwargs):
return self.f.get_data(**kwargs).unstack().pct_change().stack().dropna()
Foo = type('', (UnaryFactor,), {'_get_data': sub_get_data})
return Foo(self)
def diff(self):
"""analogue to pd.diff for each ``ID``"""
def sub_get_data(self, **kwargs):
return self.f.get_data(**kwargs).unstack().diff().stack().dropna()
Foo = type('', (UnaryFactor,), {'_get_data': sub_get_data})
return Foo(self)
def shift(self, n: int):
"""analogue to pd.shift(n) for each ``ID``"""
def sub_get_data(self, **kwargs):
return self.f.get_data(**kwargs).unstack().shift(n).stack().dropna()
Foo = type('', (UnaryFactor,), {'_get_data': sub_get_data})
return Foo(self)
def diff_shift(self, shift: int):
"""analogue to pd.diff().shift(n) for each ``ID``"""
def sub_get_data(self, **kwargs):
return self.f.get_data(**kwargs).unstack().diff().shift(shift).stack().dropna()
Foo = type('', (UnaryFactor,), {'_get_data': sub_get_data})
return Foo(self)
def pct_change_shift(self, shift: int):
"""analogue to pd.pct_change().shift(n) for each ``ID``"""
def sub_get_data(self, **kwargs):
return self.f.get_data(**kwargs).unstack().pct_change().shift(shift).stack().dropna()
Foo = type('', (UnaryFactor,), {'_get_data': sub_get_data})
return Foo(self)
def weight(self):
def sub_get_data(self, **kwargs):
data = self.f.get_data(**kwargs)
return data / data.groupby('DateTime').sum()
Foo = type('', (UnaryFactor,), {'_get_data': sub_get_data})
return Foo(self)
def corr(self, other):
pass
def bind_params(self, ids: Union[str, Sequence[str]] = None, index_code: str = None):
if ids:
self._get_data = partial(self._get_data, ids=ids)
if index_code:
self._get_data = partial(self._get_data, index_code=index_code)
return self
class UnaryFactor(FactorBase):
def __init__(self, f: FactorBase):
super().__init__()
self.f = f
def _get_data(self, *args, **kwargs):
"""获取数据"""
raise NotImplementedError()
class BinaryFactor(FactorBase):
def __init__(self, f1: FactorBase, f2: FactorBase):
super().__init__()
self.f1 = f1
self.f2 = f2
def _get_data(self, *args, **kwargs):
"""获取数据"""
raise NotImplementedError()
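# Added illustration (hedged): the operators on FactorBase above compose
# factors lazily by synthesising anonymous UnaryFactor/BinaryFactor
# subclasses. The dummy constant factor below is an assumption used purely
# to demonstrate the composition; real factors read from the database.
class _ConstantFactor(FactorBase):
    """Illustrative in-memory factor (not part of the original module)."""
    def __init__(self, series: pd.Series, factor_name: str = None):
        super().__init__(factor_name)
        self._series = series
    def _get_data(self, *args, **kwargs) -> pd.Series:
        return self._series
def _factor_algebra_sketch() -> pd.Series:
    idx = pd.MultiIndex.from_product(
        [[dt.datetime(2021, 1, 4)], ['000001.SZ', '600000.SH']],
        names=['DateTime', 'ID'])
    price = _ConstantFactor(pd.Series([10.0, 20.0], index=idx), 'price')
    earnings = _ConstantFactor(pd.Series([1.0, 4.0], index=idx), 'earnings')
    pe = (price / earnings).set_factor_name('pe')
    return pe.get_data()  # evaluated lazily, only when get_data() is called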
class Factor(FactorBase):
"""
Factor base class
"""
def __init__(self, table_name: str, factor_name: str, db_interface: DBInterface = None):
super().__init__(factor_name)
self.table_name = table_name
self.db_interface = db_interface if db_interface else get_db_interface()
def _get_data(self, *args, **kwargs):
"""获取数据"""
raise NotImplementedError()
# helper functions
def _check_args(self, table_name: str, factor_name: str):
table_name = table_name.lower()
assert self.db_interface.exist_table(table_name), f'Table {table_name} does not exist in the database'
if factor_name:
columns = self.db_interface.get_columns_names(table_name)
assert factor_name in columns, f'Column {factor_name} does not exist in table {table_name}'
class IndexConstitute(Factor):
"""指数成分股权重"""
def __init__(self, db_interface: DBInterface = None):
super().__init__('指数成分股权重', '', db_interface)
def _get_data(self, index_ticker: str, date: DateUtils.DateType):
date_str = DateUtils.date_type2str(date, '-')
stm = f'DateTime = (SELECT MAX(DateTime) FROM `{self.table_name}` WHERE DateTime <= "{date_str}" AND IndexCode = "{index_ticker}")'
ret = self.db_interface.read_table(self.table_name, index_code=index_ticker, text_statement=stm)
ret.index = pd.MultiIndex.from_product([[date], ret.index.get_level_values('ID')])
ret.index.names = ['DateTime', 'ID']
return ret
class NonFinancialFactor(Factor):
"""
Non-financial-statement data
"""
def __init__(self, table_name: str, factor_name: str = None, db_interface: DBInterface = None):
super().__init__(table_name, factor_name, db_interface)
self._check_args(table_name, factor_name)
assert not any([it in table_name for it in constants.FINANCIAL_STATEMENTS_TYPE]), \
f'{table_name} is financial statement data, please use the FinancialFactor class!'
def _get_data(self, *args, **kwargs):
raise NotImplementedError()
class CompactFactor(NonFinancialFactor):
"""
Factors whose values change only infrequently, e.g. adjustment factors, industry classification, share capital, etc.
The corresponding database table has the columns {'DateTime', 'ID', 'FactorName'}. This class caches the data to improve efficiency.
"""
def __init__(self, table_name: str, db_interface: DBInterface = None):
super().__init__(table_name, table_name, db_interface)
self.data = self.db_interface.read_table(table_name)
self.calendar = DateUtils.TradingCalendar(db_interface)
def _get_data(self, dates: Union[Sequence[dt.datetime], DateUtils.DateType] = None,
start_date: DateUtils.DateType = None, end_date: DateUtils.DateType = None,
ids: Union[Sequence[str], str] = None, ticker_selector: TickerSelector = None) -> pd.Series:
"""
:param start_date: start date
:param end_date: end date
:param dates: selected dates
:param ids: query stocks
:param ticker_selector: TickerSelector that specifies criteria
:return: pandas.Series with DateTime as index and stock as column
"""
if isinstance(dates, dt.datetime):
dates = [dates]
data = self.data.copy()
if ids:
data = data.loc[(slice(None), ids)]
if dates:
end_date = max(dates)
start_date = min(dates)
if end_date is None:
end_date = dt.datetime.today()
if start_date is None:
start_date = data.index.get_level_values('DateTime').min()
previous_data = data.loc[data.index.get_level_values('DateTime') <= start_date].groupby('ID').tail(1)
index = pd.MultiIndex.from_product([[start_date], previous_data.index.get_level_values('ID')])
previous_data.index = index
ranged_data = data.loc[(data.index.get_level_values('DateTime') > start_date) &
(data.index.get_level_values('DateTime') <= end_date)]
data = pd.concat([previous_data, ranged_data])
date_list = self.calendar.select_dates(start_date=start_date, end_date=end_date)
df = data.unstack().reindex(date_list).ffill()
if dates:
df = df.loc[dates, :]
ret = df.stack()
ret.index.names = ['DateTime', 'ID']
if ticker_selector:
index = ticker_selector.generate_index(dates=dates)
ret = ret.reindex(index)
return ret
class IndustryFactor(CompactFactor):
"""
Stock industry classification
"""
def __init__(self, provider: str, level: int, db_interface: DBInterface = None) -> None:
"""
:param db_interface: DB Interface
:param provider: Industry classification data provider
:param level: Level of industry classification
"""
assert 0 < level <= constants.INDUSTRY_LEVEL[provider], f'{provider} industry classification has no level {level}'
table_name = f'{provider}行业'
super().__init__(table_name, db_interface)
self.name = f'{provider}{level}级行业'
if level != constants.INDUSTRY_LEVEL[provider]:
translation = utils.load_param('industry.json')
new_translation = {}
for key, value in translation[table_name].items():
new_translation[key] = value[f'level_{level}']
self.data = self.data.map(new_translation)
def list_constitutes(self, date: DateUtils.DateType, industry: str) -> List[str]:
"""
Get the stock constituents of an industry
:param date: query date
:param industry: industry name
:return: list of constituent stocks
"""
date_data = self.get_data(dates=date)
data = date_data.loc[date_data == industry]
return data.index.get_level_values('ID').tolist()
@cached_property
def all_industries(self) -> List[str]:
"""所有行业列表"""
return self.data.dropna().unique().tolist()
class OnTheRecordFactor(NonFinancialFactor):
"""
Data that exists only as on-the-record events, e.g. limit up/down, one-tick limit boards, trading suspensions, etc.
"""
def __init__(self, factor_name: str, db_interface: DBInterface = None):
super().__init__(factor_name, db_interface=db_interface)
self.factor_name = factor_name
def _get_data(self, date: DateUtils.DateType, **kwargs) -> List:
"""
:param date: selected dates
:return: list of IDs on the record
"""
tmp = self.db_interface.read_table(self.table_name, dates=date)
return tmp.index.get_level_values('ID').tolist()
def get_counts(self, start_date: DateUtils.DateType, end_date: DateUtils.DateType,
ids: Sequence[str] = | |
= None
try:
dev = DeviceProxy(dev_name)
except:
self.tango_dev_channels_in_error += 1
tg_dev_chs[dev_name] = dev_data = [dev, CaselessDict()]
dev, attr_data = dev_data
attr_data[attr_name] = channel_data
# get attribute configuration
attr_info = None
if dev is None:
self.tango_channels_info_in_error += 1
else:
try:
tg_attr_info = dev.get_attribute_config_ex(attr_name)[
0]
except:
tg_attr_info = \
self._build_empty_tango_attr_info(channel_data)
self.tango_channels_info_in_error += 1
attr_info = TangoChannelInfo(channel_data, tg_attr_info)
tg_chs_info[channel_name] = dev_name, attr_name, attr_info
def _build_empty_tango_attr_info(self, channel_data):
ret = PyTango.AttributeInfoEx()
ret.name = channel_data['name']
ret.label = channel_data['label']
return ret
def prepare(self):
# first time? build everything
if self.tango_dev_channels is None:
return self._build()
# prepare missing tango devices
if self.tango_dev_channels_in_error > 0:
for dev_name, dev_data in list(self.tango_dev_channels.items()):
if dev_data[0] is None:
try:
dev_data[0] = DeviceProxy(dev_name)
self.tango_dev_channels_in_error -= 1
except:
pass
# prepare missing tango attribute configuration
if self.tango_channels_info_in_error > 0:
for _, attr_data in list(self.tango_channels_info.items()):
dev_name, attr_name, attr_info = attr_data
if attr_info.has_info():
continue
dev = self.tango_dev_channels[dev_name]
if dev is None:
continue
try:
tg_attr_info = dev.get_attribute_config_ex(attr_name)[0]
attr_info.set_info(tg_attr_info)
self.tango_channels_info_in_error -= 1
except:
pass
def getChannels(self):
return self.channel_list
def getChannelInfo(self, channel_name):
try:
return self.tango_channels_info[channel_name]
except Exception:
channel_name = channel_name.lower()
for d_name, a_name, ch_info in \
list(self.tango_channels_info.values()):
if ch_info.name.lower() == channel_name:
return d_name, a_name, ch_info
def getChannelsInfo(self, only_enabled=False):
"""Returns information about the channels present in the measurement
group in the form of a dictionary where the key is the channel name and the value is
a tuple of three elements:
- device name
- attribute name
- attribute information or None if there was an error trying to get
the information
:param only_enabled: flag to filter out disabled channels
:type only_enabled: bool
:return: dictionary with channels info
:rtype: dict<str, tuple<str, str, TangoChannelInfo>>
"""
self.prepare()
ret = CaselessDict(self.tango_channels_info)
ret.update(self.non_tango_channels)
for ch_name, (_, _, ch_info) in list(ret.items()):
if only_enabled and not ch_info.enabled:
ret.pop(ch_name)
return ret
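# Illustrative return value (added; channel and device names are examples only):
#   {'ct01': ('expchan/ctctrl01/1', 'Value', <TangoChannelInfo>), ...}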
def getChannelsInfoList(self, only_enabled=False):
"""Returns information about the channels present in the measurement
group in the form of a list ordered by the channel index.
:param only_enabled: flag to filter out disabled channels
:type only_enabled: bool
:return: list with channels info
:rtype: list<TangoChannelInfo>
"""
channels_info = self.getChannelsInfo(only_enabled=only_enabled)
ret = []
for _, (_, _, ch_info) in list(channels_info.items()):
ret.append(ch_info)
ret = sorted(ret, key=lambda x: x.index)
return ret
def getCountersInfoList(self):
channels_info = self.getChannelsInfoList()
timer_name, idx = self.timer, -1
for i, ch in enumerate(channels_info):
if ch['full_name'] == timer_name:
idx = i
break
if idx >= 0:
channels_info.pop(idx)
return channels_info
def getTangoDevChannels(self, only_enabled=False):
"""Returns Tango channels (attributes) that could be used to read
measurement group results in the form of a dict where the key is the device name
and the value is a list with two elements:
- A device proxy or None if there was an error building it
- A dict where keys are attribute names and values are references to
dicts representing the channel data as received in the raw data
:param only_enabled: flag to filter out disabled channels
:type only_enabled: bool
:return: dict with Tango channels
:rtype: dict<str, list[DeviceProxy, CaselessDict<str, dict>]>
"""
if not only_enabled:
return self.tango_dev_channels
tango_dev_channels = {}
for dev_name, dev_data in list(self.tango_dev_channels.items()):
dev_proxy, attrs = dev_data[0], copy.deepcopy(dev_data[1])
for attr_name, channel_data in list(attrs.items()):
if not channel_data["enabled"]:
attrs.pop(attr_name)
tango_dev_channels[dev_name] = [dev_proxy, attrs]
return tango_dev_channels
def read(self, parallel=True):
if parallel:
return self._read_parallel()
return self._read()
def _read_parallel(self):
self.prepare()
ret = CaselessDict(self.cache)
dev_replies = {}
# deposit read requests
tango_dev_channels = self.getTangoDevChannels(only_enabled=True)
for _, dev_data in list(tango_dev_channels.items()):
dev, attrs = dev_data
if dev is None:
continue
try:
dev_replies[dev] = dev.read_attributes_asynch(
list(attrs.keys())), attrs
except Exception:
dev_replies[dev] = None, attrs
# gather all replies
for dev, reply_data in list(dev_replies.items()):
reply, attrs = reply_data
try:
data = dev.read_attributes_reply(reply, 0)
for data_item in data:
channel_data = attrs[data_item.name]
if data_item.has_failed:
value = None
else:
value = data_item.value
ret[channel_data['full_name']] = value
except Exception:
for _, channel_data in list(attrs.items()):
ret[channel_data['full_name']] = None
return ret
def _read(self):
self.prepare()
ret = CaselessDict(self.cache)
tango_dev_channels = self.getTangoDevChannels(only_enabled=True)
for _, dev_data in list(tango_dev_channels.items()):
dev, attrs = dev_data
try:
data = dev.read_attributes(list(attrs.keys()))
for data_item in data:
channel_data = attrs[data_item.name]
if data_item.has_failed:
value = None
else:
value = data_item.value
ret[channel_data['full_name']] = value
except Exception:
for _, channel_data in list(attrs.items()):
ret[channel_data['full_name']] = None
return ret
class MeasurementGroup(PoolElement):
""" Class encapsulating MeasurementGroup functionality."""
def __init__(self, name, **kw):
"""PoolElement initialization."""
self._configuration = None
self._channels = None
self._last_integ_time = None
self.call__init__(PoolElement, name, **kw)
self.__cfg_attr = self.getAttribute('configuration')
self.__cfg_attr.addListener(self.on_configuration_changed)
self._value_buffer_cb = None
self._value_buffer_channels = None
codec_name = getattr(sardanacustomsettings, "VALUE_BUFFER_CODEC")
self._value_buffer_codec = CodecFactory().getCodec(codec_name)
self._value_ref_buffer_cb = None
self._value_ref_buffer_channels = None
codec_name = getattr(sardanacustomsettings, "VALUE_REF_BUFFER_CODEC")
self._value_ref_buffer_codec = CodecFactory().getCodec(codec_name)
def cleanUp(self):
PoolElement.cleanUp(self)
f = self.factory()
f.removeExistingAttribute(self.__cfg_attr)
def _create_str_tuple(self):
channel_names = ", ".join(self.getChannelNames())
return self.getName(), self.getTimerName(), channel_names
def getConfigurationAttrEG(self):
return self._getAttrEG('Configuration')
def setConfiguration(self, configuration):
codec = CodecFactory().getCodec('json')
f, data = codec.encode(('', configuration))
self.write_attribute('configuration', data)
def _setConfiguration(self, data):
self._configuration = MGConfiguration(self, data)
def getConfiguration(self, force=False):
if force or self._configuration is None:
data = self.getConfigurationAttrEG().readValue(force=True)
self._setConfiguration(data)
return self._configuration
def on_configuration_changed(self, evt_src, evt_type, evt_value):
if evt_type not in CHANGE_EVT_TYPES:
return
self.info("Configuration changed")
self._setConfiguration(evt_value.value)
def getTimerName(self):
return self.getTimer()['name']
def getTimer(self):
cfg = self.getConfiguration()
return cfg.channels[cfg.timer]
def getTimerValue(self):
return self.getTimerName()
def getMonitorName(self):
return self.getMonitor()['name']
def getMonitor(self):
cfg = self.getConfiguration()
return cfg.channels[cfg.monitor]
def setTimer(self, timer_name):
try:
self.getChannel(timer_name)
except KeyError:
raise Exception("%s does not contain a channel named '%s'"
% (str(self), timer_name))
cfg = self.getConfiguration().raw_data
cfg['timer'] = timer_name
import json
self.write_attribute("configuration", json.dumps(cfg))
def getChannels(self):
return self.getConfiguration().getChannels()
def getCounters(self):
cfg = self.getConfiguration()
return [c for c in self.getChannels() if c['full_name'] != cfg.timer]
def getChannelNames(self):
return [ch['name'] for ch in self.getChannels()]
def getCounterNames(self):
return [ch['name'] for ch in self.getCounters()]
def getChannelLabels(self):
return [ch['label'] for ch in self.getChannels()]
def getCounterLabels(self):
return [ch['label'] for ch in self.getCounters()]
def getChannel(self, name):
return self.getConfiguration().channels[name]
def getChannelInfo(self, name):
return self.getConfiguration().getChannelInfo(name)
def getChannelsInfo(self):
return self.getConfiguration().getChannelsInfoList()
def getChannelsEnabledInfo(self):
"""Returns information about **only enabled** channels present in the
measurement group in the form of a list ordered by the channel index.
:return: list with channels info
:rtype: list<TangoChannelInfo>
"""
return self.getConfiguration().getChannelsInfoList(only_enabled=True)
def getCountersInfo(self):
return self.getConfiguration().getCountersInfoList()
def getValues(self, parallel=True):
return self.getConfiguration().read(parallel=parallel)
def getValueBuffers(self):
value_buffers = []
for channel_info in self.getChannels():
channel = Device(channel_info["full_name"])
value_buffers.append(channel.getValueBuffer())
return value_buffers
def getIntegrationTime(self):
return self._getAttrValue('IntegrationTime')
def getIntegrationTimeObj(self):
return self._getAttrEG('IntegrationTime')
def setIntegrationTime(self, ctime):
self.getIntegrationTimeObj().write(ctime)
def putIntegrationTime(self, ctime):
if self._last_integ_time == ctime:
return
self._last_integ_time = ctime
self.getIntegrationTimeObj().write(ctime)
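# Illustrative usage sketch (assumption: "mg" is an instance of this class;
# not part of the original source). setIntegrationTime always writes the
# attribute, while putIntegrationTime caches the last written value and
# skips the write when it is unchanged:
#
#     mg.setIntegrationTime(0.1)   # always triggers an attribute write
#     mg.putIntegrationTime(0.1)   # writes and remembers 0.1
#     mg.putIntegrationTime(0.1)   # no-op: same value as the last write
#     mg.putIntegrationTime(0.2)   # writes again, value changed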
def getAcquisitionModeObj(self):
return self._getAttrEG('AcquisitionMode')
def getAcquisitionMode(self):
return self._getAttrValue('AcquisitionMode')
def setAcquisitionMode(self, acqMode):
self.getAcquisitionModeObj().write(acqMode)
def getSynchronizationObj(self):
return self._getAttrEG('Synchronization')
def getSynchronization(self):
return self._getAttrValue('Synchronization')
def setSynchronization(self, synchronization):
codec = CodecFactory().getCodec('json')
_, data = codec.encode(('', synchronization))
self.getSynchronizationObj().write(data)
self._last_integ_time = None
# NbStarts Methods
def getNbStartsObj(self):
return self._getAttrEG('NbStarts')
def setNbStarts(self, starts):
self.getNbStartsObj().write(starts)
def getNbStarts(self):
return self._getAttrValue('NbStarts')
def getMoveableObj(self):
return self._getAttrEG('Moveable')
def getMoveable(self):
return self._getAttrValue('Moveable')
def getLatencyTimeObj(self):
return self._getAttrEG('LatencyTime')
def getLatencyTime(self):
return self._getAttrValue('LatencyTime')
def setMoveable(self, moveable=None):
if moveable is None:
moveable = 'None' # Tango attribute is of type DevString
self.getMoveableObj().write(moveable)
def valueBufferChanged(self, channel, value_buffer):
"""Receive value buffer updates, pre-process them, and call
the subscribed callback.
:param channel: channel that reports value buffer update
:type channel: ExpChannel
:param value_buffer: JSON-encoded value buffer update; it contains
at least values and indexes
:type value_buffer: :obj:`str`
"""
if value_buffer is None:
return
_, value_buffer = self._value_buffer_codec.decode(value_buffer)
values = value_buffer["value"]
if isinstance(values[0], list):
np_values = list(map(numpy.array, values))
value_buffer["value"] = np_values
self._value_buffer_cb(channel, value_buffer)
def subscribeValueBuffer(self, cb=None):
"""Subscribe to channels' value buffer update events. If no
callback is passed, the default channel's callback is subscribed which
will store the data in the channel's value_buffer attribute.
:param cb: callback to be subscribed, None means subscribe the default
channel's callback
:type cb: callable
"""
self._value_buffer_channels = []
for channel_info in self.getChannels():
full_name = channel_info["full_name"]
value_ref_enabled = channel_info.get("value_ref_enabled", False)
# Use DeviceProxy instead of taurus to avoid crashes in Py3
# See: tango-controls/pytango#292
if _is_referable(full_name) and value_ref_enabled:
continue
channel = Device(full_name)
value_buffer_obj = channel.getValueBufferObj()
if cb is not None:
self._value_buffer_cb = cb
value_buffer_obj.subscribeEvent(self.valueBufferChanged,
channel, False)
else:
value_buffer_obj.subscribeEvent(channel.valueBufferChanged,
with_first_event=False)
self._value_buffer_channels.append(channel)
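# Illustrative usage sketch (assumption: "mg" is an instance of this class
# and "on_buffer" is a hypothetical callback; not part of the original
# source). The callback receives the channel and the decoded value buffer,
# as dispatched by valueBufferChanged above:
#
#     def on_buffer(channel, value_buffer):
#         print(channel, value_buffer["value"])  # also carries index data
#
#     mg.subscribeValueBuffer(on_buffer)
#     # ... acquisition runs, on_buffer is called on each buffer update ...
#     mg.unsubscribeValueBuffer(on_buffer)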
def unsubscribeValueBuffer(self, cb=None):
"""Unsubscribe from channels' value buffer events. If no callback is
passed, unsubscribe the channel's default callback.
:param cb: callback to be unsubscribed, None means unsubscribe the
default channel's callback
:type cb: callable
"""
for channel_info in self.getChannels():
full_name = channel_info["full_name"]
value_ref_enabled = | |
<filename>SLU/fairseq/fairseq/tasks/TArCMultiTask.py
# Code for MultiTask TArC annotation/collection
import os
import re
import sys
import csv
import glob
import torch
from fairseq.tarc_utils import *
from fairseq.data import Dictionary, TarcMultiTaskDataset
from fairseq.tasks import FairseqTask, register_task
from typing import Dict, List
LOSS_INIT_VALUE=999999.9
ER_INIT_VALUE=LOSS_INIT_VALUE
CLS_COL_IDX=1
POS_COL_IDX=4
def set_pos_col_idx(args):
if args.sub_task == 'tarc-ext':
return 3
elif args.sub_task == 'tarc-full':
return 4
elif args.sub_task == 'madar-trs-ex':
return 2
elif args.sub_task == 'madar-trs-full':
return 3
elif args.sub_task == 'tarc-full-npos':
return 5
else:
return 9999999
# Define some utility functions for reading the TArC multi-task data format (tabular format)
def process_morpho(tag):
res_tags = []
for c in tag:
res_tags.append( '@' + c )
return res_tags
def append_tags( tags, sep ):
res_tags = []
for i in range(len(tags)):
if i == 0:
res_tags.append( tags[i] )
else:
res_tags.append( sep + tags[i] )
return res_tags
def process_micro_pos(tag):
res_tags = []
if tag != '_':
micro_tags = tag.split('_')
res_tags.extend( append_tags( micro_tags[:-1], '_' ) )
if len(micro_tags) >= 2 and (micro_tags[-2] == 'ADJ' or micro_tags[-2] == 'PRON') and micro_tags[-1][0] in ['1', '2', '3']:
morpho_tags = process_morpho( micro_tags[-1] )
morpho_tags[0] = morpho_tags[0].replace('@', '_')
res_tags.extend( morpho_tags )
else:
if micro_tags[-1] == 'SG' or micro_tags[-1] == 'PL':
res_tags.append( '_' + micro_tags[-1] )
else:
if micro_tags[-1] != ':':
nano_tags = micro_tags[-1].split(':')
if len(nano_tags) > 1:
if len(nano_tags) != 2:
sys.stderr.write(' - Micro POS error: {}\n'.format(micro_tags[-1]))
sys.exit(1)
res_tags.append( '_' + nano_tags[0] )
morpho_tags = process_morpho(nano_tags[1])
morpho_tags[0] = morpho_tags[0].replace('@', ':')
res_tags.extend( morpho_tags )
else:
if len(micro_tags) > 1:
res_tags.append( '_' + micro_tags[-1] )
else:
res_tags.append( micro_tags[-1] )
else:
res_tags.append( micro_tags[-1] )
else:
res_tags.append( tag )
return res_tags
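# Illustrative trace (not part of the original code): a composite micro POS
# tag is split into sub-tags, keeping '_' as a prefix marker on every piece
# after the first, so that each morphological component becomes a separate
# symbol, e.g.
#
#     process_micro_pos('NOUN_MASC_SG')  ->  ['NOUN', '_MASC', '_SG']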
def process_suffix_pos(tag):
res_tags = []
if tag != '-':
tag_tokens = tag.split('-')
if len(tag_tokens) > 1:
for i in range(len(tag_tokens)):
t = tag_tokens[i]
if len(t) > 2 and t[:2] == 'IV' and not (len(t) > 6 and t[:6] == 'IVSUFF'):
res_tags.append('IV')
morpho_tags = process_morpho( t[2:] )
res_tags.extend( morpho_tags )
else:
suffix_tokens = process_micro_pos(t)
if i > 0:
suffix_tokens[0] = '-' + suffix_tokens[0]
res_tags.extend( suffix_tokens )
else:
res_tags.extend( process_micro_pos(tag) )
else:
res_tags.append(tag)
return res_tags
def process_macro_pos(tag):
res_tags = []
if tag != '+':
tag_tokens = tag.split('+')
for i in range(len(tag_tokens)):
t = tag_tokens[i]
micro_pos = process_suffix_pos(t)
if i > 0:
res_tags.append('+')
res_tags.extend(micro_pos)
else:
res_tags.append(tag)
return res_tags
def process_pos_tag(tag, args, pad_flag=True, rep_flag=False):
res_tags = []
pos_tokens = tag.split(']')
if len(pos_tokens) > 1:
if len(pos_tokens) != 2:
sys.stderr.write(' - POS format error (splitting at ]): {}\n'.format(tag))
sys.exit(1)
pos_pos_tokens = pos_tokens[0].split('[')
if len(pos_pos_tokens) > 1 and len(pos_pos_tokens[0]) > 0: # Case like 'IV2S-IV+[PREP+PRON_1S]IVSUFF_IO:1S'
if len(pos_pos_tokens) != 2:
sys.stderr.write(' - POS format error (splitting at [): {}\n'.format(tag))
sys.exit(1)
pref = pos_pos_tokens[0]
pref_tokens = process_macro_pos( pref )
infix = pos_pos_tokens[1]
infix_tokens = process_macro_pos( infix )
infix_tokens = ['+['] + infix_tokens + [']']
post = pos_tokens[1]
post_tokens = process_macro_pos( post )
res_tags.extend(pref_tokens)
res_tags.extend( infix_tokens )
res_tags.extend( post_tokens )
elif len(pos_pos_tokens) > 1: # Case like '[NOUN_QUANT]ADV'
if len(pos_pos_tokens) != 2:
sys.stderr.write(' - POS format error (splitting at [, with first part empty): {}\n'.format(tag))
sys.exit(1)
pref = pos_pos_tokens[1]
pref_tokens = process_macro_pos( pref )
pref_tokens = ['['] + pref_tokens + [']']
post = pos_tokens[1]
post_tokens = process_macro_pos( post )
res_tags.extend( pref_tokens )
res_tags.extend( post_tokens )
else:
sys.stderr.write(' - POS format error, possibly unbalanced [] at pos {}\n'.format(tag))
sys.exit(1)
else: # "Normal" case (normal for Arabic people...)
pos_tokens = process_macro_pos( tag )
res_tags.extend( pos_tokens )
if pad_flag:
return [start_token] + res_tags + [end_token]
else:
return res_tags
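# Illustrative trace (not part of the original code), for a tag without the
# bracketed infix notation; start_token/end_token come from tarc_utils:
#
#     process_pos_tag('PREP+NOUN', args)
#         ->  [start_token, 'PREP', '+', 'NOUN', end_token]
#
# Tags of the form 'X[Y]Z' are first split on ']' and '[', and each part is
# then processed by process_macro_pos as above.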
def create_character_list(token, args, pad_flag=True, rep_flag=False):
if (not (token in fillers + latin_fillers)) and rep_flag and args.sub_task in ['tarc-ext', 'tarc-full', 'tarc-full-npos']:
token = replace_all_num(token)
token = replace_all_pun(token)
token = replace_all_sym(token)
elif (not (token in fillers + latin_fillers)) and args.sub_task in ['tarc-ext', 'tarc-full', 'tarc-full-npos']:
token = replace_all_Lpun(token)
token = replace_all_Lsym(token)
tok_lst = []
tok_idx = 0
for t in token.split():
if tok_idx > 0:
tok_lst.append( space_token )
for c in t:
tok_lst.append( c )
tok_idx += 1
if args.sub_task in ['tarc-ext', 'tarc-full', 'tarc-full-npos']:
seq_fillers = detect_fillers(tok_lst, fillers + latin_fillers)
if len(seq_fillers) > 0:
new_lst = []
prev_start = 0
for t in seq_fillers:
new_lst = new_lst + tok_lst[prev_start:t[0]] + [''.join(tok_lst[t[0]:t[1]])]
prev_start = t[1]
new_lst = new_lst + tok_lst[t[1]:]
tok_lst = new_lst
if pad_flag:
return [start_token] + tok_lst + [end_token]
else:
return tok_lst
def process_tokenization( token, args, pad_flag=True, rep_flag=False ):
res_tok = []
if token != '+':
tok_tokens = token.split('+')
if len(tok_tokens) > 1:
res_tok.append(start_token)
for i in range(len(tok_tokens)):
t = tok_tokens[i]
if len(t) > 0:
if i > 0:
res_tok.append( '+' )
res_tok.extend( create_character_list( t, args, pad_flag=False, rep_flag=rep_flag ) )
else:
sys.stderr.write('process_tokenization FORMAT ERROR: found empty token when splitting on + >{}< (you probably need to replace multiple consecutive + in your data)\n'.format(token))
sys.exit(1)
res_tok.append(end_token)
else:
res_tok.extend( create_character_list( token, args, pad_flag=True, rep_flag=rep_flag ) )
else:
res_tok.append(start_token)
res_tok.append(token)
res_tok.append(end_token)
return res_tok
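# Illustrative trace (not part of the original code), assuming none of the
# filler/punctuation replacements apply to the input: a '+'-segmented token
# is expanded into characters while keeping the segmentation marker,
#
#     process_tokenization('w+ktb', args)
#         ->  [start_token, 'w', '+', 'k', 't', 'b', end_token]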
def tiger_label_processing(label, args, pad_flag=True, rep_flag=False):
if label != '$.':
label_tokens = label.split('.')
else:
label_tokens = [label]
if pad_flag:
return [start_token] + label_tokens + [end_token]
else:
return label_tokens
def no_processing(token, args, pad_flag=True, rep_flag=False):
return [token]
madar_translation = (create_character_list, no_processing, create_character_list)
madar_ex_translation = (create_character_list, no_processing, process_pos_tag, create_character_list)
madar_full_translation = (create_character_list, no_processing, create_character_list, process_pos_tag, create_character_list)
tarc_processing_base = (create_character_list, process_tokenization, process_pos_tag) # Processing functions for raw tunisian, tokenized tunisian and POS
tarc_processing_ext = (create_character_list, no_processing, process_tokenization, process_pos_tag) # Processing functions for arabish, tunisian, tokenized tunisian and POS
tarc_processing_full = (create_character_list, no_processing, create_character_list, process_tokenization, process_pos_tag)
tarc_processing_full_npos = (create_character_list, no_processing, no_processing, create_character_list, process_tokenization, process_pos_tag)
tarc_substep_1 = (create_character_list, no_processing, create_character_list)
tarc_substep_2 = (create_character_list, process_tokenization)
tarc_substep_3 = (process_tokenization, process_pos_tag)
tiger_mt_processing = (create_character_list, no_processing, no_processing)
tiger_mtext_processing = (create_character_list, no_processing, tiger_label_processing)
tiger4_mt_processing = (create_character_list, no_processing, no_processing, no_processing)
# NOTE: used to decide whether token- and character-level representations must be computed and merged when their use is requested on the command line
granularity_merging_flags = {}
granularity_merging_flags['tiger-mt'] = (True, False, False)
granularity_merging_flags['tiger-mt-ext'] = (True, False, True)
granularity_merging_flags['tiger4-mt'] = (True, False, False, False)
granularity_merging_flags['madar-trs'] = (True, False, True)
granularity_merging_flags['madar-trs-ex'] = (True, False, True, True)
granularity_merging_flags['madar-trs-full'] = (True, False, True, True, True)
granularity_merging_flags['tarc-base'] = (True, False, True)
granularity_merging_flags['tarc-ext'] = (True, False, True, True)
granularity_merging_flags['tarc-full'] = (True, False, True, True, True)
granularity_merging_flags['tarc-full-npos'] = (True, False, False, True, True, True)
granularity_merging_flags['tarc-substep1'] = (True, False, True)
granularity_merging_flags['tarc-substep2'] = (True, True)
granularity_merging_flags['tarc-substep3'] = (True, True)
def choose_column_processing(num_columns, args):
column_processing = None
if args.sub_task == 'tiger-mt':
print(' - TArCMultiTask, processing mode set to tiger-mt')
sys.stdout.flush()
column_processing = tiger_mt_processing
elif args.sub_task == 'tiger-mt-ext':
print(' - TArCMultiTask, processing mode set to tiger-mt-ext')
sys.stdout.flush()
column_processing = tiger_mtext_processing
elif args.sub_task == 'tiger4-mt':
print(' - TArCMultiTask, processing mode set to tiger4-mt')
sys.stdout.flush()
column_processing = tiger4_mt_processing
elif args.sub_task == 'madar-trs':
print(' - TArCMultiTask, processing mode set to madar-trs')
sys.stdout.flush()
column_processing = madar_translation
elif args.sub_task == 'madar-trs-ex':
print(' - TArCMultiTask, processing mode set to madar-trs-ex')
sys.stdout.flush()
column_processing = madar_ex_translation
elif args.sub_task == 'madar-trs-full':
print(' - TArCMultiTask, processing mode set to madar-trs-full')
sys.stdout.flush()
column_processing = madar_full_translation
elif args.sub_task == 'tarc-base':
print(' - TArCMultiTask, processing mode set to tarc-base')
sys.stdout.flush()
column_processing = tarc_processing_base
elif args.sub_task == 'tarc-ext':
print(' - TArCMultiTask, processing mode set to tarc-ext')
sys.stdout.flush()
column_processing = tarc_processing_ext
elif args.sub_task == 'tarc-full':
print(' - TArCMultiTask, processing mode set to tarc-full')
sys.stdout.flush()
column_processing = tarc_processing_full
elif args.sub_task == 'tarc-full-npos':
print(' - TArCMultiTask, processing mode set to tarc-full-npos')
column_processing = tarc_processing_full_npos
elif args.sub_task == 'tarc-substep1':
print(' - TArCMultiTask, processing mode set to tarc-substep1')
sys.stdout.flush()
column_processing = tarc_substep_1
elif args.sub_task == 'tarc-substep2':
print(' - TArCMultiTask, processing mode set to tarc-substep2')
sys.stdout.flush()
column_processing = tarc_substep_2
elif args.sub_task == 'tarc-substep3':
print(' - TArCMultiTask, processing mode set to tarc-substep3')
sys.stdout.flush()
column_processing = tarc_substep_3
elif args.sub_task == 'base':
print(' - TArCMultiTask, processing mode set to base')
sys.stdout.flush()
column_processing = [no_processing for i in range(num_columns)]
else:
print(' - TArCMultiTask, setting default processing mode (no processing)')
sys.stdout.flush()
column_processing = [no_processing for i in range(num_columns)]
return column_processing
def check_column_processing(num_columns, args):
if num_columns == 3 and not args.sub_task in ['tiger-mt', 'tiger-mt-ext', 'tarc-base', 'base', 'madar-trs', 'tarc-substep1']:
raise ValueError(' wrong num. of columns in input data for processing mode {}'.format(args.sub_task))
elif num_columns == 2 and not args.sub_task in ['tarc-substep2', 'tarc-substep3', 'base']:
raise ValueError(' 2 columns are expected with sub-task tarc-substep2|3')
elif num_columns == 4 and not args.sub_task in ['tiger4-mt', 'tarc-ext', 'base','madar-trs-ex']:
raise | |
copy=False),
pad_width_by_index,
mode=mode,
**pad_option_kwargs,
)
return type(self)(self.dims, array)
def _roll_one_dim(self, dim, count):
axis = self.get_axis_num(dim)
count %= self.shape[axis]
if count != 0:
indices = [slice(-count, None), slice(None, -count)]
else:
indices = [slice(None)]
arrays = [self[(slice(None),) * axis + (idx,)].data for idx in indices]
data = duck_array_ops.concatenate(arrays, axis)
if is_duck_dask_array(data):
# chunked data should come out with the same chunks; this makes
# it feasible to combine shifted and unshifted data
# TODO: remove this once dask.array automatically aligns chunks
data = data.rechunk(self.data.chunks)
return type(self)(self.dims, data, self._attrs, fastpath=True)
def roll(self, shifts=None, **shifts_kwargs):
"""
Return a new Variable with rolled data.
Parameters
----------
shifts : mapping of hashable to int
Integer offset to roll along each of the given dimensions.
Positive offsets roll to the right; negative offsets roll to the
left.
**shifts_kwargs
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
shifted : Variable
Variable with the same dimensions and attributes but rolled data.
"""
shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "roll")
result = self
for dim, count in shifts.items():
result = result._roll_one_dim(dim, count)
return result
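# Illustrative sketch (not part of the original module): rolling wraps the
# trailing elements of a dimension around to its front,
#
#     >>> v = Variable(("x",), np.arange(5))
#     >>> v.roll(x=2).values
#     array([3, 4, 0, 1, 2])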
def transpose(self, *dims) -> "Variable":
"""Return a new Variable object with transposed dimensions.
Parameters
----------
*dims : str, optional
By default, reverse the dimensions. Otherwise, reorder the
dimensions to this order.
Returns
-------
transposed : Variable
The returned object has transposed data and dimensions with the
same attributes as the original.
Notes
-----
This operation returns a view of this variable's data. It is
lazy for dask-backed Variables but not for numpy-backed Variables.
See Also
--------
numpy.transpose
"""
if len(dims) == 0:
dims = self.dims[::-1]
dims = tuple(infix_dims(dims, self.dims))
axes = self.get_axis_num(dims)
if len(dims) < 2 or dims == self.dims:
# no need to transpose if only one dimension
# or dims are in same order
return self.copy(deep=False)
data = as_indexable(self._data).transpose(axes)
return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)
@property
def T(self) -> "Variable":
return self.transpose()
def set_dims(self, dims, shape=None):
"""Return a new variable with given set of dimensions.
This method might be used to attach new dimension(s) to a variable.
When possible, this operation does not copy this variable's data.
Parameters
----------
dims : str or sequence of str or dict
Dimensions to include on the new variable. If a dict, values are
used to provide the sizes of new dimensions; otherwise, new
dimensions are inserted with length 1.
Returns
-------
Variable
"""
if isinstance(dims, str):
dims = [dims]
if shape is None and utils.is_dict_like(dims):
shape = dims.values()
missing_dims = set(self.dims) - set(dims)
if missing_dims:
raise ValueError(
"new dimensions %r must be a superset of "
"existing dimensions %r" % (dims, self.dims)
)
self_dims = set(self.dims)
expanded_dims = tuple(d for d in dims if d not in self_dims) + self.dims
if self.dims == expanded_dims:
# don't use broadcast_to unless necessary so the result remains
# writeable if possible
expanded_data = self.data
elif shape is not None:
dims_map = dict(zip(dims, shape))
tmp_shape = tuple(dims_map[d] for d in expanded_dims)
expanded_data = duck_array_ops.broadcast_to(self.data, tmp_shape)
else:
expanded_data = self.data[(None,) * (len(expanded_dims) - self.ndim)]
expanded_var = Variable(
expanded_dims, expanded_data, self._attrs, self._encoding, fastpath=True
)
return expanded_var.transpose(*dims)
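# Illustrative sketch (not part of the original module): inserting a new
# length-1 dimension in front of an existing one,
#
#     >>> v = Variable(("x",), np.arange(3))
#     >>> v.set_dims(["y", "x"]).shape
#     (1, 3)
#
# Passing a dict such as {"y": 4, "x": 3} instead broadcasts the data to the
# requested sizes.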
def _stack_once(self, dims, new_dim):
if not set(dims) <= set(self.dims):
raise ValueError("invalid existing dimensions: %s" % dims)
if new_dim in self.dims:
raise ValueError(
"cannot create a new dimension with the same "
"name as an existing dimension"
)
if len(dims) == 0:
# don't stack
return self.copy(deep=False)
other_dims = [d for d in self.dims if d not in dims]
dim_order = other_dims + list(dims)
reordered = self.transpose(*dim_order)
new_shape = reordered.shape[: len(other_dims)] + (-1,)
new_data = reordered.data.reshape(new_shape)
new_dims = reordered.dims[: len(other_dims)] + (new_dim,)
return Variable(new_dims, new_data, self._attrs, self._encoding, fastpath=True)
def stack(self, dimensions=None, **dimensions_kwargs):
"""
Stack any number of existing dimensions into a single new dimension.
New dimensions will be added at the end, and the order of the data
along each new dimension will be in contiguous (C) order.
Parameters
----------
dimensions : mapping of hashable to tuple of hashable
Mapping of form new_name=(dim1, dim2, ...) describing the
names of new dimensions, and the existing dimensions that
they replace.
**dimensions_kwargs
The keyword arguments form of ``dimensions``.
One of dimensions or dimensions_kwargs must be provided.
Returns
-------
stacked : Variable
Variable with the same attributes but stacked data.
See also
--------
Variable.unstack
"""
dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, "stack")
result = self
for new_dim, dims in dimensions.items():
result = result._stack_once(dims, new_dim)
return result
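# Illustrative sketch (not part of the original module): collapsing two
# dimensions into one, in contiguous (C) order,
#
#     >>> v = Variable(("x", "y"), np.arange(6).reshape(2, 3))
#     >>> v.stack(z=("x", "y")).shape
#     (6,)
#
# Variable.unstack performs the inverse operation given the original sizes.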
def _unstack_once(self, dims, old_dim):
new_dim_names = tuple(dims.keys())
new_dim_sizes = tuple(dims.values())
if old_dim not in self.dims:
raise ValueError("invalid existing dimension: %s" % old_dim)
if set(new_dim_names).intersection(self.dims):
raise ValueError(
"cannot create a new dimension with the same "
"name as an existing dimension"
)
if np.prod(new_dim_sizes) != self.sizes[old_dim]:
raise ValueError(
"the product of the new dimension sizes must "
"equal the size of the old dimension"
)
other_dims = [d for d in self.dims if d != old_dim]
dim_order = other_dims + [old_dim]
reordered = self.transpose(*dim_order)
new_shape = reordered.shape[: len(other_dims)] + new_dim_sizes
new_data = reordered.data.reshape(new_shape)
new_dims = reordered.dims[: len(other_dims)] + new_dim_names
return Variable(new_dims, new_data, self._attrs, self._encoding, fastpath=True)
def unstack(self, dimensions=None, **dimensions_kwargs):
"""
Unstack an existing dimension into multiple new dimensions.
New dimensions will be added at the end, and the order of the data
along each new dimension will be in contiguous (C) order.
Parameters
----------
dimensions : mapping of hashable to mapping of hashable to int
Mapping of the form old_dim={dim1: size1, ...} describing the
names of existing dimensions, and the new dimensions and sizes
that they map to.
**dimensions_kwargs
The keyword arguments form of ``dimensions``.
One of dimensions or dimensions_kwargs must be provided.
Returns
-------
unstacked : Variable
Variable with the same attributes but unstacked data.
See also
--------
Variable.stack
"""
dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, "unstack")
result = self
for old_dim, dims in dimensions.items():
result = result._unstack_once(dims, old_dim)
return result
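# Illustrative sketch (not part of the original module): splitting a stacked
# dimension back into its components,
#
#     >>> v = Variable(("z",), np.arange(6))
#     >>> v.unstack(z={"x": 2, "y": 3}).dims
#     ('x', 'y')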
def fillna(self, value):
return ops.fillna(self, value)
def where(self, cond, other=dtypes.NA):
return ops.where_method(self, cond, other)
def reduce(
self,
func,
dim=None,
axis=None,
keep_attrs=None,
keepdims=False,
**kwargs,
):
"""Reduce this array by applying `func` along some dimension(s).
Parameters
----------
func : callable
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of reducing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dim'
and 'axis' arguments can be supplied. If neither are supplied, then
the reduction is calculated over the flattened array (by calling
`func(x)` without an axis argument).
keep_attrs : bool, optional
If True, the variable's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
keepdims : bool, default: False
If True, the dimensions which are reduced are left in the result
as dimensions of size one
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
"""
if dim == ...:
dim = None
if dim is not None and axis is not None:
raise ValueError("cannot supply both 'axis' and 'dim' arguments")
if dim is not None:
axis = self.get_axis_num(dim)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", r"Mean of empty slice", category=RuntimeWarning
)
if axis is not None:
data = func(self.data, axis=axis, **kwargs)
else:
data = func(self.data, **kwargs)
if getattr(data, "shape", ()) == self.shape:
dims = self.dims
else:
removed_axes = (
range(self.ndim) if axis is None else np.atleast_1d(axis) % self.ndim
)
if keepdims:
# Insert np.newaxis for removed dims
slices = tuple(
np.newaxis if i in removed_axes else slice(None, None)
for i in range(self.ndim)
)
if getattr(data, "shape", None) is None:
# Reduce has produced a scalar value, not an array-like
data = np.asanyarray(data)[slices]
else:
data = data[slices]
dims = self.dims
else:
dims = | |
ac.showCalltipsForce
@cmd('show-calltips-force')
def showCalltipsForce(self, event=None):
'''Show the calltips at the cursor, even if calltips are not presently enabled.'''
return self.showCalltips(event, force=True)
#@+node:ekr.20061031131434.15: *4* ac.showAutocompleter/CalltipsStatus
def showAutocompleterStatus(self):
'''Show the autocompleter status.'''
k = self.k
if not g.unitTesting:
s = 'autocompleter %s' % (
'On' if k.enable_autocompleter else 'Off')
g.red(s)
def showCalltipsStatus(self):
'''Show the calltips status.'''
k = self.k
if not g.unitTesting:
s = 'calltips %s' % ('On' if k.enable_calltips else 'Off')
g.red(s)
#@+node:ekr.20061031131434.16: *3* ac.Helpers
#@+node:ekr.20110512212836.14469: *4* ac.exit
def exit(self):
trace = False and not g.unitTesting
if trace: g.trace(g.callers())
c = self.c
w = self.w or c.frame.body.wrapper
if trace: g.trace(g.callers())
c.k.keyboardQuit()
if self.use_qcompleter:
if self.qw:
self.qw.end_completer()
self.qw = None # Bug fix: 2013/09/24.
else:
for name in (self.tabName, 'Modules', 'Info'):
c.frame.log.deleteTab(name)
# Restore the selection range that may have been destroyed by changing tabs.
c.widgetWantsFocusNow(w)
i, j = w.getSelectionRange()
w.setSelectionRange(i, j, insert=j)
# Was in finish.
c.frame.body.onBodyChanged('Typing')
c.recolor()
finish = exit
abort = exit
#@+node:ekr.20061031131434.18: *4* ac.append/begin/popTabName
def appendTabName(self, word):
self.setTabName(self.tabName + '.' + word)
def beginTabName(self, word):
self.setTabName('AutoComplete ' + word)
def clearTabName(self):
self.setTabName('AutoComplete ')
def popTabName(self):
s = self.tabName
i = s.rfind('.', 0, -1)
if i > -1:
self.setTabName(s[0: i])
# Underscores are not valid in Pmw tab names!
def setTabName(self, s):
c = self.c
if self.tabName:
c.frame.log.deleteTab(self.tabName)
self.tabName = s.replace('_', '') or ''
c.frame.log.clearTab(self.tabName)
#@+node:ekr.20110509064011.14556: *4* ac.attr_matches
def attr_matches(self, s, namespace):
"""Compute matches when string s is of the form name.name....name.
Evaluates s using eval(s,namespace)
Assuming the text is of the form NAME.NAME....[NAME], and is evaluatable in
the namespace, it will be evaluated and its attributes (as revealed by
dir()) are used as possible completions.
For class instances, class members are also considered.
**Warning**: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
trace = False and not g.unitTesting
verbose = False
# Seems to work great. Catches things like ''.<tab>
m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", s)
if not m:
return []
expr, attr = m.group(1, 3)
try:
safe_expr = self.strip_brackets(expr)
obj = eval(safe_expr, namespace)
except Exception:
return []
# Build the result.
words = dir(obj)
n = len(attr)
result = ["%s.%s" % (expr, w) for w in words if w[: n] == attr]
if trace:
if verbose:
g.trace(s, result)
else:
g.trace(repr(s))
return result
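# Illustrative sketch (assumption: "math" is present in the evaluation
# namespace; not part of the original source). The prefix is split into an
# evaluatable expression and a partial attribute name, and dir() of the
# evaluated object supplies the candidates:
#
#     self.attr_matches("math.sq", {"math": math})
#         ->  ['math.sqrt']  (plus any other dir(math) names starting with "sq")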
#@+node:ekr.20061031131434.11: *4* ac.auto_completer_state_handler
def auto_completer_state_handler(self, event):
'''Handle all keys while autocompleting.'''
trace = False and not g.app.unitTesting
c, k, tag = self.c, self.k, 'auto-complete'
state = k.getState(tag)
ch = event.char if event else ''
stroke = event.stroke if event else ''
is_plain = k.isPlainKey(stroke)
if trace: g.trace('state: %s, ch: %s, stroke: %s' % (
state, repr(ch), repr(stroke)))
if state == 0:
c.frame.log.clearTab(self.tabName)
common_prefix, prefix, tabList = self.compute_completion_list()
if tabList:
k.setState(tag, 1, handler=self.auto_completer_state_handler)
else:
if trace: g.trace('abort: not tabList')
self.exit()
elif ch in ('\n', 'Return'):
self.exit()
elif ch == 'Escape':
self.exit()
elif ch in ('\t', 'Tab'):
self.compute_completion_list()
elif ch in ('\b', 'BackSpace'):
self.do_backspace()
elif ch == '.':
self.insert_string('.')
self.compute_completion_list()
elif ch == '?':
self.info()
elif ch == '!':
# Toggle between verbose and brief listing.
self.verbose = not self.verbose
kind = 'ON' if self.verbose else 'OFF'
message = 'verbose completions %s' % (kind)
g.es_print(message)
# This doesn't work because compute_completion_list clears the autocomplete tab.
# self.put('', message, tabName=self.tabName)
# This is almost invisible: the fg='red' is not honored.
c.frame.putStatusLine(message, fg='red')
self.compute_completion_list()
# elif ch == 'Down' and hasattr(self,'onDown'):
# self.onDown()
# elif ch == 'Up' and hasattr(self,'onUp'):
# self.onUp()
elif is_plain and ch and ch in string.printable:
if trace: g.trace('plain: %r' % (ch))
self.insert_general_char(ch)
else:
if stroke == k.autoCompleteForceKey:
# This is probably redundant because completions will exist.
# However, it doesn't hurt, and it may be useful rarely.
if trace: g.trace('auto-complete-force', repr(stroke))
common_prefix, prefix, tabList = self.compute_completion_list()
if tabList:
self.show_completion_list(common_prefix, prefix, tabList)
else:
g.warning('No completions')
self.exit()
return None
else:
if trace: g.trace('ignore non plain key', repr(stroke), g.callers())
self.abort() # 2011/06/17.
return 'do-standard-keys'
#@+node:ekr.20061031131434.20: *4* ac.calltip & helpers
def calltip(self):
'''Show the calltips for the present prefix.
ch is '(' if the user has just typed it.
'''
obj, prefix = self.get_object()
if obj:
self.calltip_success(prefix, obj)
else:
self.calltip_fail(prefix)
self.exit()
#@+node:ekr.20110512090917.14468: *5* ac.calltip_fail
def calltip_fail(self, prefix):
'''Evaluation of prefix failed.'''
trace = False and not g.unitTesting
if trace:
g.es('eval failed for "%s"' % repr(prefix))
self.insert_string('(')
#@+node:ekr.20110512090917.14469: *5* ac.calltip_success
def calltip_success(self, prefix, obj):
trace = False and not g.unitTesting
try:
# Get the parenthesized argument list.
s1, s2, s3, s4 = inspect.getargspec(obj)
s = inspect.formatargspec(s1, s2, s3, s4)
if trace: g.trace(obj, repr(s))
except Exception:
if trace: g.trace('inspect failed. obj: %s' % (obj))
self.insert_string('(')
return
# Clean s and insert it: don't include the opening "(".
if g.match(s, 1, 'self,'):
s = s[6:].strip()
elif g.match_word(s, 1, 'self'):
s = s[5:].strip()
else:
s = s[1:].strip()
self.insert_string("(", select=False)
self.insert_string(s, select=True)
#@+node:ekr.20061031131434.28: *4* ac.compute_completion_list & helper
def compute_completion_list(self):
'''Return the autocompleter completion list.'''
trace = False and not g.unitTesting
verbose = False
# True: report hits and misses.
# False: report misses.
prefix = self.get_autocompleter_prefix()
key, options = self.get_cached_options(prefix)
if options:
if trace and verbose: g.trace('**prefix hit: %s, %s' % (prefix, key))
else:
if trace: g.trace('**prefix miss: %s, %s' % (prefix, key))
options = self.get_completions(prefix)
tabList, common_prefix = g.itemsMatchingPrefixInList(
prefix, options, matchEmptyPrefix=False)
if not common_prefix:
tabList, common_prefix = g.itemsMatchingPrefixInList(
prefix, options, matchEmptyPrefix=True)
if trace and verbose:
g.trace('prefix: %s, common: %s, len(tabList): %s' % (
repr(prefix), repr(common_prefix), len(tabList)))
# if verbose: g.trace('options[:10]...\n',
# g.listToString(options[:10],sort=True))
if tabList:
self.show_completion_list(common_prefix, prefix, tabList)
return common_prefix, prefix, tabList
#@+node:ekr.20110514051607.14524: *5* ac.get_cached_options
def get_cached_options(self, prefix):
trace = False and not g.unitTesting
d = self.completionsDict
# Search the completions Dict for shorter and shorter prefixes.
i = len(prefix)
while i > 0:
key = prefix[: i]
i -= 1
# Make sure we report hits only of real objects.
if key.endswith('.'):
if trace: g.trace('== period: %s' % (key))
return key, []
options = d.get(key)
if options:
if trace: g.trace('== hit: %s len: %s' % (
key, len(options)))
return key, options
else:
if trace: g.trace('== miss: %s' % (key))
return None, []
#@+node:ekr.20061031131434.29: *4* ac.do_backspace
def do_backspace(self):
'''Delete the character and recompute the completion list.'''
c, w = self.c, self.w
c.bodyWantsFocusNow()
i = w.getInsertPoint()
if i <= 0:
self.exit()
return
w.delete(i - 1, i)
w.setInsertPoint(i - 1)
if i <= 1:
self.exit()
else:
# Update the list. Abort if there is no prefix.
common_prefix, prefix, tabList = self.compute_completion_list()
if not prefix:
self.exit()
#@+node:ekr.20110510133719.14548: *4* ac.do_qcompleter_tab (not used)
def do_qcompleter_tab(self, prefix, options):
'''Return the longest common prefix of all the options.'''
trace = False and not g.unitTesting
matches, common_prefix = g.itemsMatchingPrefixInList(
prefix, options, matchEmptyPrefix=False)
if trace: g.trace(repr(common_prefix))
return common_prefix
#@+node:ekr.20110509064011.14561: *4* ac.get_autocompleter_prefix
def get_autocompleter_prefix(self):
trace = False and not g.unitTesting
# Only the body pane supports auto-completion.
w = self.c.frame.body.wrapper
s = w.getAllText()
if not s: return ''
i = w.getInsertPoint() - 1
i1 = i = j = max(0, i)
while i >= 0 and (s[i].isalnum() or s[i] in '._'):
i -= 1
i += 1
j += 1
prefix = s[i: j]
if trace: g.trace(repr(prefix), 'ins', s[i1:])
return prefix
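# Illustrative sketch (not part of the original source): with the body text
# "x = self.foo" and the insert point at the end of the line, the scan walks
# back over alphanumeric, '.' and '_' characters and returns "self.foo" as
# the autocompletion prefix.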
#@+node:ekr.20110512212836.14471: *4* ac.get_completions & helpers
def get_completions(self, prefix):
trace = False and not g.unitTesting
verbose = False # True: report hits and misses. False: report misses.
d = self.completionsDict
# Precompute the codewise completions for '.self'.
if not self.codewiseSelfList:
aList = self.get_codewise_completions('self.')
self.codewiseSelfList = [z[5:] for z in aList]
d['self.'] = self.codewiseSelfList
# Use the cached list if it exists.
aList = d.get(prefix)
if aList:
if trace and verbose: g.trace('**cache hit: %s' % (prefix))
return aList
# elif self.use_codewise:
# aList = self.get_codewise_completions(prefix)
# else:
# aList = self.get_leo_completions(prefix)
# Always try the Leo completions first.
# Fall back to the codewise completions.
aList = (
self.get_leo_completions(prefix) or
self.get_codewise_completions(prefix)
)
if trace: g.trace('**cache miss: %s' % (prefix))
d[prefix] = aList
return aList
#@+node:ekr.20110510120621.14539: *5* ac.get_codewise_completions & helpers
def get_codewise_completions(self, prefix):
'''Use codewise to | |
* self.t)
Y2 += 0.00000000007 * math.cos(0.56178116893 + 69160.04624808849 * self.t)
Y2 += 0.00000000009 * math.cos(4.89006567905 + 7994.77225950771 * self.t)
Y2 += 0.00000000009 * math.cos(1.87204677749 + 73711.99974514729 * self.t)
Y2 += 0.00000000008 * math.cos(6.11573252209 + 59414.7256922319 * self.t)
Y2 += 0.00000000008 * math.cos(2.34935594859 + 52183.1636476327 * self.t)
Y2 += 0.00000000007 * math.cos(0.38833894888 + 64742.2018006147 * self.t)
Y2 += 0.00000000007 * math.cos(0.26691004914 + 78267.83457756409 * self.t)
Y2 += 0.00000000007 * math.cos(3.93557943129 + 26107.81671995749 * self.t)
Y2 += 0.00000000007 * math.cos(1.72211044919 + 26068.47719815791 * self.t)
Y2 += 0.00000000006 * math.cos(0.91009879641 + 25028.7650288685 * self.t)
Y2 += 0.00000000007 * math.cos(4.52931279940 + 51962.7510051939 * self.t)
Y2 += 0.00000000007 * math.cos(3.63772636438 + 18208.05780571871 * self.t)
Y2 += 0.00000000008 * math.cos(1.62956568948 + 44937.3745090319 * self.t)
Y2 += 0.00000000008 * math.cos(3.38799810242 + 51066.18391357149 * self.t)
Y2 += 0.00000000006 * math.cos(4.15575122576 + 105411.23831396949 * self.t)
Y2 += 0.00000000007 * math.cos(3.29006350511 + 65697.80154222329 * self.t)
Y2 += 0.00000000006 * math.cos(1.27230132469 + 88477.23878841709 * self.t)
Y2 += 0.00000000006 * math.cos(1.32946065086 + 78477.25233764409 * self.t)
Y2 += 0.00000000006 * math.cos(4.53641896383 + 52602.6482915079 * self.t)
Y2 += 0.00000000007 * math.cos(2.19484677855 + 78283.62300310588 * self.t)
Y2 += 0.00000000006 * math.cos(0.97859883313 + 19805.0711090663 * self.t)
Y2 += 0.00000000005 * math.cos(5.12029859122 + 108903.80988083909 * self.t)
Y2 += 0.00000000006 * math.cos(0.51108107596 + 129380.37759516528 * self.t)
Y2 += 0.00000000005 * math.cos(3.17438180692 + 74821.37829724069 * self.t)
Y2 += 0.00000000007 * math.cos(3.90084269972 + 45892.9742506405 * self.t)
Y2 += 0.00000000005 * math.cos(5.53465282046 + 1059.1381127057 * self.t)
Y2 += 0.00000000005 * math.cos(0.53945271085 + 26084.2656236997 * self.t)
Y2 += 0.00000000005 * math.cos(5.12008322277 + 26092.0282944157 * self.t)
Y2 += 0.00000000007 * math.cos(3.38101832696 + 71493.2426409605 * self.t)
Y2 += 0.00000000005 * math.cos(2.15419212921 + 81706.5281871715 * self.t)
Y2 += 0.00000000005 * math.cos(5.99431638416 + 51322.85371887989 * self.t)
Y2 += 0.00000000005 * math.cos(5.10141066049 + 33968.23611239669 * self.t)
Y2 += 0.00000000005 * math.cos(6.07497929534 + 78256.83969520529 * self.t)
Y2 += 0.00000000005 * math.cos(4.51394707071 + 26617.35028918529 * self.t)
Y2 += 0.00000000006 * math.cos(5.11905884366 + 78260.07190684808 * self.t)
Y2 += 0.00000000004 * math.cos(2.02155104150 + 25661.5487681817 * self.t)
Y2 += 0.00000000005 * math.cos(3.63607614834 + 94138.57083756929 * self.t)
Y2 += 0.00000000004 * math.cos(1.02197474906 + 22645.08437912529 * self.t)
Y2 += 0.00000000005 * math.cos(3.86831492333 + 93029.19228547589 * self.t)
Y2 += 0.00000000004 * math.cos(6.23205767407 + 22760.01130277751 * self.t)
Y2 += 0.00000000004 * math.cos(2.54287848992 + 29416.28261533789 * self.t)
Y2 += 0.00000000004 * math.cos(5.04658501458 + 76145.18938182769 * self.t)
Y2 += 0.00000000004 * math.cos(5.53028555901 + 181506.18725640948 * self.t)
Y2 += 0.00000000004 * math.cos(4.30468862075 + 13521.9952590749 * self.t)
Y2 += 0.00000000004 * math.cos(5.69769574613 + 99799.90288672148 * self.t)
Y2 += 0.00000000004 * math.cos(2.07912078316 + 79853.02613748988 * self.t)
Y2 += 0.00000000005 * math.cos(3.94088501872 + 44181.52165860769 * self.t)
Y2 += 0.00000000004 * math.cos(5.75049226379 + 4551.7096795753 * self.t)
Y2 += 0.00000000004 * math.cos(5.78252204456 + 32371.2228090491 * self.t)
Y2 += 0.00000000004 * math.cos(6.13397098904 + 32858.36992533629 * self.t)
Y2 += 0.00000000004 * math.cos(5.47303342696 + 54509.2464935039 * self.t)
Y2 += 0.00000000004 * math.cos(3.77296926059 + 155468.28073673949 * self.t)
Y2 += 0.00000000003 * math.cos(1.76022764417 + 162188.99471608088 * self.t)
Y2 += 0.00000000003 * math.cos(3.47863732548 + 24498.58642880689 * self.t)
Y2 += 0.00000000005 * math.cos(5.41867016378 + 78271.06678920689 * self.t)
Y2 += 0.00000000004 * math.cos(0.03915152889 + 234790.88445668427 * self.t)
Y2 += 0.00000000003 * math.cos(3.63049034768 + 77734.26227711148 * self.t)
Y2 += 0.00000000004 * math.cos(5.73459041878 + 7879.84533585549 * self.t)
Y2 += 0.00000000004 * math.cos(4.32304044619 + 64608.09275102969 * self.t)
Y2 += 0.00000000004 * math.cos(6.08591965357 + 51116.18053547569 * self.t)
Y2 += 0.00000000003 * math.cos(3.39565834350 + 28306.41642827749 * self.t)
Y2 += 0.00000000004 * math.cos(4.14203046511 + 39744.0074502341 * self.t)
Y2 += 0.00000000003 * math.cos(6.25044565160 + 104332.1866228805 * self.t)
Y2 += 0.00000000004 * math.cos(2.32061218672 + 80482.71034639288 * self.t)
Y2 += 0.00000000003 * math.cos(3.42862683678 + 37410.32342239509 * self.t)
Y2 += 0.00000000004 * math.cos(6.26655525096 + 24864.32911827909 * self.t)
Y2 += 0.00000000003 * math.cos(0.81932036845 + 103822.16541868569 * self.t)
Y2 += 0.00000000004 * math.cos(2.51942976235 + 27311.96479983631 * self.t)
Y2 += 0.00000000003 * math.cos(0.55238739809 + 120226.47397914348 * self.t)
Y2 += 0.00000000003 * math.cos(2.73238956336 + 102133.09927959349 * self.t)
Y2 += 0.00000000003 * math.cos(6.16210003244 + 150866.33061777649 * self.t)
Y2 += 0.00000000003 * math.cos(3.47088315334 + 90830.10494218889 * self.t)
Y2 += 0.00000000003 * math.cos(1.70583242978 + 77624.05595589208 * self.t)
Y2 += 0.00000000003 * math.cos(1.64244671238 + 129484.15978374588 * self.t)
Y2 += 0.00000000003 * math.cos(3.01438324827 + 125887.80602829569 * self.t)
Y2 += 0.00000000003 * math.cos(3.59751110770 + 130969.45049044909 * self.t)
Y2 += 0.00000000003 * math.cos(3.38263477770 + 58459.1259506233 * self.t)
Y2 += 0.00000000002 * math.cos(2.33982742399 + 6282.83203250789 * self.t)
Y2 += 0.00000000003 * math.cos(5.89330250493 + 26941.3433408097 * self.t)
Y2 += 0.00000000002 * math.cos(3.22048008840 + 3327.89183879669 * self.t)
Y2 += 0.00000000003 * math.cos(4.49509466805 + 18093.13088206649 * self.t)
Y2 += 0.00000000002 * math.cos(2.81219352708 + 25132.0595824821 * self.t)
Y2 += 0.00000000003 * math.cos(4.76356263080 + 26095.2605060585 * self.t)
Y2 += 0.00000000003 * math.cos(4.55467549289 + 71025.27765060609 * self.t)
Y2 += 0.00000000003 * math.cos(4.71882234896 + 43071.65547154729 * self.t)
Y2 += 0.00000000002 * math.cos(4.39575489958 + 114565.14192999128 * self.t)
Y2 += 0.00000000002 * math.cos(1.23106381310 + 71980.87739221469 * self.t)
Y2 += 0.00000000002 * math.cos(0.75134917742 + 91785.70468379749 * self.t)
Y2 += 0.00000000002 * math.cos(0.91717698933 + 2648.6986429565 * self.t)
Y2 += 0.00000000002 * math.cos(2.32776941660 + 78378.3925308913 * self.t)
Y2 += 0.00000000003 * math.cos(3.33005216581 + 104355.73771913828 * self.t)
Y2 += 0.00000000002 * math.cos(0.88668781336 + 26081.03341205689 * self.t)
Y2 += 0.00000000002 * math.cos(1.33604738376 + 77197.45776501608 * self.t)
Y2 += 0.00000000002 * math.cos(1.70708347774 + 365230.88779952223 * self.t)
Y2 += 0.00000000002 * math.cos(1.39529664977 + 78690.55143308209 * self.t)
Y2 += 0.00000000003 * math.cos(3.63831731324 + 52815.9473869459 * self.t)
Y2 += 0.00000000002 * math.cos(2.08527920288 + 25448.2496727437 * self.t)
Y2 += 0.00000000003 * math.cos(5.17799187468 + 77837.35505133009 * self.t)
Y2 += 0.00000000002 * math.cos(5.22616840288 + 104371.52614468009 * self.t)
Y2 += 0.00000000002 * math.cos(2.61518622291 + 25028.27739390149 * self.t)
Y2 += 0.00000000002 * math.cos(1.38915456619 + 11322.9079157879 * self.t)
Y2 += 0.00000000002 * math.cos(0.72086659814 + 77211.68485901768 * self.t)
Y2 += 0.00000000002 * math.cos(2.08668332966 + 105307.45612538888 * self.t)
Y2 += 0.00000000002 * math.cos(2.01988438935 + 78050.65414676809 * self.t)
Y2 += 0.00000000002 * math.cos(1.66916828850 + 27043.2590656993 * self.t)
Y2 += 0.00000000002 * math.cos(2.76016995044 + 104344.74283677948 * self.t)
Y2 += 0.00000000002 * math.cos(0.83226802149 + 46514.23041651269 * self.t)
Y2 += 0.00000000002 * math.cos(0.22774400791 + 77154.08705514569 * self.t)
Y2 += 0.00000000002 * math.cos(0.91172072873 + 26725.1432310819 * self.t)
Y2 += 0.00000000002 * math.cos(1.61031164889 + 25451.1506870335 * self.t)
Y2 += 0.00000000002 * math.cos(0.95099409511 + 131499.14145554368 * self.t)
Y2 += 0.00000000002 * math.cos(4.49976969859 + 61279.9570947495 * self.t)
Y2 += 0.00000000002 * math.cos(5.98345605195 + 89586.61734051049 * self.t)
Y2 += 0.00000000002 * math.cos(2.85946010033 + 30639.61282114949 * self.t)
Y2 += 0.00000000002 * math.cos(0.15838364848 + 97581.14578253469 * self.t)
Y2 += 0.00000000002 * math.cos(6.14579824758 + 78149.51395352086 * self.t)
Y2 += 0.00000000002 * math.cos(5.58929876175 + 7238.4317741165 * self.t)
Y2 += 0.00000000002 * math.cos(0.83010213570 + 426.3543733925 * self.t)
Y2 += 0.00000000002 * math.cos(5.20457051939 + 107794.43132874569 * self.t)
Y2 += 0.00000000002 * math.cos(1.91271152917 + 47623.60896860609 * self.t)
Y2 += 0.00000000002 * math.cos(4.42235381341 + 104565.15547921829 * self.t)
Y2 += 0.00000000002 * math.cos(0.42021990927 + 44295.9609472929 * self.t)
Y2 += 0.00000000002 * math.cos(3.90488175057 + 18093.6185170335 * self.t)
Y2 += 0.00000000002 * math.cos(0.51702864254 + 10213.0417287275 * self.t)
Y2 += 0.00000000002 * math.cos(2.19510126559 + 104358.96993078108 * self.t)
Y2 += 0.00000000002 * math.cos(1.87197741484 + 104347.97504842229 * self.t)
Y2 += 0.00000000002 * math.cos(1.89656096023 + 134991.71302241329 * self.t)
Y2 += 0.00000000002 * math.cos(3.81068190615 + 129910.06856025988 * self.t)
Y2 += 0.00000000002 * math.cos(3.48333455064 + 57836.89451481709 * self.t)
Y2 += 0.00000000002 * math.cos(5.38520038611 + 25557.96835899609 * self.t)
Y2 += 0.00000000002 * math.cos(6.12255852160 + 54294.81396101029 * self.t)
Y2 += 0.00000000002 * math.cos(1.91069399856 + 102762.78348849648 * self.t)
Y2 += 0.00000000001 * math.cos(5.94439573345 + 7238.9194090835 * self.t)
Y2 += 0.00000000002 * math.cos(2.71520511595 + 25934.3681485729 * self.t)
Y2 += 0.00000000001 * math.cos(6.12042545447 + 151975.70916986988 * self.t)
Y2 += 0.00000000002 * math.cos(2.95370958319 + 26241.9257695425 * self.t)
Y2 += 0.00000000001 * math.cos(6.21917309565 + 131549.13807744789 * self.t)
Y2 += 0.00000000001 * math.cos(2.63165117521 + 23968.89546371229 * self.t)
Y2 += 0.00000000002 * math.cos(1.38299478051 + 25021.6514818677 * self.t)
Y2 += 0.00000000002 * math.cos(4.14961473905 + 27154.6424362477 * self.t)
Y2 += 0.00000000001 * math.cos(0.59279120743 + 181556.18387831368 * self.t)
Y2 += 0.00000000001 * math.cos(4.95292728782 + 27140.41534224609 * self.t)
| |
Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.210273,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 5.86579,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0495887,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.241638,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.265616,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.115524,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.186336,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0940563,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.395917,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0914043,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.47088,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0501805,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0048456,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0536937,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0358362,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.103874,
'Execution Unit/Register Files/Runtime Dynamic': 0.0406818,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.125518,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.311915,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.39553,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000350937,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000350937,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000322059,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00013364,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00051479,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00153872,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00277904,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0344503,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.19133,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0797619,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.117009,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.5162,
'Instruction Fetch Unit/Runtime Dynamic': 0.235539,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0462159,
'L2/Runtime Dynamic': 0.00399916,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.59016,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.654742,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.043774,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.043774,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.79687,
'Load Store Unit/Runtime Dynamic': 0.914394,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.107939,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.215879,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0383079,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0390002,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.136249,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0130814,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.358164,
'Memory Management Unit/Runtime Dynamic': 0.0520816,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 15.7778,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.132002,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00681857,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0567221,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': | |
"""
recipestr1 = """
class TestApplyMacrosSource(PackageRecipe):
name = 'tmpwatch'
version = '2.9.0'
clearBuildReqs()
def cleanup(r, builddir, destdir):
pass
def setup(r):
# avoid cleanup
r.macros.sourcemacros = 'source-apply-macros'
r.addSource('myfile', contents="%(sourcemacros)s %(destdir)s",
macros=True)
r.macros.a = 'XXX'
r.macros.b = 'YYY'
r.macros.c = 'ZZZ'
r.addSource('sourcefile', macros=True, mode=0676)
"""
self.resetWork()
(built, d) = self.buildRecipe(recipestr1, "TestApplyMacrosSource")
rv = self.findInFile(util.joinPaths(self.buildDir, 'tmpwatch/tmpwatch-2.9.0/myfile'), 'source-apply-macros')
assert(rv != -1)
rv = self.findInFile(util.joinPaths(self.buildDir, 'tmpwatch/tmpwatch-2.9.0/myfile'), self.cfg.buildPath + '/tmpwatch/_ROOT_')
assert(rv != -1)
rv = self.findInFile(util.joinPaths(self.buildDir, 'tmpwatch/tmpwatch-2.9.0/sourcefile'), 'XXX YYY ZZZ')
assert(rv != -1)
assert(os.stat(util.joinPaths(self.buildDir, 'tmpwatch/tmpwatch-2.9.0/sourcefile'))[stat.ST_MODE] & 07777 == 0676)
def testPatchFilter(self):
recipestr = """
class TestPatchFilter(PackageRecipe):
name = 'test'
version = '1'
clearBuildReqs()
def setup(r):
# avoid cleanup
r.addSource('unpatched1')
r.addSource('unpatched2')
r.addPatch('patchToFilter.patch')
"""
self.assertRaises(source.SourceError, self.buildRecipe, recipestr,
'TestPatchFilter')
recipestr = """
class TestPatchFilter(PackageRecipe):
name = 'test'
version = '1'
clearBuildReqs()
def setup(r):
# avoid cleanup
r.addSource('unpatched1')
r.addSource('unpatched2')
r.addPatch('patchToFilter.patch', filter='sed s/Oops// |cat | cat')
"""
self.buildRecipe(recipestr, 'TestPatchFilter')
def testAction(self):
recipestr1 = """
class TestAction(PackageRecipe):
name = 'tmpwatch'
version = '2.9.0'
clearBuildReqs()
def cleanup(r, builddir, destdir):
pass
def setup(r):
r.addAction('mkdir asdf')
r.addAction('touch foo', dir='asdf')
"""
self.resetWork()
(built, d) = self.buildRecipe(recipestr1, "TestAction")
# should not raise an error
os.stat(util.joinPaths(self.buildDir, 'tmpwatch/tmpwatch-2.9.0/asdf/foo'))
def findInFile(self, filename, key):
f = open(filename)
contents = f.read()
return contents.find(key)
def testAutoSourcePermissions(self):
permsRecipe = """\
class TestPerms(PackageRecipe):
name = 'test'
version = '1.0'
clearBuildReqs()
def setup(r):
r.addSource('dc_client.init', rpm='distcache-1.4.5-2.src.rpm')
r.Run('test -x dc_client.init')
# get rid of "cowardlily refusing" message
r.Create('/foo')
"""
self.resetWork()
self.resetRepository()
self.repos = self.openRepository()
origDir = os.getcwd()
os.chdir(self.workDir)
self.newpkg('test')
os.chdir('test')
self.writeFile('test.recipe', permsRecipe)
self.addfile('test.recipe')
self.commit()
os.chdir('..')
shutil.rmtree('test')
os.chdir(origDir)
self.resetCache()
# this should fail if permissions are not restored
self.cookItem(self.repos, self.cfg, 'test', requireCleanSources=False)
def testAutoMainDir(self):
"""
Test mainDir automagic guessing.
"""
recipestr1 = """
class TestSource(PackageRecipe):
name = 'test'
version = '1'
clearBuildReqs()
def setup(r):
# asdf.tar.gz contains asdf/fdsa and bam, not test-1/fdsa
r.addArchive('asdf.tar.gz')
# the next line will only work if mainDir was auto-set to asdf
r.Install('fdsa', '/')
"""
(built, d) = self.buildRecipe(recipestr1, "TestSource")
recipestr2 = """
class TestSource(PackageRecipe):
name = 'test'
version = '1'
clearBuildReqs()
def setup(r):
# asdf.tar.gz contains asdf/fdsa and bam, not test-1/fdsa
r.addArchive('asdf.tar.gz', dir='blah')
# the next line will only work if mainDir was auto-set to blah
r.Install('asdf/fdsa', '/')
"""
(built, d) = self.buildRecipe(recipestr2, "TestSource")
recipestr3 = """
class TestSource(PackageRecipe):
name = 'foo'
version = '1.0'
clearBuildReqs()
def setup(r):
# foo-1.0.tar.gz has foo-1.0/a and blah/b
r.addArchive('foo-1.0.tar.gz')
r.Install('a', '/')
"""
(built, d) = self.buildRecipe(recipestr3, "TestSource")
# Test for special characters in the filename
recipestr4 = """
class TestSource(PackageRecipe):
name = 'foo'
version = '1.0'
clearBuildReqs()
def setup(r):
# foo-1.0.tar.gz has foo-1.0/a and blah/b
r.addArchive('foo-1.0&;.tar.gz')
r.Install('a', '/')
"""
(built, d) = self.buildRecipe(recipestr4, "TestSource")
def testAutoMainDirGuessFailure(self):
recipestr = """
class TestSource(PackageRecipe):
name = 'foo'
version = '1.0'
clearBuildReqs()
def setup(r):
# foo-1.0.tar.gz has foo-1.0/a and blah/b
r.addSource('macros')
r.addArchive('distcc-2.9.tar.bz2')
"""
self.assertRaises(source.SourceError, self.buildRecipe, recipestr,
"TestSource")
def testSourceMagic(self):
d = tempfile.mkdtemp()
try:
            # copy asdf.tar.gz to a file without the .tar.gz extension
shutil.copyfile(resources.get_archive() + '/asdf.tar.gz',
d + '/asdf')
# look in our new source directory for sources
self.cfg.sourceSearchDir = d
r = policytest.DummyRecipe(self.cfg)
# test the Archive class when the archive does not end in .gz
os.mkdir('/'.join((r.macros.builddir, r.theMainDir)))
a = source.Archive(r, 'asdf')
a.doAction()
assert(os.path.isdir(r.macros.builddir + '/asdf'))
assert(os.path.isfile(r.macros.builddir + '/asdf/fdsa'))
finally:
shutil.rmtree(d)
def testAddBadPatch(self):
# make sure we don't get a Y/N prompt when a patch fails to apply
recipestr1 = """\
class PatchTest(PackageRecipe):
name = 'test'
version = '1.0'
clearBuildReqs()
def setup(r):
r.addArchive('tmpwatch-2.9.0.tar.gz', rpm='tmpwatch-2.9.0-2.src.rpm')
r.addPatch('unrelated.patch')
"""
self.logFilter.add()
rc = self.captureOutput(self.buildRecipe, recipestr1, "PatchTest",
_returnException=True, logLevel=log.INFO)
self.logFilter.remove()
msg = '\n'.join(x for x in self.logFilter.records if 'patch' in x)
expected = """+ attempting to apply /unrelated.patch to /test/tmpwatch-2.9.0/ with patch level(s) 1, 0, 2, 3
+ patch did not apply with --dry-run, trying level 1 directly
+ patch level 1 FAILED
+ can't find file to patch at input line 3
Perhaps you used the wrong -p or --strip option?
The text leading up to this was:
--------------------------
|--- foo1\t2006-06-19 21:52:47.000000000 -0400
|+++ foo2\t2006-06-19 21:52:50.000000000 -0400
--------------------------
File to patch:
Skip this patch? [y]
Skipping patch.
1 out of 1 hunk ignored
+ patch level 0 FAILED
+ can't find file to patch at input line 3
Perhaps you used the wrong -p or --strip option?
The text leading up to this was:
--------------------------
|--- foo1\t2006-06-19 21:52:47.000000000 -0400
|+++ foo2\t2006-06-19 21:52:50.000000000 -0400
--------------------------
File to patch:
Skip this patch? [y]
Skipping patch.
1 out of 1 hunk ignored
+ patch level 2 FAILED
+ can't find file to patch at input line 3
Perhaps you used the wrong -p or --strip option?
The text leading up to this was:
--------------------------
|--- foo1\t2006-06-19 21:52:47.000000000 -0400
|+++ foo2\t2006-06-19 21:52:50.000000000 -0400
--------------------------
File to patch:
Skip this patch? [y]
Skipping patch.
1 out of 1 hunk ignored
+ patch level 3 FAILED
+ can't find file to patch at input line 3
Perhaps you used the wrong -p or --strip option?
The text leading up to this was:
--------------------------
|--- foo1\t2006-06-19 21:52:47.000000000 -0400
|+++ foo2\t2006-06-19 21:52:50.000000000 -0400
--------------------------
File to patch:
Skip this patch? [y]
Skipping patch.
1 out of 1 hunk ignored
error: could not apply patch /unrelated.patch in directory /test/tmpwatch-2.9.0/"""
# normalize variable paths in the message
msg = msg.replace(self.buildDir, '')
msg = msg.replace(self.sourceSearchDir, '')
# centos behavioral differences
msg = msg.replace(
'missing header for unified diff at line 3 of patch\n', '')
self.assertEqual(msg, expected)
if rc[0].__class__ != source.SourceError:
self.fail('expected SourceError exception not raised')
# make sure no stdout/stderr output was produced
if rc[1]:
self.fail('unexpected output: %s' %rc[1])
def testAddGoodPatch(self):
recipestr1 = """\
class PatchTest(PackageRecipe):
name = 'test'
version = '1.0'
clearBuildReqs()
def setup(r):
r.addArchive('tmpwatch-2.9.0.tar.gz', rpm='tmpwatch-2.9.0-2.src.rpm')
r.addPatch('tmpwatch.fakebug.patch')
"""
self.logFilter.add()
rc = self.captureOutput(self.buildRecipe, recipestr1, "PatchTest",
logLevel=log.INFO)
self.logFilter.remove()
msg = '\n'.join(x for x in self.logFilter.records if 'patch' in x)
expected = """+ attempting to apply /tmpwatch.fakebug.patch to /test/tmpwatch-2.9.0/ with patch level(s) 1, 0, 2, 3
+ patching file tmpwatch.c
+ applied successfully with patch level 1"""
# normalize variable paths in the message
msg = msg.replace(self.buildDir, '')
msg = msg.replace(self.sourceSearchDir, '')
self.assertEqual(msg, expected)
# make sure no stdout/stderr output was produced
if rc[1]:
self.fail('unexpected output: %s' %rc[1])
def testAddGoodLevel0Patch(self):
recipestr1 = """\
class PatchTest(PackageRecipe):
name = 'test'
version = '1.0'
clearBuildReqs()
def setup(r):
r.addArchive('tmpwatch-2.9.0.tar.gz', rpm='tmpwatch-2.9.0-2.src.rpm')
r.addPatch('tmpwatch.fakebug.level0.patch')
"""
self.logFilter.add()
rc = self.captureOutput(self.buildRecipe, recipestr1, "PatchTest",
logLevel=log.INFO)
self.logFilter.remove()
msg = '\n'.join(x for x in self.logFilter.records if 'patch' in x)
expected = """+ attempting to apply /tmpwatch.fakebug.level0.patch to /test/tmpwatch-2.9.0/ with patch level(s) 1, 0, 2, 3
+ patching file tmpwatch.c
+ applied successfully with patch level 0"""
# normalize variable paths in the message
msg = msg.replace(self.buildDir, '')
msg = msg.replace(self.sourceSearchDir, '')
self.assertEqual(msg, expected)
# make sure no stdout/stderr output was produced
if rc[1]:
self.fail('unexpected output: %s' %rc[1])
def testAddGoodButRejectedPatch(self):
recipestr1 = """\
class PatchTest(PackageRecipe):
name = 'test'
version = '1.0'
clearBuildReqs()
def setup(r):
r.addArchive('tmpwatch-2.9.0.tar.gz', rpm='tmpwatch-2.9.0-2.src.rpm')
r.addPatch('tmpwatch.fakebug.rej.patch')
"""
self.logFilter.add()
rc = self.captureOutput(self.buildRecipe, recipestr1, "PatchTest",
_returnException=True, logLevel=log.INFO)
self.logFilter.remove()
msg = '\n'.join(x for x in self.logFilter.records if 'patch' in x)
expected = """+ attempting to apply /tmpwatch.fakebug.rej.patch to /test/tmpwatch-2.9.0/ with patch level(s) 1, 0, 2, 3
+ patch did not apply with --dry-run, trying level 1 directly
+ patch level 1 FAILED
+ patching file tmpwatch.c
Hunk #1 FAILED at 419.
1 out of 1 hunk FAILED -- saving rejects to file tmpwatch.c.rej
+ patch level 0 failed - probably wrong level
+ patch level 2 failed - probably wrong level
+ patch level 3 failed - probably wrong level
error: could not apply patch /tmpwatch.fakebug.rej.patch in directory /test/tmpwatch-2.9.0/"""
# normalize variable paths in the message
msg = msg.replace(self.buildDir, '')
msg = msg.replace(self.sourceSearchDir, '')
self.assertEqual(msg, expected)
# make sure no stdout/stderr output was produced
if rc[1]:
self.fail('unexpected output: %s' %rc[1])
# make sure we've written out the reject file
assert(os.path.exists(self.buildDir + '/test/tmpwatch-2.9.0/tmpwatch.c.rej'))
def testAddPartiallyApplicablePatch(self):
# patch partially applies at level one and completely
# applies at level 2.
recipestr1 = """\
class PatchTest(PackageRecipe):
name = 'test'
version = '1.0'
clearBuildReqs()
def setup(r):
contents = '\\n'.join(str(x) for x in range(1, 21)) + '\\n'
r.addSource('foo', contents=contents)
r.addSource('bar', contents=contents)
r.addAction('mkdir subdir; cp foo subdir/; touch subdir/bar')
r.addPatch('partial.patch')
"""
self.logFilter.add()
rc = self.captureOutput(self.buildRecipe, recipestr1, "PatchTest",
_returnException=True, logLevel=log.INFO)
self.logFilter.remove()
# patch does not partially apply with -p1
assert('change' not in open(self.buildDir + '/test/test-1.0/subdir/foo').read())
# patch applies with -p2
assert('change' in open(self.buildDir + '/test/test-1.0/foo').read())
def testPatchSameFileTwiceInOnePatch(self):
# CNY-2142
recipestr1 = """\
class PatchTest(PackageRecipe):
name = 'test'
version = '1.0'
clearBuildReqs()
def setup(r):
contents = '\\n'.join(str(x) for x in range(1, | |
using re.match(). If a match is found, the "read" event is not logged.
LOG_IGNORE_USER_AGENT = []
LOG_IGNORE_IP_ADDRESS = []
LOG_IGNORE_SUBJECT = []
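# Hypothetical example values (not part of the stock settings), e.g. to skip
# logging reads made by a monitoring bot or issued from localhost:
# LOG_IGNORE_USER_AGENT = [r'^Nagios']
# LOG_IGNORE_IP_ADDRESS = [r'^127\.0\.0\.1$']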
# Ignore "read" events for DataONE trusted subjects.
# True (default):
# - "read" events are not logged in requests made by subjects which are in the
# DATAONE_TRUSTED_SUBJECTS list or are CN subjects in the DataONE environment in
# which this node is registered.
# False:
# - Do not apply this filter.
LOG_IGNORE_TRUSTED_SUBJECT = True
# Ignore "read" events for subjects authenticated by the client side certificate.
# True (default):
# - "read" events are not logged in requests which were authenticated using
# this MN's local client side certificate.
# False:
# - Do not apply this filter.
LOG_IGNORE_NODE_SUBJECT = True
# ==============================================================================
# Path to the client side certificate that GMN uses when initiating TLS/SSL
# connections to Coordinating Nodes. The certificate must be in PEM format.
CLIENT_CERT_PATH = "/var/local/dataone/certs/client/client_cert.pem"
# Path to the private key for the client side certificate set in
# CLIENT_CERT_PATH. The private key must be in PEM format. This is only
# required to be set if the certificate does not contain an embedded private
# key. Otherwise, set it to None.
CLIENT_CERT_PRIVATE_KEY_PATH = (
"/var/local/dataone/certs/client/client_key_nopassword.pem"
)
# Absolute Path to the root of the GMN object store. The object store is a
# directory hierarchy in which the bytes of science objects are stored by
# default.
OBJECT_STORE_PATH = "/var/local/dataone/gmn_object_store"
# Enable this node to be used as a replication target.
# True:
# - DataONE can use this node to store replicas of science objects.
# False (default):
# - This node will advertise that it is not available as a replication target
# in the Replication Policy section of the Node document. It will also enforce
# this setting by refusing calls to MNReplication.replicate().
NODE_REPLICATE = False
# The maximum size, in octets (8-bit bytes), of each object this node is willing
# to accept for replication. Set to -1 to allow objects of any size. E.g. for a
# maximum object size of 1GiB: 1024**3
REPLICATION_MAXOBJECTSIZE = -1
# The total space, in octets (8-bit bytes), that this node is providing for
# replication. Set to -1 to provide unlimited space (not recommended).
# E.g. for a total space of 10 GiB: 10 * 1024**3
REPLICATION_SPACEALLOCATED = 10 * 1024 ** 3
# A list of nodes for which this node is willing to replicate content. To allow
# objects from any node to be replicated, set to an empty list.
# E.g.: ('urn:node:MemberNodeA','urn:node:MemberNodeB','urn:node:MemberNodeC')
REPLICATION_ALLOWEDNODE = ()
# A list of object formats for objects which this node is willing to replicate.
# To allow any object type to be replicated, set to an empty list.
# E.g.: ('eml://ecoinformatics.org/eml-2.0.0', 'CF-1.0')
REPLICATION_ALLOWEDOBJECTFORMAT = ()
# The maximum number of attempts to complete a CN replication request. When this
# number is exceeded, the CN is notified that the requested replica could not be
# created and the request is recorded as failed. By default, replication
# processing occurs once per hour, so a value of 24 (default) causes replication
# to be retried for 24 hours.
REPLICATION_MAX_ATTEMPTS = 24
# Accept only public objects for replication
# True:
# - This node will deny any replication requests for access controlled objects.
# False (default):
# - Replication requests are accepted for access controlled objects provided
# that all other criteria are met.
REPLICATION_ALLOW_ONLY_PUBLIC = False
# The maximum number of attempts to complete a CN System Metadata refresh
# request. When this number is exceeded, the request is recorded as failed and
# permanently removed. By default, System Metadata refresh processing occurs
# once per hour, so a value of 24 (default) causes the refresh to be retried for
# 24 hours.
SYSMETA_REFRESH_MAX_ATTEMPTS = 24
# On startup, GMN connects to the DataONE root CN to get the subject strings of
# the CNs in the environment. For a production instance of GMN, this should be
# set to the default DataONE root for production systems. For a test instance,
# this should be set to the root of the test environment in which GMN is to run.
# The CN subjects are used for controlling access to MN API methods for which
# only CNs should have access. See also the STAND_ALONE setting.
DATAONE_ROOT = d1_common.const.URL_DATAONE_ROOT
# DATAONE_ROOT = 'https://cn-stage.test.dataone.org/cn'
# DATAONE_ROOT = 'https://cn-sandbox.test.dataone.org/cn'
# DATAONE_ROOT = 'https://cn-dev.test.dataone.org/cn'
# Subjects for implicitly trusted DataONE infrastructure. Connections containing client
# side certificates with these subjects bypass access control rules and have access to
# REST interfaces meant only for use by CNs. These subjects are added to the subjects
# discovered by connecting to the DataONE root CN. See the DATAONE_ROOT setting. If the
# STAND_ALONE setting is set to True, these become the only trusted subjects.
DATAONE_TRUSTED_SUBJECTS = set(
[
# For testing and debugging, it's possible to add the public subject here.
# This circumvents all access control, making all content publicly accessible.
# d1_common.const.SUBJECT_PUBLIC, # Only use for testing and debugging
# As with the public subject, it's possible to add the authenticatedUser
# subject here, to let any authenticated user access any method.
# d1_common.const.SUBJECT_AUTHENTICATED, # Only use for testing and debugging
# Specific authenticated users can also be added.
#'any-authenticated-user-subject',
]
)
# When DEBUG=False and a view raises an exception, Django will send emails to
# these addresses with the full exception information.
ADMINS = (("<NAME>", "<EMAIL>"),)
# Enable MNRead.listObjects() for public and regular authenticated users.
#
# True (default):
# - MNRead.listObjects() can be called by any level of user (trusted
# infrastructure, authenticated and public), and results are filtered
# to list only objects to which the user has access.
# False:
# - MNRead.listObjects() can only be called by trusted infrastructure (CNs).
#
# The primary means for a user to discover objects is to use the search
# facilities exposed by CNs. By enabling this option, regular users can also
# discover objects directly on the node by iterating over the object list. This
# is disabled by default because the call can be expensive (as it must create a
# filtered list of all objects on the node for each page that is returned).
# These are also the reasons that DataONE specified implementation of access
# control for public and regular users to be optional for this API.
PUBLIC_OBJECT_LIST = True
# Enable MNCore.getLogRecords() access for public and regular authenticated
# users.
#
# True (default):
# - MNCore.getLogRecords() can be called by any level of user (trusted
# infrastructure, authenticated and public), and results are filtered
# to list only log records to which the user has access. In particular,
# this means that all users can retrieve log records for public objects.
# False:
# - MNCore.getLogRecords() can only be called by trusted infrastructure (CNs).
#
# Regardless of this setting, the DataONE Coordinating Nodes provide access
# controlled log records which are aggregated across all Member Nodes that hold
# replicas of a given object. Setting this to True allows users to get log
# records directly from this Member Node in addition to the aggregated logs
# available from CNs.
PUBLIC_LOG_RECORDS = True
# Set permissions required for calling the MNStorage.update() API method.
# True (default):
# - A user must both have write permission on an object and be in the
# whitelist for Create, Update and Delete in order to update the object.
# False:
# - Any user that has write permission on an object can update it.
REQUIRE_WHITELIST_FOR_UPDATE = True
# This setting determines how Open Archives Initiative Object Reuse and Exchange
# (OAI-ORE) Resource Maps are handled if one or more of the objects referenced
# in the Resource Map do not (yet) exist on this node.
#
# Resource Maps are documents that describe aggregations of web resources. In
# DataONE, they are used for defining data packages, where a data package is a
# collection of science objects. A data package can be downloaded as a
# compressed archive with the MNPackage.getPackage() API method.
#
# For more information about data packages in DataONE, see
# https://releases.dataone.org/online/api-documentation-v2.0.1/design
# /DataPackage.html
#
# To ensure that a Resource Map references only the intended objects, it should
# reference only objects on this node and be created after all referenced
# objects are created. This setting takes effect only when that is not the case.
#
# 'block' (default):
# - Resource Maps can only be created if all referenced objects exist | |
import bpy
from bpy_extras import view3d_utils
from .functions_drawing import *
from .functions_modal import *
from .classes import *
def basic_ui_hover_keymap(self, context, event):
status = {'RUNNING_MODAL'}
# test hover panels
if event.type == 'MOUSEMOVE' and self.click_hold == False:
hov_status = self._window.test_hover(self._mouse_reg_loc.tolist())
self.ui_hover = hov_status != None
# click down move
if event.type == 'MOUSEMOVE' and self.click_hold:
self._window.click_down_move(
self._mouse_reg_loc.tolist(), event.shift, arguments=[event])
# Panel scrolling
if event.type == 'WHEELDOWNMOUSE' and event.value == 'PRESS':
scroll_status = self._window.scroll_panel(10)
if scroll_status:
status = {'RUNNING_MODAL'}
if event.type == 'WHEELUPMOUSE' and event.value == 'PRESS':
scroll_status = self._window.scroll_panel(-10)
if scroll_status:
status = {'RUNNING_MODAL'}
if event.type == 'Z' and event.value == 'PRESS' and event.ctrl:
if event.shift:
move_undostack(self, -1)
self._window.set_key('Undo')
else:
move_undostack(self, 1)
self._window.set_key('Redo')
if event.type == 'LEFTMOUSE' and event.value == 'PRESS' and not event.ctrl:
status = {'RUNNING_MODAL'}
# Test 2d ui selection
panel_status = self._window.test_click_down(
self._mouse_reg_loc.tolist(), event.shift, arguments=[event])
self.click_hold = True
if panel_status:
if panel_status[0] == 'GIZMO':
gizmo_click_init(self, event, panel_status[1])
self.ui_hover = False
else:
if panel_status[0] == {'CANCELLED'}:
status = panel_status[0]
if panel_status[0] == {'FINISHED'}:
status = panel_status[0]
if event.type == 'LEFTMOUSE' and event.value == 'RELEASE':
status = {'RUNNING_MODAL'}
panel_status = self._window.test_click_up(
self._mouse_reg_loc.tolist(), event.shift, arguments=[event])
self.click_hold = False
if panel_status:
rco = view3d_utils.location_3d_to_region_2d(
self.act_reg, self.act_rv3d, self._orbit_ob.location)
if rco != None:
self.gizmo_reposition_offset = [
self._gizmo_panel.position[0]-rco[0], self._gizmo_panel.position[1]-rco[1]]
if panel_status[0] == 'NUMBER_BAR_TYPE':
self.typing = True
if panel_status[0] == {'CANCELLED'}:
status = panel_status[0]
if panel_status[0] == {'FINISHED'}:
status = panel_status[0]
keys = keys_find(self.keymap.keymap_items, event)
if len(keys) == 0:
keys = []
# return status
# else:
# status = {"RUNNING_MODAL"}
# Toggle Gizmo
key = 'Toggle Gizmo'
if key in keys:
self._use_gizmo = not self._use_gizmo
self._gizmo_bool.toggle_bool()
update_orbit_empty(self)
if self._container.sel_status.any():
gizmo_update_hide(self, True)
else:
gizmo_update_hide(self, False)
self._window.set_key(key)
return status
# Cancel modal
if 'Cancel Modal' in keys:
ob = self._object
if self._object.as_pointer() != self._object_pointer:
for o_ob in bpy.data.objects:
if o_ob.as_pointer() == self._object_pointer:
ob = o_ob
finish_modal(self, True)
status = {'CANCELLED'}
# Confirm modal
if 'Confirm Modal' in keys:
ob = self._object
if self._object.as_pointer() != self._object_pointer:
for o_ob in bpy.data.objects:
if o_ob.as_pointer() == self._object_pointer:
ob = o_ob
finish_modal(self, False)
status = {'FINISHED'}
return status
def basic_keymap(self, context, event):
status = {'RUNNING_MODAL'}
# test hover ui
if event.type == 'MOUSEMOVE' and self.click_hold == False:
status = {'RUNNING_MODAL'}
hov_status = self._window.test_hover(self._mouse_reg_loc.tolist())
self.ui_hover = hov_status != None
if self.ui_hover:
return status
# Gizmo stuff
if event.type == 'MIDDLEMOUSE':
self.waiting = True
gizmo_update_hide(self, False)
# view moved so update gizmo
update_gizmo = False
if context.region_data.view_matrix != self.prev_view:
update_gizmo = True
# middle mouse released so update gizmo
if self.waiting and event.value == 'RELEASE':
update_gizmo = True
if update_gizmo and self._use_gizmo:
self._window.update_gizmo_pos(self._orbit_ob.matrix_world)
relocate_gizmo_panel(self)
self.prev_view = context.region_data.view_matrix.copy()
self.waiting = False
if self._container.sel_status.any():
gizmo_update_hide(self, True)
else:
gizmo_update_hide(self, False)
# undo redo
if event.type == 'Z' and event.value == 'PRESS' and event.ctrl:
if event.shift:
move_undostack(self, -1)
self._window.set_key('Undo')
else:
move_undostack(self, 1)
self._window.set_key('Redo')
return status
#
#
#
keys = keys_find(self.keymap.keymap_items, event)
if len(keys) == 0:
nav_status = test_navigation_key(self.nav_list, event)
if nav_status:
self._window.set_key('Navigation')
return {'PASS_THROUGH'}
#
#
# SHORTCUT KEYS
if True:
# hide unselected normals
key = 'Hide Unselected'
if key in keys:
if self._container.sel_status.all() == False:
self._container.hide_status[~self._container.sel_status] = True
add_to_undostack(self, 0)
self._window.set_key(key)
return status
# hide selected normals
key = 'Hide Selected'
if key in keys:
if self._container.sel_status.any():
self._container.hide_status[self._container.sel_status] = True
self._container.sel_status[:] = False
self._container.act_status[:] = False
add_to_undostack(self, 0)
self._window.set_key(key)
return status
# unhide normals
key = 'Unhide'
if key in keys:
if self._container.hide_status.any():
self._container.sel_status[self._container.hide_status] = True
self._container.hide_status[:] = False
add_to_undostack(self, 0)
self._window.set_key(key)
return status
# clear rotation
key = 'Reset Gizmo Rotation'
if key in keys:
if self._use_gizmo:
loc = self._orbit_ob.location.copy()
self._orbit_ob.matrix_world = self._object.matrix_world
self._orbit_ob.matrix_world.translation = loc
self._window.update_gizmo_orientation(
self._orbit_ob.matrix_world)
self._window.set_key(key)
return status
# Rotate Normals
key = 'Rotate Normals'
if key in keys:
update_filter_weights(self)
if self._container.sel_status.any():
avg_loc = np.mean(
self._container.loop_coords[self._container.sel_status], axis=0)
self._window.set_status('VIEW ROTATION')
self._container.cache_norms[:] = self._container.new_norms
self._mode_cache.clear()
self._mode_cache.append(avg_loc)
self._mode_cache.append(0)
self._mode_cache.append(1)
self._mouse_init[:] = self._mouse_reg_loc
self.rotating = True
self._current_tool = self._rotate_norms_tool
self.tool_mode = True
keymap_rotating(self)
gizmo_update_hide(self, False)
self.selection_drawing = True
start_active_drawing(self)
self._window.set_key(key)
return status
# toggle xray
key = 'Toggle X-Ray'
if key in keys:
self._x_ray_mode = not self._x_ray_mode
self._xray_bool.toggle_bool()
self._window.set_key(key)
return status
# Toggle Gizmo
key = 'Toggle Gizmo'
if key in keys:
self._use_gizmo = not self._use_gizmo
self._gizmo_bool.toggle_bool()
update_orbit_empty(self)
if self._container.sel_status.any():
gizmo_update_hide(self, True)
else:
gizmo_update_hide(self, False)
self._window.set_key(key)
return status
# Mirror Normals
key = 'Mirror Normals Start'
if key in keys:
if self._container.sel_status.any():
self.tool_mode = True
self._current_tool = self._mirror_tool
keymap_mirror(self)
self._window.set_key(key)
# Smooth Normals
key = 'Smooth Normals'
if key in keys:
if self._container.sel_status.any():
smooth_normals(self, 0.5)
self._window.set_key(key)
# Flatten Normals
key = 'Flatten Normals Start'
if key in keys:
if self._container.sel_status.any():
self.tool_mode = True
self._current_tool = self._flatten_tool
keymap_flatten(self)
self._window.set_key(key)
# Align Normals
key = 'Align Normals Start'
if key in keys:
if self._container.sel_status.any():
self.tool_mode = True
self._current_tool = self._align_tool
keymap_align(self)
self._window.set_key(key)
# Copy Active Normal
key = 'Copy Active Normal'
if key in keys:
if self._container.act_status.any():
store_active_normal(self)
self._window.set_key(key)
# Paste Stored Normal
key = 'Paste Stored Normal'
if key in keys:
if self._container.sel_status.any():
paste_normal(self)
self._window.set_key(key)
# Paste Active Normal to Selected
key = 'Paste Active Normal to Selected'
if key in keys:
if self._container.act_status.any():
copy_active_to_selected(self)
self._window.set_key(key)
# Set Normals Outside
key = 'Set Normals Outside'
if key in keys:
if self._container.sel_status.any():
set_outside_inside(self, 1)
self._window.set_key(key)
# Set Normals Inside
key = 'Set Normals Inside'
if key in keys:
if self._container.sel_status.any():
set_outside_inside(self, -1)
self._window.set_key(key)
# Flip Normals
key = 'Flip Normals'
if key in keys:
if self._container.sel_status.any():
flip_normals(self)
self._window.set_key(key)
# Reset Vectors
key = 'Reset Vectors'
if key in keys:
if self._container.sel_status.any():
reset_normals(self)
self._window.set_key(key)
# Average Individual Normals
key = 'Average Individual Normals'
if key in keys:
if self._container.sel_status.any():
average_vertex_normals(self)
self._window.set_key(key)
# Average Selected Normals
key = 'Average Selected Normals'
if key in keys:
if self._container.sel_status.any():
average_selected_normals(self)
self._window.set_key(key)
# Set Normals from Faces
key = 'Set Normals From Faces'
if key in keys:
if self._container.sel_status.any():
set_normals_from_faces(self)
self._window.set_key(key)
#
#
# SELECTION KEYS
if True:
# invert selection
key = 'Invert Selection'
if key in keys:
if self._container.hide_status.all() == False:
self._container.act_status[:] = False
self._container.sel_status[~self._container.hide_status] = ~self._container.sel_status[~self._container.hide_status]
self._active_face = None
add_to_undostack(self, 0)
self._window.set_key(key)
return status
# box select
key = 'Box Select Start'
if key in keys:
bpy.context.window.cursor_modal_set('CROSSHAIR')
self._current_tool = self._box_sel_tool
self.tool_mode = True
self.selection_drawing = True
keymap_box_selecting(self)
gizmo_update_hide(self, False)
self._window.set_key(key)
return status
# circle select
key = 'Circle Select Start'
if key in keys:
bpy.context.window.cursor_modal_set('CROSSHAIR')
self._current_tool = self._circle_sel_tool
self.tool_mode = True
self.selection_drawing = True
self.circle_selecting = True
keymap_circle_selecting(self)
gizmo_update_hide(self, False)
self._window.set_key(key)
return status
# lasso select
key = 'Lasso Select Start'
if key in keys:
bpy.context.window.cursor_modal_set('CROSSHAIR')
self._current_tool = self._lasso_sel_tool
self.tool_mode = True
self.selection_drawing = True
keymap_lasso_selecting(self)
gizmo_update_hide(self, False)
self._window.set_key(key)
return status
# select all normals
key = 'Select All'
if key in keys:
change = not self._container.sel_status.all()
if change:
self._container.sel_status[:] = True
self._active_face = None
add_to_undostack(self, 0)
self._window.set_key(key)
return status
# unselect all normals
key = 'Unselect All'
if key in keys:
change = self._container.sel_status.any()
if change:
self._container.sel_status[:] = False
self._container.act_status[:] = False
self._active_point = None
self._active_face = None
add_to_undostack(self, 0)
self._window.set_key(key)
return status
# select linked normals
key = 'Select Linked'
if key in keys:
change = False
if self._container.sel_status.any():
vis_pos = get_visible_points(self)
sel_inds = get_selected_points(self, any_selected=True)
new_sel = get_linked_geo(
self._object_bm, list(sel_inds), vis=list(vis_pos))
if len(new_sel) > 0:
self._container.sel_status[get_vert_ls(
self, new_sel)] = True
add_to_undostack(self, 0)
self._window.set_key(key)
return status
# select linked under cursor normals
key = 'Select Hover Linked'
if key in keys:
# selection test
face_res = ray_cast_to_mouse(self)
if face_res != None:
vis_pos = get_visible_points(self)
hov_inds = [
v.index for v in self._object_bm.faces[face_res[1]].verts if v.index in vis_pos]
new_sel = get_linked_geo(
self._object_bm, hov_inds, vis=list(vis_pos))
if len(new_sel) > 0:
self._container.sel_status[get_vert_ls(
self, new_sel)] = True
add_to_undostack(self, 0)
self._window.set_key(key)
return status
# New Click selection
key = 'New Click Selection'
if key in keys:
sel_res = selection_test(self, False)
if sel_res:
add_to_undostack(self, 0)
self._window.set_key(key)
return status
# Add Click selection
key = 'Add Click Selection'
if key in keys:
sel_res = selection_test(self, True)
if sel_res:
add_to_undostack(self, 0)
self._window.set_key(key)
return status
# New Edge loop selection
key = 'New | |
# Source file: desktop/core/ext-py/openpyxl-2.6.4/openpyxl/styles/builtins.py
from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyxl
# Builtins styles as defined in Part 4 Annex G.2
from .named_styles import NamedStyle
from openpyxl.xml.functions import fromstring
normal = """
<namedStyle builtinId="0" name="Normal">
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
comma = """
<namedStyle builtinId="3" name="Comma">
<alignment/>
<number_format>_-* #,##0.00\\ _$_-;\\-* #,##0.00\\ _$_-;_-* "-"??\\ _$_-;_-@_-</number_format>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
comma_0 = """
<namedStyle builtinId="6" name="Comma [0]">
<alignment/>
<number_format>_-* #,##0\\ _$_-;\\-* #,##0\\ _$_-;_-* "-"\\ _$_-;_-@_-</number_format>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
currency = """
<namedStyle builtinId="4" name="Currency">
<alignment/>
<number_format>_-* #,##0.00\\ "$"_-;\\-* #,##0.00\\ "$"_-;_-* "-"??\\ "$"_-;_-@_-</number_format>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
currency_0 = """
<namedStyle builtinId="7" name="Currency [0]">
<alignment/>
<number_format>_-* #,##0\\ "$"_-;\\-* #,##0\\ "$"_-;_-* "-"\\ "$"_-;_-@_-</number_format>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
percent = """
<namedStyle builtinId="5" name="Percent">
<alignment/>
<number_format>0%</number_format>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
hyperlink = """
<namedStyle builtinId="8" name="Hyperlink" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="10"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>"""
followed_hyperlink = """
<namedStyle builtinId="9" name="Followed Hyperlink" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="11"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>"""
title = """
<namedStyle builtinId="15" name="Title">
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Cambria"/>
<family val="2"/>
<b val="1"/>
<color theme="3"/>
<sz val="18"/>
<scheme val="major"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
headline_1 = """
<namedStyle builtinId="16" name="Headline 1" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom style="thick">
<color theme="4"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<b val="1"/>
<color theme="3"/>
<sz val="15"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
headline_2 = """
<namedStyle builtinId="17" name="Headline 2" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom style="thick">
<color theme="4" tint="0.5"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<b val="1"/>
<color theme="3"/>
<sz val="13"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
headline_3 = """
<namedStyle builtinId="18" name="Headline 3" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom style="medium">
<color theme="4" tint="0.4"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<b val="1"/>
<color theme="3"/>
<sz val="11"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
headline_4 = """
<namedStyle builtinId="19" name="Headline 4">
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<b val="1"/>
<color theme="3"/>
<sz val="11"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
good = """
<namedStyle builtinId="26" name="Good" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFC6EFCE"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color rgb="FF006100"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
bad = """
<namedStyle builtinId="27" name="Bad" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFFFC7CE"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color rgb="FF9C0006"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
neutral = """
<namedStyle builtinId="28" name="Neutral" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFFFEB9C"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color rgb="FF9C6500"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
input = """
<namedStyle builtinId="20" name="Input" >
<alignment/>
<border>
<left style="thin">
<color rgb="FF7F7F7F"/>
</left>
<right style="thin">
<color rgb="FF7F7F7F"/>
</right>
<top style="thin">
<color rgb="FF7F7F7F"/>
</top>
<bottom style="thin">
<color rgb="FF7F7F7F"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFFFCC99"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color rgb="FF3F3F76"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
output = """
<namedStyle builtinId="21" name="Output" >
<alignment/>
<border>
<left style="thin">
<color rgb="FF3F3F3F"/>
</left>
<right style="thin">
<color rgb="FF3F3F3F"/>
</right>
<top style="thin">
<color rgb="FF3F3F3F"/>
</top>
<bottom style="thin">
<color rgb="FF3F3F3F"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFF2F2F2"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<b val="1"/>
<color rgb="FF3F3F3F"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
calculation = """
<namedStyle builtinId="22" name="Calculation" >
<alignment/>
<border>
<left style="thin">
<color rgb="FF7F7F7F"/>
</left>
<right style="thin">
<color rgb="FF7F7F7F"/>
</right>
<top style="thin">
<color rgb="FF7F7F7F"/>
</top>
<bottom style="thin">
<color rgb="FF7F7F7F"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFF2F2F2"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<b val="1"/>
<color rgb="FFFA7D00"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
linked_cell = """
<namedStyle builtinId="24" name="Linked Cell" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom style="double">
<color rgb="FFFF8001"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color rgb="FFFA7D00"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
check_cell = """
<namedStyle builtinId="23" name="Check Cell" >
<alignment/>
<border>
<left style="double">
<color rgb="FF3F3F3F"/>
</left>
<right style="double">
<color rgb="FF3F3F3F"/>
</right>
<top style="double">
<color rgb="FF3F3F3F"/>
</top>
<bottom style="double">
<color rgb="FF3F3F3F"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFA5A5A5"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<b val="1"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
warning = """
<namedStyle builtinId="11" name="Warning Text" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color rgb="FFFF0000"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
note = """
<namedStyle builtinId="10" name="Note" >
<alignment/>
<border>
<left style="thin">
<color rgb="FFB2B2B2"/>
</left>
<right style="thin">
<color rgb="FFB2B2B2"/>
</right>
<top style="thin">
<color rgb="FFB2B2B2"/>
</top>
<bottom style="thin">
<color rgb="FFB2B2B2"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor rgb="FFFFFFCC"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
explanatory = """
<namedStyle builtinId="53" name="Explanatory Text" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<i val="1"/>
<color rgb="FF7F7F7F"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
total = """
<namedStyle builtinId="25" name="Total" >
<alignment/>
<border>
<left/>
<right/>
<top style="thin">
<color theme="4"/>
</top>
<bottom style="double">
<color theme="4"/>
</bottom>
<diagonal/>
</border>
<fill>
<patternFill/>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<b val="1"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_1 = """
<namedStyle builtinId="29" name="Accent1" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="4"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_1_20 = """
<namedStyle builtinId="30" name="20 % - Accent1" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="4" tint="0.7999816888943144"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_1_40 = """
<namedStyle builtinId="31" name="40 % - Accent1" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="4" tint="0.5999938962981048"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_1_60 = """
<namedStyle builtinId="32" name="60 % - Accent1" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="4" tint="0.3999755851924192"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>
"""
accent_2 = """<namedStyle builtinId="33" name="Accent2" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="5"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="0"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection hidden="0" locked="1"/>
</namedStyle>"""
accent_2_20 = """
<namedStyle builtinId="34" name="20 % - Accent2" >
<alignment/>
<border>
<left/>
<right/>
<top/>
<bottom/>
<diagonal/>
</border>
<fill>
<patternFill patternType="solid">
<fgColor theme="5" tint="0.7999816888943144"/>
<bgColor indexed="65"/>
</patternFill>
</fill>
<font>
<name val="Calibri"/>
<family val="2"/>
<color theme="1"/>
<sz val="12"/>
<scheme val="minor"/>
</font>
<protection | |
= True
inside &= (theta[0]>=grid.minage and theta[0]<=grid.maxage)
inside &= (theta[1]>=grid.minmetal and theta[1]<=grid.maxmetal)
    # extinction must be non-negative; no distmod limits
    inside &= (theta[2]>=0)
if inside:
return 0.0
return -np.inf
def emcee_lnprob(theta, x, y, yerr, grid, isonames, fixpars, fixparvals):
"""
This helper function calculates the log probability for the MCMC portion of fit().
Parameters
----------
theta : array
Input parameters [age, metal, ext, distmod].
x : array
Array of x-values for y. Not really used.
y : array
Observed photometry.
yerr : array
Uncertainties in the observed photometry.
grid : IsoGrid object
Grid of isochrones.
isonames : list
The list of isochrone column names to use.
fixpars : list
Boolean list/array indicating if parameters are fixed or not.
    fixparvals : list
List/array of values to use for fixed parameters.
Outputs
-------
lnprob : float
The log probability value, which is the sum of the log prior and the
log likelihood.
"""
#print(theta)
pars = allpars(theta,fixpars,fixparvals)
lp = emcee_lnprior(pars,grid)
if not np.isfinite(lp):
return -np.inf
return lp + emcee_lnlike(theta, x, y, yerr, grid, isonames, fixpars, fixparvals)
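# Illustrative only: a minimal sketch of how emcee_lnprob could be plugged into
# emcee's EnsembleSampler. The helper name, walker count and ball-of-walkers
# initialisation are assumptions, not part of the original module.
def _example_run_emcee(initpar, x, y, yerr, grid, isonames, fixpars, fixparvals,
                       nwalkers=50, nsteps=500):
    import emcee
    ndim = len(initpar)  # number of *free* parameters
    # start the walkers in a small Gaussian ball around the initial guess
    p0 = np.array(initpar) + 1e-4*np.random.randn(nwalkers, ndim)
    sampler = emcee.EnsembleSampler(nwalkers, ndim, emcee_lnprob,
                                    args=(x, y, yerr, grid, isonames, fixpars, fixparvals))
    sampler.run_mcmc(p0, nsteps)
    return sampler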
def objectiveiso(theta, y, yerr, grid, isonames, fixpars, fixparvals):
# Get all 4 parameters
pars = allpars(theta,fixpars,fixparvals)
print('objectiveiso: ',pars)
iso = grid(pars[0],pars[1],pars[2],pars[3],names=isonames)
# Get isochrone photometry array
isophot = isophotprep(iso,isonames)
# Do the comparison
sumdist1,meddist1,chisq1,dist1 = isocomparison(y,isophot,yerr)
return 0.5*chisq1
def funiso(theta,cphot,cphoterr,grid,isonames,fixpars,fixparvals,verbose=False):
""" Return the function and gradient."""
pars = allpars(theta,fixpars,fixparvals)
ncat = len(cphoterr)
nfreepars = np.sum(~fixpars)
grad = np.zeros(nfreepars,float)
pcount = 0
if verbose:
print('funiso: ',pars)
# Original model
iso0 = grid(*pars)
isophot0 = isophotprep(iso0,isonames)
sumdist0,meddist0,chisq0,dist0 = isocomparison(cphot,isophot0,cphoterr)
lnlike0 = 0.5*chisq0
# Bad input parameter values
if (pars[0]<grid.minage or pars[0]>grid.maxage) or (pars[1]<grid.minmetal or pars[1]>grid.maxmetal) or \
(pars[2]<0):
        return np.inf, np.zeros(nfreepars,float)  # dummy zero gradient, one entry per free parameter
# Derivative in age
if fixpars[0]==False:
pars1 = np.array(pars).copy()
step = 0.05*pars[0]
if pars1[0]+step>grid.maxage:
            step = -step
pars1[0] += step
iso1 = grid(*pars1)
isophot1 = isophotprep(iso1,isonames)
sumdist1,meddist1,chisq1,dist1 = isocomparison(cphot,isophot1,cphoterr)
lnlike1 = 0.5*chisq1
grad[pcount] = (lnlike1-lnlike0)/step
pcount += 1
# Derivative in metallicity
if fixpars[1]==False:
pars2 = np.array(pars).copy()
step = 0.05
if pars2[1]+step>grid.maxmetal:
            step = -step
pars2[1] += step
iso2 = grid(*pars2)
isophot2 = isophotprep(iso2,isonames)
sumdist2,meddist2,chisq2,dist2 = isocomparison(cphot,isophot2,cphoterr)
lnlike2 = 0.5*chisq2
grad[pcount] = (lnlike2-lnlike0)/step
pcount += 1
# Derivative in extinction
if fixpars[2]==False:
iso3 = iso0.copy()
step = 0.05
iso3.ext += step
isophot3 = isophotprep(iso3,isonames)
sumdist3,meddist3,chisq3,dist3 = isocomparison(cphot,isophot3,cphoterr)
lnlike3 = 0.5*chisq3
#jac[:,pcount] = dist3-dist0
#jac[:,pcount] = (sumdist3-sumdist0)/step
grad[pcount] = (lnlike3-lnlike0)/step
pcount += 1
# Derivative in distmod
if fixpars[3]==False:
iso4 = iso0.copy()
step = 0.05
iso4.distmod += step
isophot4 = isophotprep(iso4,isonames)
sumdist4,meddist4,chisq4,dist4 = isocomparison(cphot,isophot4,cphoterr)
lnlike4 = 0.5*chisq4
#jac[:,pcount] = dist4-dist0
#jac[:,pcount] = (sumdist4-sumdist0)/step
grad[pcount] = (lnlike4-lnlike0)/step
pcount += 1
return lnlike0,grad
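# Illustrative only: funiso returns (value, gradient), so it can be handed directly to
# scipy.optimize.minimize with jac=True. The wrapper name and the choice of method
# are assumptions, not part of the original module.
def _example_minimize_funiso(theta0, cphot, cphoterr, grid, isonames, fixpars, fixparvals):
    from scipy.optimize import minimize
    res = minimize(funiso, theta0,
                   args=(cphot, cphoterr, grid, isonames, fixpars, fixparvals),
                   jac=True, method='L-BFGS-B')
    # res.x holds the best-fit free parameters; allpars() expands them back to all four
    return allpars(res.x, fixpars, fixparvals)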
def gradiso(theta,cphot,cphoterr,grid,isonames,fixpars,fixparvals):
""" Calculate gradient for Isochrone fits."""
pars = allpars(theta,fixpars,fixparvals)
ncat = len(cphoterr)
nfreepars = np.sum(~fixpars)
grad = np.zeros(nfreepars,float)
pcount = 0
print('gradiso: ',pars)
# Original model
iso0 = grid(*pars)
isophot0 = isophotprep(iso0,isonames)
sumdist0,meddist0,chisq0,dist0 = isocomparison(cphot,isophot0,cphoterr)
lnlike0 = -0.5*chisq0
# Derivative in age
if fixpars[0]==False:
pars1 = np.array(pars).copy()
step = 0.05*pars[0]
if pars1[0]+step>grid.maxage:
            step = -step
pars1[0] += step
iso1 = grid(*pars1)
isophot1 = isophotprep(iso1,isonames)
sumdist1,meddist1,chisq1,dist1 = isocomparison(cphot,isophot1,cphoterr)
lnlike1 = -0.5*chisq1
#jac[:,pcount] = dist1-dist0
#jac[:,pcount] = (sumdist1-sumdist0)/step
grad[pcount] = (lnlike1-lnlike0)/step
pcount += 1
# Derivative in metallicity
if fixpars[1]==False:
pars2 = np.array(pars).copy()
step = 0.05
if pars2[1]+step>grid.maxmetal:
            step = -step
pars2[1] += step
iso2 = grid(*pars2)
isophot2 = isophotprep(iso2,isonames)
sumdist2,meddist2,chisq2,dist2 = isocomparison(cphot,isophot2,cphoterr)
lnlike2 = -0.5*chisq2
#jac[:,pcount] = dist2-dist0
#jac[:,pcount] = (sumdist2-sumdist0)/step
grad[pcount] = (lnlike2-lnlike0)/step
pcount += 1
# Derivative in extinction
if fixpars[2]==False:
iso3 = iso0.copy()
step = 0.05
iso3.ext += step
isophot3 = isophotprep(iso3,isonames)
sumdist3,meddist3,chisq3,dist3 = isocomparison(cphot,isophot3,cphoterr)
lnlike3 = -0.5*chisq3
#jac[:,pcount] = dist3-dist0
#jac[:,pcount] = (sumdist3-sumdist0)/step
grad[pcount] = (lnlike3-lnlike0)/step
pcount += 1
# Derivative in distmod
if fixpars[3]==False:
iso4 = iso0.copy()
step = 0.05
iso4.distmod += step
        isophot4 = isophotprep(iso4,isonames)
sumdist4,meddist4,chisq4,dist4 = isocomparison(cphot,isophot4,cphoterr)
lnlike4 = -0.5*chisq4
#jac[:,pcount] = dist4-dist0
#jac[:,pcount] = (sumdist4-sumdist0)/step
grad[pcount] = (lnlike4-lnlike0)/step
pcount += 1
return -grad
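# Illustrative only: gradiso approximates the gradient of objectiveiso (both work on
# 0.5*chisq), so the two can be cross-checked with scipy.optimize.check_grad. The
# helper name and theta0 handling are assumptions, not part of the original module.
def _example_check_gradiso(theta0, cphot, cphoterr, grid, isonames, fixpars, fixparvals):
    from scipy.optimize import check_grad
    # returns the norm of the difference between the two gradient estimates; it should
    # be small (both use finite differences, just with different step sizes)
    return check_grad(objectiveiso, gradiso, theta0,
                      cphot, cphoterr, grid, isonames, fixpars, fixparvals)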
def hessiso(theta,cphot,cphoterr,grid,isonames,fixpars,fixparvals,diag=False):
""" Calculate hessian matrix, second derivaties wrt parameters."""
pars = allpars(theta,fixpars,fixparvals)
ncat = len(cphoterr)
nfreepars = np.sum(~fixpars)
freeparsind, = np.where(fixpars==False)
hess = np.zeros((nfreepars,nfreepars),float)
# Original model
iso0 = grid(*pars)
isophot0 = isophotprep(iso0,isonames)
sumdist0,meddist0,chisq0,dist0 = isocomparison(cphot,isophot0,cphoterr)
lnlike0 = 0.5*chisq0
steps = [0.05*pars[0],0.05,0.05,0.05]
# Loop over all free parameters
for i in range(nfreepars):
ipar = freeparsind[i]
istep = steps[ipar]
# Make sure steps don't go beyond boundaries
if ipar==0 and (pars[0]+2*istep)>grid.maxage:
istep = -istep
if ipar==1 and (pars[1]+2*istep)>grid.maxmetal:
istep = -istep
# Second loop
for j in np.arange(0,i+1):
jpar = freeparsind[j]
            jstep = steps[jpar]
# Make sure steps don't go beyond boundaries
if jpar==0 and (pars[0]+2*jstep)>grid.maxage:
jstep = -jstep
if jpar==1 and (pars[1]+2*jstep)>grid.maxmetal:
jstep = -jstep
# Calculate the second derivative wrt i and j
# Second derivative of same parameter
# Derivative one step forward and two steps forward
# then take the derivative of these two derivatives
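# Equivalent forward-difference formula implemented below (h = istep):
#   f''(x) ~ [ (f(x+2h)-f(x+h)) - (f(x+h)-f(x)) ] / h^2 = (f(x+2h) - 2*f(x+h) + f(x)) / h^2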
if i==j:
# First first-derivative
pars1 = pars.copy()
pars1[ipar] += istep
if ipar<2:
iso1 = grid(*pars1)
elif ipar==2:
iso1 = iso0.copy()
iso1.ext += istep
elif ipar==3:
iso1 = iso0.copy()
iso1.distmod += istep
isophot1 = isophotprep(iso1,isonames)
sumdist1,meddist1,chisq1,dist1 = isocomparison(cphot,isophot1,cphoterr)
lnlike1 = 0.5*chisq1
deriv1 = (lnlike1-lnlike0)/istep
# Second first-derivative
pars2 = pars.copy()
pars2[ipar] += 2*istep
if ipar<2:
iso2 = grid(*pars2)
elif ipar==2:
iso2 = iso0.copy()
iso2.ext += 2*istep
elif ipar==3:
iso2 = iso0.copy()
iso2.distmod += 2*istep
isophot2 = isophotprep(iso2,isonames)
sumdist2,meddist2,chisq2,dist2 = isocomparison(cphot,isophot2,cphoterr)
lnlike2 = 0.5*chisq2
deriv2 = (lnlike2-lnlike1)/istep
# Second derivative
deriv2nd = (deriv2-deriv1)/istep
hess[i,j] = deriv2nd
# Two different parameters
# Derivative in i at current position
# Derivative in i at current position plus one step in j
# take the derivative of these two derivatives
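# Equivalent forward-difference formula implemented below (hi = istep, hj = jstep):
#   d2f/(di dj) ~ [ (f(x+hi*ei+hj*ej) - f(x+hj*ej)) - (f(x+hi*ei) - f(x)) ] / (hi*hj)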
else:
# Only want diagonal elements
if diag:
continue
# First first-derivative
# derivative in i at current j position
pars1 = pars.copy()
pars1[ipar] += istep
if ipar<2:
iso1 = grid(*pars1)
elif ipar==2:
iso1 = iso0.copy()
iso1.ext += istep
elif ipar==3:
iso1 = iso0.copy()
iso1.distmod += istep
isophot1 = isophotprep(iso1,isonames)
sumdist1,meddist1,chisq1,dist1 = isocomparison(cphot,isophot1,cphoterr)
lnlike1 = 0.5*chisq1
deriv1 = (lnlike1-lnlike0)/istep
# Second first-derivative
# derivative in i at current position plus one step in j
# Likelihood at current position plus one step in j
pars2 = pars.copy()
pars2[jpar] += jstep
if jpar<2:
iso2 = grid(*pars2)
elif jpar==2:
iso2 = iso0.copy()
iso2.ext += jstep
elif jpar==3:
iso2 = iso0.copy()
iso2.distmod += jstep
isophot2 = isophotprep(iso2,isonames)
sumdist2,meddist2,chisq2,dist2 = isocomparison(cphot,isophot2,cphoterr)
lnlike2 = 0.5*chisq2
# Likelihood at current position plus one step in i and j
pars3 = pars.copy()
pars3[ipar] += istep
pars3[jpar] += jstep
if ipar>=2 and jpar>=2:
# both parameters only shift ext/distmod, so start from a copy of the base isochrone
iso3 = iso0.copy()
if ipar==2:
iso3.ext += istep
elif ipar==3:
iso3.distmod += istep
if jpar==2:
iso3.ext += jstep
elif jpar==3:
iso3.distmod += jstep
else:
iso3 = grid(*pars3)
isophot3 = isophotprep(iso3,isonames)
sumdist3,meddist3,chisq3,dist3 = isocomparison(cphot,isophot3,cphoterr)
lnlike3 = 0.5*chisq3
deriv2 = (lnlike3-lnlike2)/istep
# Second derivative
deriv2nd = (deriv2-deriv1)/jstep
hess[i,j] = deriv2nd
hess[j,i] = deriv2nd
return hess
def fit_mle(cat,catnames,grid,isonames,initpar,caterrnames=None,fixed=None,verbose=False):
""" Isochrone fitting using maximum likelihood estimation (MLE)."""
ncat = len(cat)
cphot,cphoterr = photprep(cat,catnames,caterrnames)
# Check for any fixed values
fixpars = np.zeros(4,bool)
if fixed is not None:
for n in fixed.keys():
if n.lower()=='age':
initpar[0] = fixed[n]
fixpars[0] = True
elif n.lower()=='logage':
initpar[0] = 10**fixed[n]
fixpars[0] = True
elif n.lower()=='metal' or n.lower()=='feh' or n.lower()=='fe_h':
initpar[1] = fixed[n]
fixpars[1] = True
elif n.lower()=='ext' or n.lower()=='extinction':
initpar[2] = fixed[n]
fixpars[2] = True
elif n.lower()=='distance' or n.lower()=='dist':
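# fixed distance is assumed to be in kpc; distance modulus: m - M = 5*log10(d[pc]) - 5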
initpar[3] = np.log10(fixed[n]*1e3)*5-5
fixpars[3] = True
elif n.lower()=='distmod':
initpar[3] = fixed[n]
fixpars[3] = True
nfixpars = np.sum(fixpars)
nfreepars = np.sum(~fixpars)
freeparsind, = np.where(fixpars==False)
if nfixpars>0:
fixparsind, = np.where(fixpars==True)
fixparvals = np.zeros(nfixpars,float)
fixparvals[:] = np.array(initpar)[fixparsind]
initpar = np.delete(initpar,fixparsind)
else:
fixparvals = []
# Bounds
lbounds = np.zeros(4,float)
lbounds[0] = grid.minage
lbounds[1] = grid.minmetal
lbounds[2] = 0.0
lbounds[3] | |
# Copyright 2020-present, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
best_args = {'perm-mnist': {
'sgd': {-1: {'lr': 0.2, 'batch_size': 128, 'n_epochs': 1}},
'ewc_on': {-1: {'lr': 0.1,
'e_lambda': 0.7,
'gamma': 1.0,
'batch_size': 128,
'n_epochs': 1}},
'si': {-1: {'lr': 0.1,
'c': 0.5,
'xi': 1.0,
'batch_size': 128,
'n_epochs': 1}},
'er': {200: {'lr': 0.2,
'minibatch_size': 128,
'batch_size': 128,
'n_epochs': 1},
500: {'lr': 0.2,
'minibatch_size': 128,
'batch_size': 128,
'n_epochs': 1},
5120: {'lr': 0.2,
'minibatch_size': 128,
'batch_size': 128,
'n_epochs': 1}},
'gem': {200: {'lr': 0.1,
'gamma': 0.5,
'batch_size': 128,
'n_epochs': 1},
500: {'lr': 0.1, 'gamma': 0.5, 'batch_size': 128,
'n_epochs': 1},
5120: {'lr': 0.1, 'gamma': 0.5, 'batch_size': 128,
'n_epochs': 1}},
'agem': {200: {'lr': 0.1,
'minibatch_size': 128,
'batch_size': 128,
'n_epochs': 1},
500: {'lr': 0.1,
'minibatch_size': 128,
'batch_size': 128,
'n_epochs': 1},
5120: {'lr': 0.1,
'minibatch_size': 128,
'batch_size': 128,
'n_epochs': 1}},
'hal': {200: {'lr': 0.1,
'minibatch_size': 128,
'batch_size': 128,
'hal_lambda': 0.1,
'beta': 0.5,
'gamma': 0.1,
'n_epochs': 1},
500: {'lr': 0.1,
'minibatch_size': 128,
'batch_size': 128,
'hal_lambda': 0.1,
'beta': 0.3,
'gamma': 0.1,
'n_epochs': 1},
5120: {'lr': 0.1,
'minibatch_size': 128,
'batch_size': 128,
'hal_lambda': 0.1,
'beta': 0.5,
'gamma': 0.1,
'n_epochs': 1}},
'gss': {200: {'lr': 0.2,
'minibatch_size': 10,
'gss_minibatch_size': 128,
'batch_size': 128,
'batch_num': 1,
'n_epochs': 1},
500: {'lr': 0.1,
'minibatch_size': 128,
'gss_minibatch_size': 10,
'batch_size': 128,
'batch_num': 1,
'n_epochs': 1},
5120: {'lr': 0.03,
'minibatch_size': 128,
'gss_minibatch_size': 10,
'batch_size': 128,
'batch_num': 1,
'n_epochs': 1}},
'agem_r': {200: {'lr': 0.1,
'minibatch_size': 128,
'batch_size': 128,
'n_epochs': 1},
500: {'lr': 0.1,
'minibatch_size': 128,
'batch_size': 128,
'n_epochs': 1},
5120: {'lr': 0.1,
'minibatch_size': 128,
'batch_size': 128,
'n_epochs': 1}},
'fdr': {200: {'lr': 0.1,
'minibatch_size': 128,
'alpha': 1.0,
'batch_size': 128,
'n_epochs': 1},
500: {'lr': 0.1,
'minibatch_size': 128,
'alpha': 0.3,
'batch_size': 128,
'n_epochs': 1},
5120: {'lr': 0.1,
'minibatch_size': 128,
'alpha': 1,
'batch_size': 128,
'n_epochs': 1}},
'der': {200: {'lr': 0.2,
'minibatch_size': 128,
'alpha': 1.0,
'batch_size': 128,
'n_epochs': 1},
500: {'lr': 0.2,
'minibatch_size': 128,
'alpha': 1.0,
'batch_size': 128,
'n_epochs': 1},
5120: {'lr': 0.2,
'minibatch_size': 128,
'alpha': 0.5,
'batch_size': 128,
'n_epochs': 1}},
'derpp': {200: {'lr': 0.1,
'minibatch_size': 128,
'alpha': 1.0,
'beta': 1.0,
'batch_size': 128,
'n_epochs': 1},
500: {'lr': 0.2,
'minibatch_size': 128,
'alpha': 1.0,
'beta': 0.5,
'batch_size': 128,
'n_epochs': 1},
5120: {'lr': 0.2,
'minibatch_size': 128,
'alpha': 0.5,
'beta': 1.0,
'batch_size': 128,
'n_epochs': 1}}},
'rot-mnist': {
'sgd': {-1: {'lr': 0.2, 'batch_size': 128, 'n_epochs': 1}},
'ewc_on': {-1: {'lr': 0.1,
'e_lambda': 0.7,
'gamma': 1.0,
'batch_size': 128,
'n_epochs': 1}},
'si': {-1: {'lr': 0.1,
'c': 1.0,
'xi': 1.0,
'batch_size': 128,
'n_epochs': 1}},
'er': {200: {'lr': 0.2,
'minibatch_size': 128,
'batch_size': 128,
'n_epochs': 1},
500: {'lr': 0.2,
'minibatch_size': 128,
'batch_size': 128,
'n_epochs': 1},
5120: {'lr': 0.2,
'minibatch_size': 128,
'batch_size': 128,
'n_epochs': 1}},
'gem': {200: {'lr': 0.01,
'gamma': 0.5,
'batch_size': 128,
'n_epochs': 1},
500: {'lr': 0.01, 'gamma': 0.5, 'batch_size': 128,
'n_epochs': 1},
5120: {'lr': 0.01, 'gamma': 0.5, 'batch_size': 128,
'n_epochs': 1}},
'agem': {200: {'lr': 0.1,
'minibatch_size': 128,
'batch_size': 128,
'n_epochs': 1},
500: {'lr': 0.3,
'minibatch_size': 128,
'batch_size': 128,
'n_epochs': 1},
5120: {'lr': 0.3,
'minibatch_size': 128,
'batch_size': 128,
'n_epochs': 1}},
'hal': {200: {'lr': 0.1,
'minibatch_size': 128,
'batch_size': 128,
'hal_lambda': 0.2,
'beta': 0.5,
'gamma': 0.1,
'n_epochs': 1},
500: {'lr': 0.1,
'minibatch_size': 128,
'batch_size': 128,
'hal_lambda': 0.1,
'beta': 0.5,
'gamma': 0.1,
'n_epochs': 1},
5120: {'lr': 0.1,
'minibatch_size': 128,
'batch_size': 128,
'hal_lambda': 0.1,
'beta': 0.3,
'gamma': 0.1,
'n_epochs': 1}},
'gss': {200: {'lr': 0.2,
'minibatch_size': 10,
'gss_minibatch_size': 128,
'batch_size': 128,
'batch_num': 1,
'n_epochs': 1},
500: {'lr': 0.2,
'minibatch_size': 128,
'gss_minibatch_size': 128,
'batch_size': 128,
'batch_num': 1,
'n_epochs': 1},
5120: {'lr': 0.2,
'minibatch_size': 128,
'gss_minibatch_size': 128,
'batch_size': 128,
'batch_num': 1,
'n_epochs': 1}},
'agem_r': {200: {'lr': 0.1,
'minibatch_size': 128,
'batch_size': 128,
'n_epochs': 1},
500: {'lr': 0.3,
'minibatch_size': 128,
'batch_size': 128,
'n_epochs': 1},
5120: {'lr': 0.3,
'minibatch_size': 128,
'batch_size': 128,
'n_epochs': 1}},
'fdr': {200: {'lr': 0.2,
'minibatch_size': 128,
'alpha': 1.0,
'batch_size': 128,
'n_epochs': 1},
500: {'lr': 0.2,
'minibatch_size': 128,
'alpha': 0.3,
'batch_size': 128,
'n_epochs': 1},
5120: {'lr': 0.2,
'minibatch_size': 128,
'alpha': 1,
'batch_size': 128,
'n_epochs': 1}},
'der': {200: {'lr': 0.2,
'minibatch_size': 128,
'alpha': 1.0,
'batch_size': 128,
'n_epochs': 1},
500: {'lr': 0.2,
'minibatch_size': 128,
'alpha': 0.5,
'batch_size': 128,
'n_epochs': 1},
5120: {'lr': 0.2,
'minibatch_size': 128,
'alpha': 0.5,
'batch_size': 128,
'n_epochs': 1}},
'derpp': {200: {'lr': 0.1,
'minibatch_size': 128,
'alpha': 1.0,
'beta': 0.5,
'batch_size': 128,
'n_epochs': 1},
500: {'lr': 0.2,
'minibatch_size': 128,
'alpha': 0.5,
'beta': 1.0,
'batch_size': 128,
'n_epochs': 1},
5120: {'lr': 0.2,
'minibatch_size': 128,
'alpha': 0.5,
'beta': 0.5,
'batch_size': 128,
'n_epochs': 1}}},
'seq-mnist': {
'sgd': {-1: {'lr': 0.03, 'batch_size': 10, 'n_epochs': 1}},
'ewc_on': {-1: {'lr': 0.03,
'e_lambda': 90,
'gamma': 1.0,
'batch_size': 10,
'n_epochs': 1}},
'si': {-1: {'lr': 0.1,
'c': 1.0,
'xi': 0.9,
'batch_size': 10,
'n_epochs': 1}},
'lwf': {-1: {'lr': 0.03,
'alpha': 1,
'softmax_temp': 2.0,
'batch_size': 10,
'n_epochs': 1,
'wd_reg': 0.0005}},
'pnn': {-1: {'lr': 0.1, 'batch_size': 10, 'n_epochs': 1}},
'er': {200: {'lr': 0.01,
'minibatch_size': 10,
'batch_size': 10,
'n_epochs': 1},
500: {'lr': 0.1,
'minibatch_size': 10,
'batch_size': 10,
'n_epochs': 1},
5120: {'lr': 0.1,
'minibatch_size': 10,
'batch_size': 10,
'n_epochs': 1}},
'mer': {200: {'lr': 0.1,
'minibatch_size': 128,
'beta': 1,
'gamma': 1,
'batch_num': 1,
'batch_size': 1,
'n_epochs': 1},
500: {'lr': 0.1,
'minibatch_size': 128,
'beta': 1,
'gamma': 1,
'batch_num': 1,
'batch_size': 1,
'n_epochs': 1},
5120: {'lr': 0.03,
'minibatch_size': 128,
'beta': 1,
'gamma': 1,
'batch_num': 1,
'batch_size': 1,
'n_epochs': 1}},
'gem': {200: {'lr': 0.01,
'gamma': 1.0,
'batch_size': 10,
'n_epochs': 1},
500: {'lr': 0.03, 'gamma': 0.5, 'batch_size': 10,
'n_epochs': 1},
5120: {'lr': 0.1, 'gamma': 1.0, 'batch_size': 10,
'n_epochs': 1}},
'agem': {200: {'lr': 0.1,
'minibatch_size': 128,
'batch_size': 10,
'n_epochs': 1},
500: {'lr': 0.1,
'minibatch_size': 128,
'batch_size': 10,
'n_epochs': 1},
5120: {'lr': 0.1,
'minibatch_size': 128,
'batch_size': 10,
'n_epochs': 1}},
'hal': {200: {'lr': 0.1,
'minibatch_size': 128,
'batch_size': 128,
'hal_lambda': 0.1,
'beta': 0.7,
'gamma': 0.5,
'n_epochs': 1},
500: {'lr': 0.1,
'minibatch_size': 128,
'batch_size': 128,
'hal_lambda': 0.1,
'beta': 0.2,
'gamma': 0.5,
'n_epochs': 1},
5120: {'lr': 0.1,
'minibatch_size': 128,
'batch_size': 128,
'hal_lambda': 0.1,
'beta': 0.7,
'gamma': 0.5,
'n_epochs': 1}},
'gss': {200: {'lr': 0.1,
'minibatch_size': 10,
'gss_minibatch_size': 10,
'batch_size': 128,
'batch_num': 1,
'n_epochs': 1},
500: {'lr': 0.1,
'minibatch_size': 10,
'gss_minibatch_size': 10,
'batch_size': 128,
'batch_num': 1,
'n_epochs': 1},
5120: {'lr': 0.1,
'minibatch_size': 128,
'gss_minibatch_size': 10,
'batch_size': 128,
'batch_num': 1,
'n_epochs': 1}},
'agem_r': {200: {'lr': 0.1,
'minibatch_size': 128,
'batch_size': 10,
'n_epochs': 1},
500: {'lr': 0.1,
'minibatch_size': 128,
'batch_size': 10,
'n_epochs': 1},
5120: {'lr': 0.1,
'minibatch_size': 128,
'batch_size': 10,
'n_epochs': 1}},
'icarl': {200: {'lr': 0.1,
'minibatch_size': 10,
'wd_reg': 0,
'batch_size': 10,
'n_epochs': 1},
500: {'lr': 0.1,
'minibatch_size': 10,
'wd_reg': 0,
'batch_size': 10,
'n_epochs': 1},
5120: {'lr': 0.1,
'minibatch_size': 10,
'wd_reg': 0,
'batch_size': 10,
'n_epochs': 1}},
'fdr': {200: {'lr': 0.03,
'minibatch_size': 128,
'alpha': 0.5,
'batch_size': 128,
'n_epochs': 1},
500: {'lr': 0.1,
'minibatch_size': 128,
'alpha': 0.2,
'batch_size': 128,
'n_epochs': 1},
5120: {'lr': 0.1,
'minibatch_size': 128,
'alpha': 0.2,
'batch_size': 128,
'n_epochs': 1}},
'der': {200: {'lr': 0.03,
'minibatch_size': 10,
'alpha': 0.2,
'batch_size': 10,
'n_epochs': 1},
500: {'lr': 0.03,
'minibatch_size': 128,
'alpha': 1.0,
'batch_size': 10,
'n_epochs': 1},
5120: {'lr': 0.1,
'minibatch_size': 128,
'alpha': 0.5,
'batch_size': 10,
'n_epochs': 1}},
'derpp': {200: {'lr': 0.03,
'minibatch_size': 128,
'alpha': 0.2,
'beta': 1.0,
'batch_size': 10,
'n_epochs': 1},
500: {'lr': 0.03,
'minibatch_size': 10,
'alpha': 1.0,
'beta': 0.5,
'batch_size': 10,
'n_epochs': 1},
5120: {'lr': 0.1,
'minibatch_size': 64,
'alpha': 0.2,
'beta': 0.5,
'batch_size': 10,
'n_epochs': 1}},
'per1': {200: {'lr': 0.03,
'minibatch_size': 128,
'alpha': 0.2,
'beta': 1.0,
'lmd': 0.5,
'pseudo_size': 2,
'batch_size': 10,
'n_epochs': 1},
500: {'lr': 0.03,
'minibatch_size': 10,
'alpha': 1.0,
'beta': 0.5,
'lmd': 0.5,
'pseudo_size': 2,
'batch_size': 10,
'n_epochs': 1},
5120: {'lr': 0.1,
'minibatch_size': 64,
'alpha': 0.2,
'beta': 0.5,
'lmd': 0.5,
'pseudo_size': 2,
'batch_size': 10,
'n_epochs': 1}}},
'seq-cifar10': {'sgd': {-1: {'lr': 0.1,
'batch_size': 32,
'n_epochs': 50}},
'ewc_on': {-1: {'lr': 0.03,
'e_lambda': 10,
'gamma': 1.0,
'batch_size': 32,
'n_epochs': 50}},
'si': {-1: {'lr': 0.03,
'c': 0.5,
'xi': 1.0,
'batch_size': 32,
'n_epochs': 50}},
'lwf': {-1: {'lr': 0.01,
'alpha': 3.0,
'softmax_temp': 2.0,
'batch_size': 32,
'n_epochs': 50,
'wd_reg': 0.0005}},
'pnn': {-1: {'lr': 0.03, 'batch_size': 32,
'n_epochs': 50}},
'er': {200: {'lr': 0.1,
'minibatch_size': 32,
'batch_size': 32,
'n_epochs': 50},
500: {'lr': 0.1,
'minibatch_size': 32,
'batch_size': 32,
'n_epochs': | |
iter <= max_iter:
#step 1, parallel
b_0 = -Phi - Theta + nu * (W_i_h + W_i_t)
for t in range(T):
ind = np.ravel_multi_index((i,t),(N,T))
b = USP[(i,t,ll)] + b_0[:,t]
if not big_data:
W_i[:,t] = np.dot(A_inv[ind], b)
else:
tmp = A_inv[str(i) + '/' + str(t)]
W_i[:,t] = np.dot(tmp, b)
#step 2
W_i_t = np.sign(W_i + Theta/nu)*np.maximum(np.absolute(W_i + Theta/nu)-1.0*lam/nu,0)
#step 3, parallel
tmp = W_i + 1.0*Phi/nu
for j in range(d_i):
#print j
W_i_h[j,:] = ptv.tv1_1d(tmp[j,:], 1.0*mu/nu)
#step 4
Theta = Theta + nu*(W_i - W_i_t)
#step 5
Phi = Phi + nu*(W_i - W_i_h)
iter += 1
if iter%10 == 0:
loss_1 = loss_0
loss_0 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, i, ll, SVD_ijt, lam, mu, nu)
if (np.sum(np.absolute(W_i-W_i_h)+np.absolute(W_i-W_i_t))/(np.sum(abs(W_i)) + 1e-2) < tol) and abs(loss_0 -loss_1)/loss_0 < tol and iter > 500:
if (test and _cvx_conv):
tmp = val_config.time
val_config.time = tmp + time.time() - s_time
break
if not test:
break
if test and not _cvx_conv:
if abs(org_f(W_i, i, ll, SVD_ijt) - cvx_val)/cvx_val < 0.1 or org_f(W_i, i, ll, SVD_ijt) <= cvx_val:
_cvx_conv = True
if iter > max_iter:
warnings.warn(str(lam)+' '+str(mu)+' '+str(nu)+' warning: does not converge!')
_conv = False
if d_i > 1000 and iter%100 == 0:
print time.time() - s_time
T_dif_i = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, i, ll, SVD_ijt, lam, mu, nu, out_put=out_put, choice='final')
return W_i, _conv, T_dif_i, W_i_t, W_i_h, Theta, Phi
# def cvx_1(i_ll, SVD_ijt, tol=1e-2, max_iter=1000, lam=lam, mu=mu, nu=nu):
# i, ll = i_ll
# tmp = val_config.cvx_time
# s_time = time.time()
# W_i = Variable(di[i],T)
# W_i_t = Variable(di[i],T)
# W_i_h = Variable(di[i],T)
# funcs = []
# for t in range(T):
# Sigma_iti, U_1it= SVD_x[np.ravel_multi_index((i,t),(N,T))][1:3]
# for j in range(i+1, N):
# funcs.append(0.5*sum_squares(np.transpose(U_1it) * W_i[:,t] - np.dot(Sigma_iti, SVD_ijt[np.ravel_multi_index((i,j,t),(N,N,T))][0][:,ll]).reshape((-1,1))))
# for w in range(i):
# funcs.append(0.5*sum_squares(np.transpose(U_1it) * W_i[:,t] - np.dot(Sigma_iti, SVD_ijt[np.ravel_multi_index((w,i,t),(N,N,T))][1][:,ll]).reshape((-1,1))))
# D = np.zeros((T,T-1))
# for t in range(T-1):
# D[t,t] = 1
# D[t+1,t] = -1
# funcs.append(norm(W_i_h*D,1)*mu)
# funcs.append(norm(W_i_t,1)*lam)
# constraints = [W_i_h == W_i, W_i_t == W_i]
# #funcs.append(sum_squares(W_i)*1e-6)
# prob = Problem(Minimize(sum(funcs)), constraints)
# result = prob.solve(max_iters=int(max_iter*(T**3)*(di[i]**3)), reltol=1e-6, abstol=1e32)
# #result = prob.solve(max_iters=int(max_iter*(T**3)*(di[i]**3)))
# val_config.cvx_time = time.time() - s_time + tmp
# print W_i.value[:,0], prob.value
# print W_i.value[:,1]
# return W_i.value, prob.value
# def cvx_admm(i_ll, SVD_ijt, tol=1e-2, max_iter=1000, lam=lam, mu=mu, nu=nu, cvx_val = None):
# i, ll = i_ll
# s_time = time.time()
# W_i = Variable(di[i], T)
# W_i_h = Variable(di[i], T)
# W_i_t = Variable(di[i], T)
# Theta = Variable(di[i], T)
# Phi = Variable(di[i], T)
# W_i.value = W_cvx_admm[(i,ll)]
# W_i_h.value = W_h_cvx_admm[(i,ll)]
# W_i_t.value = W_t_cvx_admm[(i,ll)]
# Theta.value = Theta_all_cvx_admm[(i,ll)]
# Phi.value = Phi_all_cvx_admm[(i,ll)]
# def opt_admm_1(W_i, W_i_t, W_i_h, Theta, Phi):
# funcs = []
# for t in range(T):
# Sigma_iti, U_1it= SVD_x[np.ravel_multi_index((i,t),(N,T))][1:3]
# for j in range(i+1, N):
# funcs.append(0.5*sum_squares(np.transpose(U_1it) * W_i[:,t] - np.dot(Sigma_iti, SVD_ijt[np.ravel_multi_index((i,j,t),(N,N,T))][0][:,ll]).reshape((-1,1))))
# for w in range(i):
# funcs.append(0.5*sum_squares(np.transpose(U_1it) * W_i[:,t] - np.dot(Sigma_iti, SVD_ijt[np.ravel_multi_index((w,i,t),(N,N,T))][1][:,ll]).reshape((-1,1))))
# funcs.append(sum_squares(W_i - W_i_h.value)*nu/2.0)
# funcs.append(sum_squares(W_i - W_i_t.value)*nu/2.0)
# funcs.append(trace(np.transpose(Theta.value)*(W_i - W_i_t.value)))
# funcs.append(trace(np.transpose(Phi.value)*(W_i-W_i_h.value)))
# prob = Problem(Minimize(sum(funcs)))
# result = prob.solve(max_iters=int(max_iter*(T**3)*(di[i]**3)), abstol=1e32, reltol=tol, solver=CVXOPT)
# def opt_admm_2(W_i, W_i_t, W_i_h, Theta, Phi):
# funcs = []
# funcs.append(norm(W_i_t,1)*lam)
# funcs.append(trace(np.transpose(Theta.value)*(W_i.value - W_i_t)))
# funcs.append(sum_squares(W_i.value - W_i_t)*nu/2.0)
# prob = Problem(Minimize(sum(funcs)))
# result = prob.solve(max_iters=int(max_iter*(T**3)*(di[i]**3)), abstol=1e32, reltol=tol)
# def opt_admm_3(W_i, W_i_t, W_i_h, Theta, Phi):
# funcs = []
# D = np.zeros((T,T-1))
# for t in range(T-1):
# D[t,t] = 1
# D[t+1,t] = -1
# funcs.append(norm(W_i_h*D,1)*mu)
# funcs.append(trace(np.transpose(Theta.value)*(W_i.value - W_i_h)))
# funcs.append(sum_squares(W_i.value - W_i_h)*nu/2.0)
# prob = Problem(Minimize(sum(funcs)))
# result = prob.solve(max_iters=int(max_iter*(T**3)*(di[i]**3)), abstol=1e32, reltol=tol)
# iter = 0
# while iter <= max_iter:
# #step 1, parallel
# opt_admm_1(W_i, W_i_t, W_i_h, Theta, Phi)
# #l1 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, i, ll, P, SVD_ijt)
# #if l1 >= l5+tol_dif:
# # test_admm(lam, mu, nu, 1, l1, l5)
# # break
# #step 2
# opt_admm_2(W_i, W_i_t, W_i_h, Theta, Phi)
# #l2 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, i, ll, P, SVD_ijt)
# #if l2 >= l1+tol_dif:
# # test_admm(lam, mu, nu, 2, l2, l1)
# # break
# #step 3, parallel
# opt_admm_3(W_i, W_i_t, W_i_h, Theta, Phi)
# #l3 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, i, ll, P, SVD_ijt)
# #if l3 >= l2+tol_dif:
# # test_admm(lam, mu, nu, 3, l3, l2)
# # break
# #step 4
# Theta.value = Theta.value + nu*(W_i.value - W_i_t.value)
# #l4 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, i, ll, P, SVD_ijt)
# #if l4 <=l3 - tol_dif:
# # test_admm(lam, mu, nu, 4, l4, l3)
# # break
# #step 5
# Phi.value = Phi.value + nu*(W_i.value - W_i_h.value)
# #l5 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, i, ll, P, SVD_ijt)
# #if l5 <= l4 - tol_dif:
# # test_admm(lam, mu, nu, 5, l5, l4)
# # break
# iter += 1
# if test:
# if abs(org_f(W_i.value, i, ll, SVD_ijt) - cvx_val)/cvx_val < tol:
# tmp = val_config.cvx_time_admm
# val_config.time = tmp + time.time() - s_time
# break
def obj_value_3(W_i, W_i_t, W_i_h, Phi, Theta, Psi, i, ll):
"""
compute the value of the objective function (ADMM)
"""
loss = 0
D = np.zeros((T,T-1))
for t in range(T-1):
D[t,t] = 1
D[t+1,t] = -1
loss += np.sum(np.absolute(np.dot(W_i_h, D)))
loss0 = np.sum(np.absolute(W_i_t))
loss_admm_1 = (np.linalg.norm(W_i-W_i_h,'fro')**2 + np.linalg.norm(W_i-W_i_t,'fro')**2)
loss_admm_3 = np.sum((W_i-W_i_t)*Theta) + np.sum((W_i-W_i_h)*Phi)
loss_admm_2 = 0
loss_admm_4 = 0
for t in range(T):
Sigma_iti, U_1it= SVD_x[np.ravel_multi_index((i,t),(N,T))][1:3]
for j in range(i+1,N):
j_mat = (np.dot(np.transpose(U_1it), W_i[:,t]) - np.dot(Sigma_iti, SVD_ijt[np.ravel_multi_index((i,j,t),(N,N,T))][0][:,ll])).reshape((-1,1))
loss_admm_2 += np.linalg.norm(j_mat,'fro')**2
loss_admm_4 += np.dot(Psi[t][:,j], j_mat)[0]
for w in range(i):
w_mat = (np.dot(np.transpose(U_1it), W_i[:,t]) - np.dot(Sigma_iti, SVD_ijt[np.ravel_multi_index((w,i,t),(N,N,T))][1][:,ll])).reshape((-1,1))
loss_admm_2 += np.linalg.norm(w_mat,'fro')**2
loss_admm_4 += np.dot(Psi[t][:,w], w_mat)[0]
if out_put:
print 'without tuning para:', loss, loss0, loss_admm_1, loss_admm_2, loss_admm_3, loss_admm_4
print 'with tuning para:', loss, loss0*lam, nu/2.0*loss_admm_1, mu/2.0*loss_admm_2, loss_admm_3, loss_admm_4
loss = loss + loss0*lam + nu/2.0*loss_admm_1 + mu/2.0*loss_admm_2 + loss_admm_3 + loss_admm_4
return loss
def admm_sep_3(i_ll, tol=1e-2, max_iter=1000):
"""
computation for one view of data
"""
i, ll = i_ll
obj_value = obj_value_3
W_i = W[(i,ll)]
W_i_h = np.random.uniform(size=W_i.shape)
W_i_t = np.random.uniform(size=W_i.shape)
Theta = np.random.uniform(size=W_i.shape)
Phi = np.random.uniform(size=W_i.shape)
#r,N,T
Psi = dict()
for t in range(T):
r = SVD_x[np.ravel_multi_index((i,t),(N,T))][2].shape[1]
Psi[t] = np.random.uniform(size=(r, N))
Psi[t][:,i] = 0
d_i = W_i.shape[0]
l6 = 1e32
loss_0 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, Psi, i, ll)
loss_1 = loss_0 + 1
iter = 0
_conv = True
while (np.sum(np.absolute(W_i-W_i_h)+np.absolute(W_i-W_i_t)) > tol or abs(loss_0 -loss_1) > tol) and iter <= max_iter:
#step 1, parallel
for t in range(T):
U_1it= SVD_x[np.ravel_multi_index((i,t),(N,T))][2]
ind = np.ravel_multi_index((i,t),(N,T))
A = mu*(N-1) * UTU[ind] + 2*nu*np.eye(d_i)
b = -Phi[:,t] - Theta[:,t] + mu*np.dot(US[ind], P[ind][:,ll]) + nu * (W_i_h[:,t] + W_i_t[:,t]) - np.dot(U_1it,np.sum(Psi[t],axis=1).reshape((-1,1))).reshape((-1,))
W_i[:,t] = np.linalg.solve(A, b)
l1 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, Psi, i, ll)
if l1 >= l6+tol_dif:
test_admm(lam, mu, nu, 1, l1, l6)
break
#step 2
W_i_t = np.sign(W_i + Theta/nu)*np.maximum(np.absolute(W_i + Theta/nu)-1.0*lam/nu,0)
l2 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, Psi, i, ll)
if l2 >= l1+tol_dif:
test_admm(lam, mu, nu, 2, l2, l1)
break
#step 3, parallel
tmp = W_i + Phi/nu
for j in range(d_i):
W_i_h[j,:] = ptv.tv1_1d(tmp[j,:], 1.0/nu)
l3 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, Psi, i, ll)
if l3 >= l2+tol_dif:
test_admm(lam, mu, nu, 3, l3, l2)
break
#step 4, parallel
for t in range(T):
Sigma_iti, U_1it= SVD_x[np.ravel_multi_index((i,t),(N,T))][1:3]
tmp = np.dot(np.transpose(U_1it), W_i[:,t])
for j in range(i+1,N):
Psi[t][:,j] = Psi[t][:,j] + mu * (tmp - np.dot(Sigma_iti, SVD_ijt[np.ravel_multi_index((i,j,t),(N,N,T))][0][:,ll]))
for w in range(i):
Psi[t][:,w] = Psi[t][:,w] + mu * (tmp - np.dot(Sigma_iti, SVD_ijt[np.ravel_multi_index((w,i,t),(N,N,T))][1][:,ll]))
l4 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, Psi, i, ll)
if l4 <= l3-tol_dif:
test_admm(lam, mu, nu, 4, l4, l3)
break
#step 5
Theta = Theta + nu*(W_i - W_i_t)
l5 = obj_value(W_i, W_i_t, W_i_h, Phi, Theta, Psi, i, | |
"""
@author: <NAME> "Mayou36"
DEPRECATED! USE OTHER MODULES LIKE rd.data, rd.ml, rd.reweight, rd.score and rd.stat
DEPRECATED!DEPRECATED!DEPRECATED!DEPRECATED!DEPRECATED!
Contains several tools to convert, load, save and plot data
"""
import warnings
import os
import copy
import pandas as pd
import numpy as np
import uproot
import pickle
from . import dev_tool
# both produce error (27.07.2016) when importing them if run from main.py.
# No problem when run as main...
# from raredecay.tools import dev_tool
from .. import meta_config as meta_cfg
def apply_cuts(signal_data, bkg_data, percent_sig_to_keep=100, bkg_length=None):
"""Search for best cut on value to still keep percent_sig_to_keep of signal
Parameters
----------
signal_data : 1-D numpy array
The signal
bkg_data : 1-D numpy array
The background data
percent_sig_to_keep : 0 < float <= 100
The percentage of the signal to keep when determining the cut boundaries.
"""
# if percent_sig_to_keep < 100:
# raise NotImplementedError("percentage of < 100 not yet imlemented")
percentile = [0, percent_sig_to_keep] # TODO: modify for percent_sig_to_keep
bkg_length_before = len(bkg_data)
bkg_length = len(bkg_data) if bkg_length in (None, 0) else bkg_length
lower_cut, upper_cut = np.percentile(signal_data, percentile)
cut_bkg = np.count_nonzero(
np.logical_or(bkg_data < lower_cut, bkg_data > upper_cut)
)
rejected_bkg = (bkg_length_before - cut_bkg) / bkg_length
return [lower_cut, upper_cut], rejected_bkg
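# Illustrative usage (toy arrays, not taken from the original analysis):
#   sig = np.random.normal(0.0, 1.0, 10000)
#   bkg = np.random.normal(1.0, 2.0, 10000)
#   (lower, upper), bkg_fraction = apply_cuts(sig, bkg, percent_sig_to_keep=95)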
def make_root_dict(path_to_rootfile, tree_name, branches):
"""Returns a root_numpy compatible "root-dict" of a root-tree.
Parameters
----------
path_to_rootfile : str
The exact path to the root-tree including the filename. Example:
/home/user1/data/myRootTree1.root
tree_name : str
The name of the tree
branches : str or list[str, str, str,... ]
The branches of the tree to use
"""
output = dict(filenames=path_to_rootfile, treename=tree_name, branches=branches)
output = dev_tool.entries_to_str(output)
return output
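# Example (the tree and branch names below are placeholders):
#   root_dict = make_root_dict('/home/user1/data/myRootTree1.root', 'DecayTree', ['B_M', 'B_PT'])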
def add_to_rootfile(rootfile, new_branch, branch_name=None, overwrite=True):
"""Adds a new branch to a given root file.
.. warning:: Overwrite not working currently!
Parameters
----------
rootfile : root-dict
The ROOT-file where the data should be added
new_branch : numpy.array 1-D, list, root-dict
A one-dimensional numpy array that contains the data.
branch_name : str
The name of the branch, i.e. the name in the dtype of the array.
"""
from root_numpy import array2root
from rootpy.io import root_open
rootfile = dev_tool.entries_to_str(rootfile)
new_branch = dev_tool.entries_to_str(new_branch)
branch_name = dev_tool.entries_to_str(branch_name)
# get the right parameters
# TODO: what is that 'if' there for? an assertion maybe?
write_mode = "update"
branch_name = "new_branch1" if branch_name is None else branch_name
if isinstance(rootfile, dict):
filename = rootfile.get("filenames")
treename = rootfile.get("treename")
new_branch = to_ndarray(new_branch)
# new_branch.dtype = [(branch_name, 'f8')]
# write to ROOT-file
write_to_root = False
if os.path.isfile(filename):
with root_open(filename, mode="a") as root_file:
tree = getattr(root_file, treename) # test
if not tree.has_branch(branch_name):
write_to_root = True
# array2tree(new_branch, tree=tree)
# f.write("", TObject.kOverwrite) # overwrite, does not create friends
else:
write_mode = "recreate"
write_to_root = True
if write_to_root:
arr = np.core.records.fromarrays([new_branch], names=branch_name)
array2root(arr=arr, filename=filename, treename=treename, mode=write_mode)
return 0
else:
return 1
# TODO: remove? outdated
def format_data_weights(data_to_shape, weights):
"""Format the data and the weights perfectly. Same length and more.
Change the data to pandas.DataFrame and fill the weights with ones where
nothing or None is specified. Returns both in lists.
Very useful to loop over several data and weights.
Parameters
----------
data_to_shape : (root_dict, numpy.array, pandas.DataFrame)
The data to which the weights are applied. Usually 2-D.
weights : (list, numpy.array, pandas.DataFrame, None)
The weights to be reshaped
*Best format* :
[array(weights),array(weights), None, array(weights),...]
*None* can be used if no special weights are specified.
If weights contains fewer "weight-containing array-like objects" than
data_to_shape does, the difference will be filled with *1*
Return
------
out : list(pandas.DataFrame(data), pandas.DataFrame(data),...)
Return a list containing data
out : list(numpy.array(weight), numpy.array(weight),...)
Return a list with the weights, converted and filled.
"""
# convert the data
if not isinstance(data_to_shape, list):
data_to_shape = [data_to_shape]
data_to_shape = list(map(to_pandas, data_to_shape))
# convert the weights
if not isinstance(weights, list):
weights = [weights]
if weights[0] is not None:
if len(weights[0]) == 1:
weights = [weights]
# convert to pandas
assert isinstance(weights, list), "weights could not be converted to list"
for data_id, data in enumerate(data_to_shape):
if data_id >= len(weights):
weights.append(None)
if weights[data_id] is None:
weights[data_id] = np.array([1] * len(data))
weights[data_id] = to_pandas(weights[data_id]).squeeze().values
return data_to_shape, weights
def obj_to_string(objects, separator=None):
"""Return a string containing all objects as strings, separated by the separator.
Useful for automatic conversion for different types. The following objects
will automatically be converted:
- None will be omitted
Parameters
----------
objects : any object or list(obj, obj, ...) with a string representation
The objects will be converted to a string and concatenated, separated
by the separator.
separator : str
The separator between the objects. Default is " - ".
"""
objects = dev_tool.entries_to_str(objects)
if isinstance(objects, str): # no need to change things
return objects
separator = " - " if separator is None else separator
assert isinstance(separator, str), "Separator not a str"
objects = to_list(objects)
objects = [str(obj) for obj in objects if obj not in (None, "")] # remove Nones
string_out = ""
for word in objects:
string_out += word + separator if word != objects[-1] else word
return string_out
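# Example (assuming dev_tool.entries_to_str leaves plain Python strings untouched):
#   obj_to_string(['a', None, 1])      -> 'a - 1'
#   obj_to_string('already a string')  -> 'already a string'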
def is_root(data_to_check):
"""Check whether a given data is a root file. Needs dicts to be True."""
flag = False
data_to_check = dev_tool.entries_to_str(data_to_check)
if isinstance(data_to_check, dict):
path_name = data_to_check.get("filenames")
# assert isinstance(path_name, str), ("'filenames' of the dictionary " +
# str(data_to_check) + "is not a string")
if path_name.endswith(meta_cfg.ROOT_DATATYPE):
flag = True
return flag
def is_list(data_to_check):
"""Check whether the given data is a list."""
flag = False
if isinstance(data_to_check, list):
flag = True
return flag
def is_ndarray(data_to_check):
"""Check whether a given data is an ndarray."""
flag = False
if isinstance(data_to_check, np.ndarray):
flag = True
return flag
def is_pickle(data_to_check):
"""Check if the file is a pickled file (checks the ending)."""
flag = False
data_to_check = dev_tool.entries_to_str(data_to_check)
if isinstance(data_to_check, str):
if data_to_check.endswith(meta_cfg.PICKLE_DATATYPE):
flag = True
return flag
def to_list(data_in):
"""Convert the data into a list. Does not pack lists into a new one.
If your input is, for example, a string or a list of strings, or a
tuple filled with strings, you have, in general, a problem:
- just iterating through the object will fail because it iterates through the
characters of the string.
- using list(obj) converts the tuple, leaves a list unchanged, but splits a string's
characters into single elements of a new list.
- using [obj] creates a list containing a string, but also a list containing
a list or a tuple, which you did not want to.
Solution: use to_list(obj), which creates a new list in case the object is
a single object (a string is a single object in this sense) or converts
to a list if the object is already a container for several objects.
Parameters
----------
data_in : any obj
So far, any object can be entered.
Returns
-------
out : list
Return a list containing the object or the object converted to a list.
"""
if isinstance(data_in, (str, int, float)):
data_in = [data_in]
data_in = list(data_in)
return data_in
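# Example:
#   to_list('abc')       -> ['abc']     (the string is not split into characters)
#   to_list(('a', 'b'))  -> ['a', 'b']
#   to_list([1, 2, 3])   -> [1, 2, 3]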
def to_ndarray(data_in, float_array=False):
"""Convert data to numpy array (containing only floats).
Parameters
----------
data_in : any reasonable data
The data to be converted
"""
import uproot
if is_root(data_in):
with uproot.open(data_in["filenames"]) as file:
tree = file[data_in["treename"]]
branches = to_list(data_in["branches"])
loaded = tree.arrays(branches, library="np")
loaded = np.stack([loaded[branch] for branch in branches])
if len(branches) == 1:
loaded = loaded[0]
data_in = loaded
# change numpy.void to normal floats
if isinstance(data_in, (pd.Series, pd.DataFrame)):
test_sample = data_in.iloc[0]
else:
test_sample = data_in[0]
if isinstance(test_sample, np.void):
data_in = np.array([val[0] for val in data_in])
if isinstance(data_in, (np.recarray, np.ndarray)):
data_in = data_in.tolist()
if is_list(data_in) or isinstance(data_in, pd.Series):
data_in = np.array(data_in)
if not isinstance(data_in[0], (int, float, str, bool)):
if float_array:
iter_data = copy.deepcopy(data_in)
# HACK
data_in = np.ndarray(shape=len(data_in), dtype=data_in.dtype)
# HACK END
for i, element in enumerate(iter_data):
if not isinstance(element, (int, float, str, bool)):
# does that work or should we iterate over copy?
try:
element_len = len(element)
except TypeError:
element_len = 1
if element_len > 1:
data_in[i] = to_ndarray(element)
float_array = False
elif element_len == 1:
data_in[i] = float(element)
warnings.warn("Could not force float array")
if float_array:
data_in = np.asfarray(data_in)
assert is_ndarray(data_in), "Error, could not convert data to numpy array"
return data_in
def to_pandas_old(data_in, index=None, | |
0.00000183 0.00000157
0.9999727 0. 0.00001186 0. ]
[0.00722851 0.01526725 0.43565944 0.53661335 0.000001 0.00006741
0.00000368 0.00106217 0.00325301 0.00084417]
[0.00000036 0.0000674 0.00041761 0.00037681 0.9802423 0.00182406
0.00046407 0.00000784 0.00130148 0.01529808]
[0.00000001 0. 0.00000001 0.00000141 0.00000006 0.00000029
0. 0.999686 0.00000119 0.00031108]
[0.00187015 0.00001764 0.00090042 0.0000522 0.00193849 0.00024995
0.9947358 0.00000837 0.00017743 0.00004954]
[0.00000011 0.0000001 0.00000432 0.00032096 0.00000003 0.00000011
0. 0.9994941 0.00000291 0.00017744]
[0.00000141 0.9990044 0.00000533 0.00000706 0.0000057 0.00000118
0.00000254 0.00000254 0.00096355 0.00000631]
[0.00000001 0.0000108 0.99991035 0.00007405 0. 0.00000002
0. 0. 0.00000482 0. ]
[0.00001307 0.00000195 0.00148911 0.00000377 0.00889535 0.00002313
0.98950535 0.00000071 0.00005413 0.0000134 ]
[0.00000046 0.00000009 0.00000045 0.000066 0.00000217 0.00000359
0. 0.99763095 0.00000234 0.00229383]
[0.00000207 0.0000001 0.0000121 0.0000413 0.9891594 0.00012888
0.00001004 0.0003083 0.00077547 0.00956238]
[0.9511551 0.00000024 0.00074013 0.00295273 0.0000325 0.00259279
0.00002643 0.00419943 0.00036673 0.037934 ]
[0.00019282 0.00000761 0.00009443 0.09269205 0.00001233 0.8905042
0.00000984 0.00667731 0.0050138 0.0047956 ]
[0.02040602 0.00008765 0.968708 0.00675923 0.00000586 0.00206858
0.00000178 0.0001232 0.00132978 0.0005099 ]
[0.00000003 0.00003059 0.99963593 0.00033278 0. 0.
0. 0.00000004 0.0000006 0. ]
[0.00000013 0.00000069 0.00011947 0.00001512 0.9989298 0.00017804
0.00033047 0.00010938 0.00005705 0.00025981]
[0.00000201 0.00000029 0.00007547 0.00003089 0.00005725 0.00001797
0.00000067 0.0000007 0.99963534 0.00017937]
[0.0000008 0.00000371 0.00003787 0.00002322 0.9943652 0.00003114
0.00021515 0.00265931 0.00010596 0.00255771]
[0.00014284 0.00000316 0.00088069 0.00001164 0.00067388 0.00028095
0.9976901 0.00000022 0.00027996 0.00003657]
[0.0000005 0.9998481 0.00001408 0.00005343 0.0000008 0.00000454
0.00001622 0.00000091 0.00005541 0.00000593]
[0.00023124 0.00704155 0.00350841 0.01709301 0.00000095 0.9699637
0.00002507 0.00001335 0.00137216 0.00075052]
[0.00000479 0.9990164 0.00011481 0.00000937 0.00009504 0.00000502
0.00001609 0.00000499 0.0007289 0.00000461]
[0.00000166 0.00000828 0.00018815 0.00073716 0.00172293 0.00000747
0.00000063 0.9669644 0.00002725 0.03034216]
[0.00000044 0.00000447 0.00002037 0.00016557 0.98469293 0.00012634
0.00004816 0.00041926 0.00069912 0.0138233 ]
[0.00014292 0.00000093 0.00007995 0.00000516 0.00020675 0.00001203
0.9995346 0.00000024 0.0000169 0.00000047]
[0.00506382 0.00000374 0.0002552 0.00027891 0.07903916 0.00018168
0.00010331 0.00096852 0.00023008 0.91387564]
[0.00000009 0.00000007 0.00001448 0.00000002 0.00007941 0.00003181
0.99984396 0.00000001 0.00003016 0.00000002]
[0.00000603 0.00112843 0.00001682 0.00072586 0.00090505 0.00011647
0.00000037 0.01774519 0.00015491 0.97920084]
[0.00026336 0.00001345 0.00001828 0.00014518 0.9758664 0.00030628
0.00014845 0.00020289 0.00130243 0.02173321]
[0.00001244 0.00000003 0.00008187 0.00000037 0.00033492 0.00000552
0.99943477 0.00000001 0.00012991 0.00000029]
[0.00002768 0.00000016 0.0000075 0.00003329 0.0000035 0.00004393
0.00005855 0.00000002 0.99981755 0.00000786]
[0.00001368 0.00030855 0.996968 0.00224247 0.00000003 0.00000032
0.0000009 0.00000041 0.00046545 0.00000034]
[0.99287087 0.00001999 0.00041392 0.00026206 0.00008483 0.00311078
0.0000349 0.00118449 0.00009018 0.00192798]
[0.00001255 0.00001728 0.00094703 0.01166332 0.0037362 0.00011856
0.00008581 0.00003245 0.93257076 0.05081612]
[0.00001244 0.00000107 0.00002597 0.1349599 0.00000007 0.86472213
0.00000001 0.00010447 0.00011649 0.00005752]
[0.997045 0.00000375 0.00009739 0.00059744 0.00002181 0.00047617
0.00051483 0.00082663 0.00001866 0.00039836]
[0.00018604 0.9602836 0.00670487 0.0009231 0.00158843 0.00006977
0.00001744 0.02373268 0.00610436 0.00038978]
[0.00074644 0.00106728 0.01006396 0.98194134 0.00000471 0.00072449
0.00000048 0.00276331 0.00000835 0.00267966]
[0.00000097 0.00000132 0.00055702 0.00040382 0.00000239 0.0000008
0.00000004 0.9989436 0.00000032 0.00008967]] (8.981 sec)
INFO:tensorflow:loss = 0.04535694, step = 19901 (18.053 sec)
INFO:tensorflow:probabilities = [[0.00000765 0.00067763 0.00007031 0.00040211 0.01317788 0.00004157
0.00000592 0.01095119 0.00060731 0.97405845]
[0.00000533 0.9992742 0.00006122 0.00003042 0.00002927 0.00003111
0.00026377 0.00005784 0.00024286 0.00000393]
[0.00000044 0.0000018 0.00010396 0.9994825 0.00000021 0.00004319
0. 0.0000007 0.00022163 0.00014566]
[0.00002097 0.000012 0.99297017 0.00614475 0.00002764 0.00001103
0.00013934 0.00026579 0.00039033 0.00001801]
[0.00011129 0.989019 0.00076224 0.00272806 0.00105737 0.00086151
0.00037754 0.00024047 0.00422185 0.00062057]
[0.00000065 0. 0.00002037 0.00003024 0.00000001 0.00000003
0. 0.9999064 0.00000004 0.00004236]
[0.9994179 0.00000071 0.00009872 0.00004887 0.00000062 0.00021747
0.00000551 0.00002415 0.00018135 0.00000475]
[0.00008241 0.00001682 0.00156234 0.00078995 0.00001836 0.000049
0.00000176 0.00000303 0.9971123 0.00036412]
[0.00002495 0.00005561 0.0000799 0.00001841 0.0000452 0.00016285
0.99928826 0. 0.00032489 0.00000005]
[0.00000597 0.9979685 0.00078217 0.00043085 0.00002839 0.00002614
0.00002161 0.00009225 0.00053387 0.00011002]
[0.00000367 0.00035277 0.10645187 0.00280509 0.0000003 0.00000458
0. 0.88897103 0.00011655 0.00129413]
[0.00000172 0.00000823 0.00007013 0.00051655 0.9400513 0.0001431
0.00249006 0.00090225 0.00006812 0.05574859]
[0.00000433 0.00000174 0.00122404 0.00000233 0.04060846 0.0000106
0.9574813 0.00004264 0.00061118 0.00001321]
[0.0003401 0.00009249 0.0012382 0.02733056 0.00000543 0.95210224
0.00323144 0.00000008 0.0155574 0.00010199]
[0.00015733 0.00005015 0.8944204 0.00071527 0.00037583 0.00005218
0.00020808 0.09680494 0.00012465 0.00709118]
[0.00197778 0.8748807 0.00033516 0.00014032 0.02708189 0.00000526
0.00036825 0.05774886 0.0045981 0.03286362]
[0.00000591 0.00010979 0.00021057 0.00008529 0.9939388 0.00016252
0.00044275 0.000149 0.00159601 0.00329916]
[0.00002208 0.00012365 0.00477796 0.94813013 0.00000792 0.00010875
0.00002013 0.00000018 0.04680468 0.0000045 ]
[0.00131732 0.04515383 0.00116664 0.9081464 0.00000041 0.04196856
0.00000458 0.00058823 0.00002821 0.00162572]
[0.00595316 0.0137182 0.00584991 0.00160463 0.02517628 0.71712446
0.08162494 0.00140687 0.14662682 0.00091468]
[0.00029991 0.04398823 0.0053368 0.84451973 0.0070242 0.00857224
0.00021612 0.02161558 0.01525382 0.0531734 ]
[0.00000781 0.00000009 0.00000053 0.00132195 0.00000167 0.99825054
0.0000018 0.0000029 0.00038045 0.00003228]
[0.00003107 0.00000147 0.00055555 0.00038858 0.00001752 0.00001647
0. 0.9821292 0.00001128 0.01684892]
[0.99910694 0.00000001 0.00007307 0.00000122 0.00000001 0.00081686
0.00000086 0.00000029 0.00000048 0.00000047]
[0.00000091 0.00000003 0.00005471 0.00000006 0.99974614 0.00000408
0.00018169 0.00000028 0.0000001 0.00001199]
[0.00007509 0.00001418 0.03682373 0.18470249 0.04496605 0.54248315
0.0001893 0.00001995 0.01875614 0.17196989]
[0.00000917 0.00000002 0.00000037 0.00000855 0.0000156 0.00005502
0.0000012 0.00000018 0.9997732 0.0001367 ]
[0.00085544 0.00000355 0.00133437 0.00000106 0.00061557 0.00026336
0.99631304 0.00000003 0.0006116 0.00000203]
[0.00010441 0.00000245 0.00028707 0.9823591 0.00063153 0.00483956
0.00000156 0.00588803 0.00010499 0.00578127]
[0.00000094 0.00000189 0.00001181 0.0000579 0.00000037 0.9993254
0.00000458 0.00000036 0.00058479 0.00001202]
[0.99987113 0.00000001 0.00002733 0.00003363 0.00000013 0.0000167
0.00004905 0.0000012 0.00000073 0.00000017]
[0.00007904 0.98263794 0.00292514 0.00154231 0.00008669 0.00449918
0.00019083 0.00095322 0.00671409 0.00037168]
[0.00000358 0.00000833 0.00000174 0.00000148 0.998439 0.00001556
0.00001685 0.00001526 0.0000169 0.00148123]
[0.9999974 0. 0.00000003 0. 0. 0.00000122
0.00000101 0.00000048 0. 0.00000001]
[0.00010104 0.00001749 0.00003032 0.00540673 0.00121681 0.97631675
0.00001153 0.00497118 0.00175706 0.01017107]
[0.00000163 0.00000066 0.00000405 0.0000001 0.00013463 0.00000955
0.9998252 0.00000002 0.00002394 0.00000027]
[0.00000553 0.00001097 0.00014288 0.00005206 0.00000635 0.0000063
0.00000525 0.00000027 0.9997619 0.00000847]
[0.00000168 0.00002246 0.0008422 0.99724185 0.00000001 0.00008269
0. 0.00000372 0.00167254 0.00013284]
[0.00000001 0.00000281 0.0000065 0.9998281 0. 0.00003752
0. 0.00000004 0.00012315 0.00000187]
[0.00002292 0.00000005 0.00000149 0.00011341 0.00154378 0.00002828
0.00000041 0.00147488 0.00012117 0.9966936 ]
[0.00032382 0.0000056 0.00028013 0.00001168 0.00045901 0.00007297
0.9972812 0.00001002 0.00153268 0.00002297]
[0.00000035 0.00000007 0.00000162 0.00000173 0.99859995 0.00010138
0.00000153 0.00000226 0.00004545 0.00124548]
[0.0000043 0.99859124 0.00009122 0.00012444 0.00000423 0.00000595
0.00000155 0.00027186 0.00086075 0.00004442]
[0.0000008 0. 0.00000602 0.00000254 0.98260087 0.00000099
0.00001395 0.00006998 0.00000412 0.01730081]
[0.00015841 0.02213568 0.00101651 0.96367216 0.00008766 0.00264988
0.0000069 0.00067794 0.00238675 0.00720822]
[0.9997781 0.00000001 0.0000446 0.00000025 0.00000231 0.00001734
0.00012549 0.00000011 0.00000083 0.00003112]
[0.00000332 0.00000002 0.00004494 0.00001438 0.98845786 0.00000402
0.0000124 0.00011027 0.00003066 0.01132219]
[0.00000234 0.00113512 0.07932988 0.13101494 0.00000008 0.6143933
0.0005543 0.00000001 0.17356916 0.00000099]
[0.00003336 0.00000039 0.00001007 0.00000052 0.9963767 0.00000237
0.00009443 0.00038928 0.00066243 0.00243051]
[0.00021275 0.9957705 0.00014986 0.00028588 0.00008208 0.00009898
0.00003857 0.00089474 0.00192227 0.0005444 ]
[0.00003547 0.00000106 0.00001823 0.00000053 0.00024886 0.00040601
0.99928623 0. 0.00000361 0.00000016]
[0.00121136 0.00004647 0.00005994 0.00000618 0.00005021 0.00043091
0.9981932 0.00000008 0.0000013 0.00000039]
[0.00040368 0.00002903 0.00087934 0.00028287 0.0002688 0.00315699
0.00005382 0.00034636 0.9905872 0.00399197]
[0.00002782 0.00014833 0.00023138 0.9931706 0.0000076 0.00187144
0.00000238 0.00004292 0.00144521 0.00305227]
[0.00002312 0.9992378 0.00004838 0.00002856 0.00010442 0.00001572
0.00005922 0.00021774 0.00020892 0.00005611]
[0.993357 0.00000001 0.00027187 0.00000625 0.00010446 0.00000299
0.00621115 0.00000787 0.0000006 0.0000379 ]
[0.00000726 0.00000009 0.00013408 0.00000011 0.0001686 0.00000925
0.9996798 0.00000001 0.00000045 0.00000037]
[0.00003066 0.00005972 0.0000114 0.01888036 0.00002667 0.97933084
0.00029926 0.00000565 0.00004892 0.00130659]
[0.00018428 0.00002081 0.00010356 0.0066663 0.00013586 0.9777875
0.00147359 0.00000218 0.01347271 0.0001532 ]
[0.00000192 0.0000013 0.00000418 0.00000207 0.00000021 0.9998301
0.0000019 0.00000049 0.00015727 0.00000043]
[0.00074939 0.0000003 0.00000678 0.00001296 0.00004357 0.00006137
0.00000001 0.98712677 0.00000906 0.01198993]
[0.00000043 0.01042569 0.9872384 0.00041282 0.0000001 0.00000075
0.00000031 0.00000178 0.00191759 0.00000212]
[0.00000262 0.00000039 0.02462034 0.5368669 0.00000003 0.00000228
0. 0.43811658 0.00000098 0.00038979]
[0.03483966 0.00002578 0.11238001 0.000074 0.00671062 0.00139438
0.38273335 0.0000048 0.4618332 0.00000433]
[0.00000004 0.00000137 0.00460128 0.99093586 0. 0.00001995
0. 0.00000267 0.0044384 0.00000031]
[0.00000254 0.00000061 0.00242577 0.0006661 0. 0.00000035
0. 0.9969013 0.00000038 0.00000295]
[0.00000133 0.9992716 0.00002515 0.00001203 0.00000121 0.00000016
0.00001171 0.00001502 0.00065874 0.00000319]
[0.00002087 0.00000214 0.00007729 0.00028717 0.9726061 0.00823937
0.00004383 0.00112739 0.00087601 0.01671973]
[0.00000043 0.00000002 0.9998591 0.00013925 0.00000038 0.00000006
0.00000069 0.00000005 0.00000001 0.00000017]
[0.00000001 0. 0.00000422 0.00000209 0. 0.
0. 0.9999865 0.00000007 0.00000708]
[0.00000022 0.00000061 0.00000425 0.00000164 0.00004083 0.00004235
0.00000341 0.00000142 0.99988747 0.00001781]
[0.00000252 0.00000212 0.00002056 0.0000012 0.00005789 0.00007668
0.9998011 0. 0.00003778 0.00000014]
[0.00072786 0.00120082 0.6797827 0.06275574 0.09106217 0.1104039
0.01714718 0.02902685 0.00604745 0.00184546]
[0.00071485 0.00000001 0.00004097 0.00000001 0.00379103 0.0000021
0.9954464 0.00000022 0.00000013 0.00000424]
[0.00004907 0.9955057 0.0005604 0.00041673 0.00003165 0.00005329
0.00005948 0.00005813 0.00324093 0.00002456]
[0.00002813 0.99918276 0.00005221 0.0000005 0.00000483 0.00000234
0.00007035 0.00000983 0.00064645 0.00000255]
[0.000003 0.997521 0.0004457 0.00020699 0.00019393 0.00003425
0.00002084 0.0002539 0.00124065 0.00007978]
[0.00004878 0.00003413 0.00062794 0.00004293 0.9886339 0.00000506
0.0001055 0.00257614 0.00034519 0.00758034]
[0.00001792 0.00000472 0.00003219 0.00000187 0.00003978 0.00057183
0.99928707 0. 0.00004443 0.00000027]
[0.00000125 0.00003165 0.00000308 0.00219989 0.0036528 0.00007417
0.00000012 0.02560334 0.00000853 0.96842515]
[0.00000151 0. 0.00000356 0.00001687 0.00000003 0.00000131
0. 0.9999163 0.00000029 0.00006016]
[0.00000002 0. 0.000001 0.00000074 0.98542887 0.00001032
0.00000077 0.00000103 0.00000249 0.01455474]
[0.0000183 0.00000004 0.00000101 0.00013528 0.00000124 0.00000578
0. 0.99795085 0.00003899 0.00184848]
[0.00002845 0.99745816 0.0000638 0.00012382 0.00003047 0.00007108
0.00002891 0.00172485 0.00045335 0.00001715]
[0.00000008 0.00000035 0.00000105 0.00004622 0.00000358 0.00000512
0. 0.9996364 0.00003247 0.00027485]
[0.00093047 0.09786985 0.7951297 0.09699854 0.00003969 0.00151935
0.00046356 0.00681029 0.00023492 0.00000369]
[0.99895275 0. 0.00001593 0.00000242 0.00000014 0.00048164
0.00035638 0.00000006 0.00019008 | |
iterator like instance of Product
:rtype: ~paging.models.ProductPaged[~paging.models.Product]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.get_multiple_pages_retry_second.metadata['url']
# Construct parameters
query_parameters = {}
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.ProductPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
get_multiple_pages_retry_second.metadata = {'url': '/paging/multiple/retrysecond'}
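# Illustrative call pattern for the paged result returned above (the client and
# attribute names are assumptions, not defined in this module):
#   for product in client.paging.get_multiple_pages_retry_second():
#       print(product)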
def get_single_pages_failure(
self, custom_headers=None, raw=False, **operation_config):
"""A paging operation that receives a 400 on the first call.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Product
:rtype: ~paging.models.ProductPaged[~paging.models.Product]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.get_single_pages_failure.metadata['url']
# Construct parameters
query_parameters = {}
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.ProductPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
get_single_pages_failure.metadata = {'url': '/paging/single/failure'}
def get_multiple_pages_failure(
self, custom_headers=None, raw=False, **operation_config):
"""A paging operation that receives a 400 on the second call.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Product
:rtype: ~paging.models.ProductPaged[~paging.models.Product]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.get_multiple_pages_failure.metadata['url']
# Construct parameters
query_parameters = {}
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.ProductPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
get_multiple_pages_failure.metadata = {'url': '/paging/multiple/failure'}
def get_multiple_pages_failure_uri(
self, custom_headers=None, raw=False, **operation_config):
"""A paging operation that receives an invalid nextLink.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Product
:rtype: ~paging.models.ProductPaged[~paging.models.Product]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.get_multiple_pages_failure_uri.metadata['url']
# Construct parameters
query_parameters = {}
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.ProductPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
get_multiple_pages_failure_uri.metadata = {'url': '/paging/multiple/failureuri'}
def get_multiple_pages_fragment_next_link(
self, api_version, tenant, custom_headers=None, raw=False, **operation_config):
"""A paging operation that doesn't return a full URL, just a fragment.
:param api_version: Sets the api version to use.
:type api_version: str
:param tenant: Sets the tenant to use.
:type tenant: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Product
:rtype: ~paging.models.ProductPaged1[~paging.models.Product]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.get_multiple_pages_fragment_next_link.metadata['url']
path_format_arguments = {
'tenant': self._serialize.url("tenant", tenant, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api_version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = '/paging/multiple/fragment/{tenant}/{nextLink}'
path_format_arguments = {
'tenant': self._serialize.url("tenant", tenant, 'str'),
'nextLink': self._serialize.url("next_link", next_link, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api_version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.ProductPaged1(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
get_multiple_pages_fragment_next_link.metadata = {'url': '/paging/multiple/fragment/{tenant}'}
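# Illustrative usage sketch (added example): how the fragment-paging operation above is
# typically consumed. `client` and its `paging` attribute are assumed names for a
# configured service client and are not defined in this file; the argument values are
# placeholders.
#
#     paged = client.paging.get_multiple_pages_fragment_next_link(
#         api_version='1.6', tenant='test_user')
#     for product in paged:
#         # ProductPaged calls prepare_request() with each nextLink fragment
#         # until the server stops returning one.
#         print(product)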
def get_multiple_pages_fragment_with_grouping_next_link(
self, custom_parameter_group, custom_headers=None, raw=False, **operation_config):
"""A paging operation that doesn't return a full URL, just a fragment with
parameters grouped.
:param custom_parameter_group: Additional parameters for the operation
:type custom_parameter_group: ~paging.models.CustomParameterGroup
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Product
:rtype: ~paging.models.ProductPaged1[~paging.models.Product]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = None
if custom_parameter_group is not None:
api_version = custom_parameter_group.api_version
tenant = None
if custom_parameter_group is not None:
tenant = custom_parameter_group.tenant
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.get_multiple_pages_fragment_with_grouping_next_link.metadata['url']
path_format_arguments = {
'tenant': self._serialize.url("tenant", tenant, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api_version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = '/paging/multiple/fragmentwithgrouping/{tenant}/{nextLink}'
path_format_arguments = {
'tenant': self._serialize.url("tenant", tenant, 'str'),
'nextLink': self._serialize.url("next_link", next_link, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api_version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.ProductPaged1(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
get_multiple_pages_fragment_with_grouping_next_link.metadata = {'url': '/paging/multiple/fragmentwithgrouping/{tenant}'}
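# Illustrative sketch for the grouped variant (added example): the same iteration as above,
# but with api_version and tenant supplied through a CustomParameterGroup model. Assumes the
# generated model accepts these keyword arguments, which is typical for msrest models but not
# shown in this file.
#
#     group = models.CustomParameterGroup(api_version='1.6', tenant='test_user')
#     paged = client.paging.get_multiple_pages_fragment_with_grouping_next_link(group)
#     for product in paged:
#         print(product)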
def _get_multiple_pages_lro_initial(
self, client_request_id=None, paging_get_multiple_pages_lro_options=None, custom_headers=None, raw=False, **operation_config):
maxresults = None
if paging_get_multiple_pages_lro_options is not None:
maxresults = paging_get_multiple_pages_lro_options.maxresults
timeout = None
if paging_get_multiple_pages_lro_options is not None:
timeout = paging_get_multiple_pages_lro_options.timeout
# Construct URL
url = self.get_multiple_pages_lro.metadata['url']
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if client_request_id is not None:
header_parameters['client-request-id'] = self._serialize.header("client_request_id", client_request_id, 'str')
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
if maxresults is not None:
header_parameters['maxresults'] = self._serialize.header("maxresults", maxresults, 'int')
if timeout is not None:
header_parameters['timeout'] = self._serialize.header("timeout", timeout, 'int')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 202:
deserialized = self._deserialize('ProductResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_multiple_pages_lro(
self, client_request_id=None, paging_get_multiple_pages_lro_options=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""A long-running paging operation that includes a nextLink that has 10
pages.
:param client_request_id:
:type client_request_id: str
:param paging_get_multiple_pages_lro_options: Additional parameters
for the operation
:type paging_get_multiple_pages_lro_options:
~paging.models.PagingGetMultiplePagesLroOptions
:param dict custom_headers: headers that will be added to the request
Array),
BinaryExpression(Control, Keyword('lnbdata'), Array, String),
BinaryExpression(Control, Keyword('lnbdeletecolumn'), Number, Nothing),
BinaryExpression(Control, Keyword('lnbdeleterow'), Number, Nothing),
BinaryExpression(Control, Keyword('lnbpicture'), Array, String),
BinaryExpression(Control, Keyword('lnbpictureright'), Array, String),
BinaryExpression(Control, Keyword('lnbsetcolor'), Array, Nothing),
BinaryExpression(Control, Keyword('lnbsetcolorright'), Array, Nothing),
BinaryExpression(Control, Keyword('lnbsetcolumnspos'), Array, Nothing),
BinaryExpression(Control, Keyword('lnbsetcurselrow'), Number, Nothing),
BinaryExpression(Control, Keyword('lnbsetdata'), Array, Nothing),
BinaryExpression(Control, Keyword('lnbsetpicture'), Array, Nothing),
BinaryExpression(Control, Keyword('lnbsetpicturecolor'), Array, Nothing),
BinaryExpression(Control, Keyword('lnbsetpicturecolorright'), Array, Nothing),
BinaryExpression(Control, Keyword('lnbsetpicturecolorselected'), Array, Nothing),
BinaryExpression(Control, Keyword('lnbsetpicturecolorselectedright'), Array, Nothing),
BinaryExpression(Control, Keyword('lnbsetpictureright'), Array, Nothing),
BinaryExpression(Control, Keyword('lnbsettext'), Array, Nothing),
BinaryExpression(Control, Keyword('lnbsettextright'), Array, Nothing),
BinaryExpression(Control, Keyword('lnbsetvalue'), Array, Nothing),
BinaryExpression(Control, Keyword('lnbsort'), Array, Nothing),
BinaryExpression(Control, Keyword('lnbsortbyvalue'), Array, Nothing),
BinaryExpression(Control, Keyword('lnbtext'), Array, String),
BinaryExpression(Control, Keyword('lnbtextright'), Array, String),
BinaryExpression(Control, Keyword('lnbvalue'), Array, Number),
BinaryExpression(Object, Keyword('loadidentity'), String, Boolean),
BinaryExpression(Object, Keyword('loadmagazine'), Array, Nothing),
BinaryExpression(Control, Keyword('loadoverlay'), Config, Nothing),
BinaryExpression(Object, Keyword('loadstatus'), String, Boolean),
BinaryExpression(Object, Keyword('lock'), Boolean, Nothing),
BinaryExpression(Object, Keyword('lock'), Number, Nothing),
BinaryExpression(Object, Keyword('lockcamerato'), Array, Nothing),
BinaryExpression(Object, Keyword('lockcargo'), Array, Nothing),
BinaryExpression(Object, Keyword('lockcargo'), Boolean, Nothing),
BinaryExpression(Object, Keyword('lockdriver'), Boolean, Nothing),
BinaryExpression(Object, Keyword('lockedcargo'), Number, Boolean),
BinaryExpression(Object, Keyword('lockedturret'), Array, Boolean),
BinaryExpression(Object, Keyword('lockturret'), Array, Nothing),
BinaryExpression(Object, Keyword('lockwp'), Boolean, Nothing),
BinaryExpression(Group, Keyword('lockwp'), Boolean, Nothing),
BinaryExpression(Object, Keyword('lookat'), Object, Nothing),
BinaryExpression(Object, Keyword('lookat'), Array, Nothing),
BinaryExpression(Array, Keyword('lookat'), Object, Nothing),
BinaryExpression(Array, Keyword('lookat'), Array, Nothing),
BinaryExpression(Control, Keyword('lookatpos'), Array, Nothing),
BinaryExpression(Object, Keyword('magazinesturret'), Array, Array),
BinaryExpression(Object, Keyword('magazineturretammo'), Array, Number),
BinaryExpression(Control, Keyword('mapcenteroncamera'), Boolean, Nothing),
BinaryExpression(Number, Keyword('max'), Number, Number),
BinaryExpression(Control, Keyword('menuaction'), Array, String),
BinaryExpression(Control, Keyword('menuadd'), Array, Number),
BinaryExpression(Control, Keyword('menuchecked'), Array, Boolean),
BinaryExpression(Control, Keyword('menucollapse'), Array, Nothing),
BinaryExpression(Control, Keyword('menudata'), Array, String),
BinaryExpression(Control, Keyword('menudelete'), Array, Nothing),
BinaryExpression(Control, Keyword('menuenable'), Array, Nothing),
BinaryExpression(Control, Keyword('menuenabled'), Array, Boolean),
BinaryExpression(Control, Keyword('menuexpand'), Array, Nothing),
BinaryExpression(Control, Keyword('menupicture'), Array, String),
BinaryExpression(Control, Keyword('menusetaction'), Array, Nothing),
BinaryExpression(Control, Keyword('menusetcheck'), Array, Nothing),
BinaryExpression(Control, Keyword('menusetdata'), Array, Nothing),
BinaryExpression(Control, Keyword('menusetpicture'), Array, Nothing),
BinaryExpression(Control, Keyword('menusetvalue'), Array, Nothing),
BinaryExpression(Control, Keyword('menushortcut'), Array, Number),
BinaryExpression(Control, Keyword('menushortcuttext'), Array, String),
BinaryExpression(Control, Keyword('menusize'), Array, Number),
BinaryExpression(Control, Keyword('menusort'), Array, Nothing),
BinaryExpression(Control, Keyword('menutext'), Array, String),
BinaryExpression(Control, Keyword('menuurl'), Array, String),
BinaryExpression(Control, Keyword('menuvalue'), Array, Number),
BinaryExpression(Number, Keyword('min'), Number, Number),
BinaryExpression(Object, Keyword('minedetectedby'), Side, Boolean),
BinaryExpression(Number, Keyword('mod'), Number, Number),
BinaryExpression(Object, Keyword('modeltoworld'), Array, Array),
BinaryExpression(Object, Keyword('modeltoworldvisual'), Array, Array),
BinaryExpression(Object, Keyword('modeltoworldvisualworld'), Array, Array),
BinaryExpression(Object, Keyword('modeltoworldworld'), Array, Array),
BinaryExpression(Object, Keyword('move'), Array, Nothing),
BinaryExpression(Group, Keyword('move'), Array, Nothing),
BinaryExpression(Object, Keyword('moveinany'), Object, Boolean),
BinaryExpression(Object, Keyword('moveincargo'), Object, Nothing),
BinaryExpression(Object, Keyword('moveincargo'), Array, Nothing),
BinaryExpression(Object, Keyword('moveincommander'), Object, Nothing),
BinaryExpression(Object, Keyword('moveindriver'), Object, Nothing),
BinaryExpression(Object, Keyword('moveingunner'), Object, Nothing),
BinaryExpression(Object, Keyword('moveinturret'), Array, Nothing),
BinaryExpression(Control, Keyword('moveobjecttoend'), String, Nothing),
BinaryExpression(Object, Keyword('moveto'), Array, Nothing),
BinaryExpression(Object, Keyword('nearentities'), Number, Array),
BinaryExpression(Object, Keyword('nearentities'), Array, Array),
BinaryExpression(Array, Keyword('nearentities'), Number, Array),
BinaryExpression(Array, Keyword('nearentities'), Array, Array),
BinaryExpression(Array, Keyword('nearestobject'), String, Object),
BinaryExpression(Array, Keyword('nearestobject'), Number, Object),
BinaryExpression(Object, Keyword('nearobjects'), Number, Array),
BinaryExpression(Object, Keyword('nearobjects'), Array, Array),
BinaryExpression(Array, Keyword('nearobjects'), Number, Array),
BinaryExpression(Array, Keyword('nearobjects'), Array, Array),
BinaryExpression(Object, Keyword('nearobjectsready'), Number, Boolean),
BinaryExpression(Array, Keyword('nearobjectsready'), Number, Boolean),
BinaryExpression(Object, Keyword('nearroads'), Number, Array),
BinaryExpression(Array, Keyword('nearroads'), Number, Array),
BinaryExpression(Object, Keyword('nearsupplies'), Number, Array),
BinaryExpression(Object, Keyword('nearsupplies'), Array, Array),
BinaryExpression(Array, Keyword('nearsupplies'), Number, Array),
BinaryExpression(Array, Keyword('nearsupplies'), Array, Array),
BinaryExpression(Object, Keyword('neartargets'), Number, Array),
BinaryExpression(Control, Keyword('newoverlay'), Config, Nothing),
BinaryExpression(Control, Keyword('nmenuitems'), String, Number),
BinaryExpression(Control, Keyword('nmenuitems'), Number, Number),
BinaryExpression(String, Keyword('objstatus'), String, Nothing),
BinaryExpression(Control, Keyword('ondoubleclick'), String, Anything),
BinaryExpression(Type, Keyword('onmapsingleclick'), Code, Nothing),
BinaryExpression(Type, Keyword('onmapsingleclick'), String, Nothing),
BinaryExpression(Control, Keyword('onshownewobject'), String, Anything),
BinaryExpression(Boolean, Keyword('or'), Boolean, Boolean),
BinaryExpression(Boolean, Keyword('or'), Code, Boolean),
BinaryExpression(Array, Keyword('ordergetin'), Boolean, Nothing),
BinaryExpression(Type, Keyword('param'), Array, Anything),
BinaryExpression(Type, Keyword('params'), Array, Boolean),
BinaryExpression(Object, Keyword('playaction'), String, Nothing),
BinaryExpression(Object, Keyword('playactionnow'), String, Nothing),
BinaryExpression(Object, Keyword('playgesture'), String, Nothing),
BinaryExpression(Object, Keyword('playmove'), String, Nothing),
BinaryExpression(Object, Keyword('playmovenow'), String, Nothing),
BinaryExpression(Control, Keyword('posscreentoworld'), Array, Array),
BinaryExpression(Control, Keyword('posworldtoscreen'), Array, Array),
BinaryExpression(String, Keyword('ppeffectadjust'), Array, Nothing),
BinaryExpression(Number, Keyword('ppeffectadjust'), Array, Nothing),
BinaryExpression(String, Keyword('ppeffectcommit'), Number, Nothing),
BinaryExpression(Number, Keyword('ppeffectcommit'), Number, Nothing),
BinaryExpression(Array, Keyword('ppeffectcommit'), Number, Nothing),
BinaryExpression(String, Keyword('ppeffectenable'), Boolean, Nothing),
BinaryExpression(Array, Keyword('ppeffectenable'), Boolean, Nothing),
BinaryExpression(Number, Keyword('ppeffectenable'), Boolean, Nothing),
BinaryExpression(Number, Keyword('ppeffectforceinnvg'), Boolean, Nothing),
BinaryExpression(Number, Keyword('preloadobject'), Object, Boolean),
BinaryExpression(Number, Keyword('preloadobject'), String, Boolean),
BinaryExpression(Control, Keyword('progresssetposition'), Number, Nothing),
BinaryExpression(Number, Keyword('publicvariableclient'), String, Nothing),
BinaryExpression(Array, Keyword('pushback'), Type, Number),
BinaryExpression(Array, Keyword('pushbackunique'), Type, Number),
BinaryExpression(Number, Keyword('radiochanneladd'), Array, Nothing),
BinaryExpression(Number, Keyword('radiochannelremove'), Array, Nothing),
BinaryExpression(Number, Keyword('radiochannelsetcallsign'), String, Nothing),
BinaryExpression(Number, Keyword('radiochannelsetlabel'), String, Nothing),
BinaryExpression(Number, Keyword('random'), Number, Number),
BinaryExpression(Number, Keyword('random'), Array, Number),
BinaryExpression(TeamMember, Keyword('registertask'), String, Boolean),
BinaryExpression(Object, Keyword('remotecontrol'), Object, Nothing),
BinaryExpression(Type, Keyword('remoteexec'), Array, Anything),
BinaryExpression(Type, Keyword('remoteexeccall'), Array, Anything),
BinaryExpression(Object, Keyword('removeaction'), Number, Nothing),
BinaryExpression(Object, Keyword('removealleventhandlers'), String, Nothing),
BinaryExpression(Object, Keyword('removeallmpeventhandlers'), String, Nothing),
BinaryExpression(Object, Keyword('removecuratoraddons'), Array, Nothing),
BinaryExpression(Object, Keyword('removecuratorcameraarea'), Number, Nothing),
BinaryExpression(Object, Keyword('removecuratoreditableobjects'), Array, Nothing),
BinaryExpression(Object, Keyword('removecuratoreditingarea'), Number, Nothing),
BinaryExpression(Control, Keyword('removedrawicon'), Array, Nothing),
BinaryExpression(Control, Keyword('removedrawlinks'), Array, Nothing),
BinaryExpression(Object, Keyword('removeeventhandler'), Array, Nothing),
BinaryExpression(Group, Keyword('removegroupicon'), Number, Nothing),
BinaryExpression(Object, Keyword('removehandgunitem'), String, Nothing),
BinaryExpression(Object, Keyword('removeitem'), String, Nothing),
BinaryExpression(Object, Keyword('removeitemfrombackpack'), String, Nothing),
BinaryExpression(Object, Keyword('removeitemfromuniform'), String, Nothing),
BinaryExpression(Object, Keyword('removeitemfromvest'), String, Nothing),
BinaryExpression(Object, Keyword('removeitems'), String, Nothing),
BinaryExpression(Object, Keyword('removemagazine'), String, Nothing),
BinaryExpression(Object, Keyword('removemagazine'), Array, Nothing),
BinaryExpression(Object, Keyword('removemagazineglobal'), String, Nothing),
BinaryExpression(Object, Keyword('removemagazines'), String, Nothing),
BinaryExpression(Object, Keyword('removemagazinesturret'), Array, Nothing),
BinaryExpression(Object, Keyword('removemagazineturret'), Array, Nothing),
BinaryExpression(Control, Keyword('removemenuitem'), Number, Nothing),
BinaryExpression(Control, Keyword('removemenuitem'), String, Nothing),
BinaryExpression(Object, Keyword('removempeventhandler'), Array, Nothing),
BinaryExpression(Object, Keyword('removeownedmine'), Object, Nothing),
BinaryExpression(Object, Keyword('removeprimaryweaponitem'), String, Nothing),
BinaryExpression(Object, Keyword('removesecondaryweaponitem'), String, Nothing),
BinaryExpression(Object, Keyword('removesimpletask'), Task, Nothing),
BinaryExpression(TeamMember, Keyword('removeteammember'), TeamMember, Nothing),
BinaryExpression(Object, Keyword('removeweapon'), String, Nothing),
BinaryExpression(Object, Keyword('removeweaponattachmentcargo'), Array, Nothing),
BinaryExpression(Object, Keyword('removeweaponcargo'), Array, Nothing),
BinaryExpression(Object, Keyword('removeweaponglobal'), String, Nothing),
BinaryExpression(Object, Keyword('removeweaponturret'), Array, Nothing),
BinaryExpression(Side, Keyword('reportremotetarget'), Array, Nothing),
BinaryExpression(Array, Keyword('resize'), Number, Nothing),
BinaryExpression(Object, Keyword('respawnvehicle'), Array, Nothing),
BinaryExpression(Object, Keyword('reveal'), Object, Nothing),
BinaryExpression(Group, Keyword('reveal'), Object, Nothing),
BinaryExpression(Object, Keyword('reveal'), Array, Nothing),
BinaryExpression(Group, Keyword('reveal'), Array, Nothing),
BinaryExpression(Side, Keyword('revealmine'), Object, Nothing),
BinaryExpression(Array, Keyword('ropeattachto'), Object, Nothing),
BinaryExpression(Object, Keyword('ropedetach'), Object, Nothing),
BinaryExpression(Object, Keyword('saveidentity'), String, Boolean),
BinaryExpression(Object, Keyword('savestatus'), String, Boolean),
BinaryExpression(Object, Keyword('say'), String, Nothing),
BinaryExpression(Array, Keyword('say'), String, Nothing),
BinaryExpression(Object, Keyword('say'), Array, Nothing),
BinaryExpression(Array, Keyword('say'), Array, Nothing),
BinaryExpression(Object, Keyword('say2d'), String, Nothing),
BinaryExpression(Array, Keyword('say2d'), String, Nothing),
BinaryExpression(Object, Keyword('say2d'), Array, Nothing),
BinaryExpression(Array, Keyword('say2d'), Array, Nothing),
BinaryExpression(Object, Keyword('say3d'), String, Nothing),
BinaryExpression(Array, Keyword('say3d'), String, Nothing),
BinaryExpression(Object, Keyword('say3d'), Array, Nothing),
BinaryExpression(Array, Keyword('say3d'), Array, Nothing),
BinaryExpression(Array, Keyword('select'), Number, Anything),
BinaryExpression(Array, Keyword('select'), Boolean, Anything),
BinaryExpression(Array, Keyword('select'), Array, Array),
BinaryExpression(String, Keyword('select'), Array, String),
BinaryExpression(Array, Keyword('select'), Code, Array),
BinaryExpression(Config, Keyword('select'), Number, Config),
BinaryExpression(Object, Keyword('selectdiarysubject'), String, Nothing),
BinaryExpression(Control, Keyword('selecteditorobject'), String, Anything),
BinaryExpression(Object, Keyword('selectionposition'), String, Array),
BinaryExpression(Object, Keyword('selectionposition'), Array, Array),
BinaryExpression(Group, Keyword('selectleader'), Object, Nothing),
BinaryExpression(Array, Keyword('selectrandomweighted'), Array, Anything),
BinaryExpression(Object, Keyword('selectweapon'), String, Nothing),
BinaryExpression(Object, Keyword('selectweaponturret'), Array, Nothing),
BinaryExpression(Object, Keyword('sendsimplecommand'), String, Nothing),
BinaryExpression(TeamMember, Keyword('sendtask'), Array, Task),
BinaryExpression(Task, Keyword('sendtaskresult'), Array, Nothing),
BinaryExpression(String, Keyword('servercommand'), String, Boolean),
BinaryExpression(Array, Keyword('set'), Array, Nothing),
BinaryExpression(Type, Keyword('set3denattribute'), Array, Boolean),
BinaryExpression(Type, Keyword('set3denlayer'), Number, Boolean),
BinaryExpression(Array, Keyword('set3denlogictype'), String, Nothing),
BinaryExpression(String, Keyword('set3denmissionattribute'), Array, Nothing),
BinaryExpression(Array, Keyword('set3denobjecttype'), String, Nothing),
BinaryExpression(Object, Keyword('setactualcollectivertd'), Number, Nothing),
BinaryExpression(Object, Keyword('setairplanethrottle'), Number, Nothing),
BinaryExpression(Object, Keyword('setairportside'), Side, Nothing),
BinaryExpression(Number, Keyword('setairportside'), Side, Nothing),
BinaryExpression(Object, Keyword('setammo'), Array, Nothing),
BinaryExpression(Object, Keyword('setammocargo'), Number, Nothing),
BinaryExpression(Object, Keyword('setammoonpylon'), Array, Nothing),
BinaryExpression(Object, Keyword('setanimspeedcoef'), Number, Nothing),
BinaryExpression(String, Keyword('setattributes'), Array, String),
BinaryExpression(String, Keyword('setattributes'), Array, String),
BinaryExpression(Object, Keyword('setautonomous'), Boolean, Nothing),
BinaryExpression(Object, Keyword('setbehaviour'), String, Nothing),
BinaryExpression(Group, Keyword('setbehaviour'), String, Nothing),
BinaryExpression(Object, Keyword('setbleedingremaining'), Number, Nothing),
BinaryExpression(Object, Keyword('setbrakesrtd'), Array, Nothing),
BinaryExpression(Object, Keyword('setcamerainterest'), Number, Nothing),
BinaryExpression(Boolean, Keyword('setcamuseti'), Number, Nothing),
BinaryExpression(Object, Keyword('setcaptive'), Boolean, Nothing),
BinaryExpression(Object, Keyword('setcaptive'), Number, Nothing),
BinaryExpression(Object, Keyword('setcenterofmass'), Array, Nothing),
BinaryExpression(Object, Keyword('setcollisionlight'), Boolean, Nothing),
BinaryExpression(Object, Keyword('setcombatmode'), String, Nothing),
BinaryExpression(Group, Keyword('setcombatmode'), String, Nothing),
BinaryExpression(TeamMember, Keyword('setcombatmode'), String, Nothing),
BinaryExpression(Object, Keyword('setconvoyseparation'), Number, Nothing),
BinaryExpression(Object, Keyword('setcuratorcameraareaceiling'), Number, Nothing),
BinaryExpression(Object, Keyword('setcuratorcoef'), Array, Nothing),
BinaryExpression(Object, Keyword('setcuratoreditingareatype'), Boolean, Nothing),
BinaryExpression(Object, Keyword('setcuratorwaypointcost'), Number, Nothing),
BinaryExpression(Object, Keyword('setcurrenttask'), Task, Nothing),
BinaryExpression(Group, Keyword('setcurrentwaypoint'), Array, Nothing),
BinaryExpression(Object, Keyword('setcustomaimcoef'), Number, Nothing),
BinaryExpression(Object, Keyword('setcustomweightrtd'), Number, Nothing),
BinaryExpression(Object, Keyword('setdamage'), Number, Nothing),
BinaryExpression(Object, Keyword('setdamage'), Array, Nothing),
BinaryExpression(Object, Keyword('setdammage'), Number, Nothing),
BinaryExpression(String, Keyword('setdebriefingtext'), Array, Nothing),
BinaryExpression(Object, Keyword('setdestination'), Array, Nothing),
BinaryExpression(Object, Keyword('setdir'), Number, Nothing),
BinaryExpression(Location, Keyword('setdirection'), Number, Nothing),
BinaryExpression(Control, Keyword('setdrawicon'), Array, Nothing),
BinaryExpression(Object, Keyword('setdriveonpath'), Array, Nothing),
BinaryExpression(Object, Keyword('setdropinterval'), Number, Nothing),
BinaryExpression(String, Keyword('setdynamicsimulationdistance'), Number, Nothing),
BinaryExpression(String, Keyword('setdynamicsimulationdistancecoef'), Number, Nothing),
BinaryExpression(Control, Keyword('seteditormode'), String, Nothing),
BinaryExpression(Control, Keyword('seteditorobjectscope'), Array, Nothing),
BinaryExpression(Object, Keyword('seteffectcondition'), String, Nothing),
BinaryExpression(Array, Keyword('seteffectcondition'), String, Nothing),
BinaryExpression(Object, Keyword('setenginerpmrtd'), Array, Nothing),
BinaryExpression(Object, Keyword('setface'), String, Nothing),
BinaryExpression(Object, Keyword('setfaceanimation'), Number, Nothing),
BinaryExpression(Object, Keyword('setfatigue'), Number, Nothing),
BinaryExpression(Object, Keyword('setfeaturetype'), Number, Boolean),
BinaryExpression(Object, Keyword('setflaganimationphase'), Number, Nothing),
BinaryExpression(Object, Keyword('setflagowner'), Object, Nothing),
BinaryExpression(Object, Keyword('setflagside'), Side, Nothing),
BinaryExpression(Object, Keyword('setflagtexture'), String, Nothing),
BinaryExpression(Number, Keyword('setfog'), Number, Nothing),
BinaryExpression(Number, Keyword('setfog'), Array, Nothing),
BinaryExpression(Number, Keyword('setforcegeneratorrtd'), Array, Nothing),
BinaryExpression(Object, Keyword('setformation'), String, Nothing),
BinaryExpression(Group, Keyword('setformation'), String, Nothing),
BinaryExpression(TeamMember, Keyword('setformation'), String, Nothing),
BinaryExpression(Object, Keyword('setformationtask'), String, Nothing),
BinaryExpression(Object, Keyword('setformdir'), Number, Nothing),
BinaryExpression(Group, Keyword('setformdir'), Number, Nothing),
BinaryExpression(Side, Keyword('setfriend'), Array, Nothing),
BinaryExpression(TeamMember, Keyword('setfromeditor'), Boolean, Nothing),
BinaryExpression(Number, Keyword('setfsmvariable'), Array, Nothing),
BinaryExpression(Object, Keyword('setfuel'), Number, Nothing),
BinaryExpression(Object, Keyword('setfuelcargo'), Number, Nothing),
BinaryExpression(Group, Keyword('setgroupicon'), Array, Nothing),
BinaryExpression(Group, Keyword('setgroupiconparams'), Array, Nothing),
BinaryExpression(Object, Keyword('setgroupid'), Array, Nothing),
BinaryExpression(Group, Keyword('setgroupid'), Array, Nothing),
BinaryExpression(Object, Keyword('setgroupidglobal'), Array, Nothing),
BinaryExpression(Group, Keyword('setgroupidglobal'), Array, Nothing),
BinaryExpression(Group, Keyword('setgroupowner'), Number, Boolean),
BinaryExpression(Number, Keyword('setgusts'), Number, Nothing),
BinaryExpression(Object, Keyword('sethidebehind'), Array, Nothing),
BinaryExpression(Object, Keyword('sethit'), Array, Nothing),
BinaryExpression(Object, Keyword('sethitindex'), Array, Nothing),
BinaryExpression(Object, Keyword('sethitpointdamage'), Array, Nothing),
# Copyright 2013 by <NAME> (<EMAIL>).
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Code for dealing with Codon Seq.
CodonSeq class is interited from Seq class. This is the core class to
deal with sequences in CodonAlignment in biopython.
"""
from __future__ import division, print_function
from itertools import permutations
from math import log
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC, Gapped, HasStopCodon, Alphabet
from Bio.Alphabet import generic_dna, _ungap
from Bio.Data.CodonTable import generic_by_id
from Bio.CodonAlign.CodonAlphabet import default_codon_alphabet, default_codon_table
__docformat__ = "epytext en" # Don't just use plain text in epydoc API pages!
class CodonSeq(Seq):
"""CodonSeq is designed to be within the SeqRecords of a
CodonAlignment class.
CodonSeq is useful because it allows the user to specify the
reading frame when translating the CodonSeq.
CodonSeq also accepts codon-style slicing by calling the
get_codon() method.
Important: an ungapped CodonSeq can be any length if you
specify the rf_table. A gapped CodonSeq should have a length
that is a multiple of three.
>>> codonseq = CodonSeq("AAATTTGGGCCAAATTT", rf_table=(0,3,6,8,11,14))
>>> print(codonseq.translate())
KFGAKF
Test the get_full_rf_table method:
>>> p = CodonSeq('AAATTTCCCGG-TGGGTTTAA', rf_table=(0, 3, 6, 9, 11, 14, 17))
>>> full_rf_table = p.get_full_rf_table()
>>> print(full_rf_table)
[0, 3, 6, 9, 12, 15, 18]
>>> print(p.translate(rf_table=full_rf_table, ungap_seq=False))
KFPPWV*
>>> p = CodonSeq('AAATTTCCCGGGAA-TTTTAA', rf_table=(0, 3, 6, 9, 14, 17))
>>> print(p.get_full_rf_table())
[0, 3, 6, 9, 12.0, 15, 18]
>>> p = CodonSeq('AAA------------TAA', rf_table=(0, 3))
>>> print(p.get_full_rf_table())
[0, 3.0, 6.0, 9.0, 12.0, 15]
"""
def __init__(self, data='', alphabet=default_codon_alphabet, \
gap_char="-", rf_table=None):
# rf_table should be a tuple or list indicating every
# codon position along the sequence. For example:
# sequence = 'AAATTTGGGCCAAATTT'
# rf_table = (0, 3, 6, 8, 11, 14)
# the translated protein sequences will be
# AAA TTT GGG GCC AAA TTT
# K F G A K F
# Notice: rf_table applies to ungapped sequence. If there
# are gaps in the sequence, they will be discarded. This
# feature ensures the rf_table is independent of where the
# codon sequence appears in the alignment
Seq.__init__(self, data.upper(), alphabet=alphabet)
self.gap_char = gap_char
# check that the sequence length is a multiple of three
if rf_table is None:
seq_ungapped = self._data.replace(gap_char, "")
assert len(self) % 3 == 0, "Sequence length is not a triple number"
self.rf_table = list(filter(lambda x: x%3 == 0,
range(len(seq_ungapped))))
# check alphabet
# Do not use the Alphabet._verify_alphabet function because it
# only works for a single alphabet
for i in self.rf_table:
if self._data[i:i+3] not in alphabet.letters:
raise ValueError("Sequence contain undefined letters from"
" alphabet "
"({0})! ".format(self._data[i:i+3]))
else:
#if gap_char in self._data:
# assert len(self) % 3 == 0, \
# "Gapped sequence length is not a triple number"
assert isinstance(rf_table, (tuple, list)), \
"rf_table should be a tuple or list object"
assert all(isinstance(i, int) for i in rf_table), \
"elements in rf_table should be int that specify " \
+ "the codon positions of the sequence"
seq_ungapped = self._data.replace(gap_char, "")
for i in rf_table:
if seq_ungapped[i:i+3] not in alphabet.letters:
raise ValueError("Sequence contain undefined letters "
"from alphabet "
"({0})!".format(seq_ungapped[i:i+3]))
self.rf_table = rf_table
def __getitem__(self, index):
# TODO: handle alphabet elegantly
return Seq(self._data[index], alphabet=generic_dna)
def get_codon(self, index):
"""get the `index`-th codon from the self.seq
"""
if len(set([i % 3 for i in self.rf_table])) != 1:
raise RuntimeError("frameshift detected. "
"CodonSeq object is not able to deal "
"with codon sequence with frameshift. "
"Plase use normal slice option.")
if isinstance(index, int):
if index != -1:
return self._data[index*3:(index+1)*3]
else:
return self._data[index*3:]
else:
# This slice ensures that codon will always be the unit
# in slicing (it won't change to other codon if you are
# using reverse slicing such as [::-1]).
# The idea of the code below is to first map the slice
# to amino acid sequence and then transform it into
# codon sequence.
aa_index = range(len(self)//3)
def cslice(p):
aa_slice = aa_index[p]
codon_slice = ''
for i in aa_slice:
codon_slice += self._data[i*3:i*3+3]
return codon_slice
codon_slice = cslice(index)
return CodonSeq(codon_slice, alphabet=self.alphabet)
def get_codon_num(self):
"""Return the number of codons in the CodonSeq"""
return len(self.rf_table)
def translate(self, codon_table=default_codon_table,
stop_symbol="*", rf_table=None, ungap_seq=True):
"""Translate the CodonSeq based on the reading frame
in rf_table. It is possible for the user to specify
an rf_table at this point. If you want to include
gaps in the translated sequence, this is the only
way: pass a full rf_table and set ungap_seq to False for this
purpose.
"""
amino_acids = []
if ungap_seq is True:
tr_seq = self._data.replace(self.gap_char, "")
else:
tr_seq = self._data
if rf_table is None:
rf_table = self.rf_table
p = -1 #initiation
for i in rf_table:
if isinstance(i, float):
amino_acids.append('-')
continue
#elif '---' == tr_seq[i:i+3]:
# amino_acids.append('-')
# continue
elif '-' in tr_seq[i:i+3]:
# considering two types of frameshift
if p == -1 or p - i == 3:
p = i
codon = tr_seq[i:i+6].replace('-', '')[:3]
elif p - i > 3:
codon = tr_seq[i:i+3]
p = i
else:
# normal condition without gaps
codon = tr_seq[i:i+3]
p = i
if codon in codon_table.stop_codons:
amino_acids.append(stop_symbol)
continue
try:
amino_acids.append(codon_table.forward_table[codon])
except KeyError:
raise RuntimeError("Unknown codon detected ({0}). Do you "
"forget to speficy ungap_seq "
"argument?".format(codon))
return "".join(amino_acids)
def toSeq(self, alphabet=generic_dna):
return Seq(self._data, generic_dna)
def get_full_rf_table(self):
"""This function returns a full rf_table of the given
CodonSeq records. A full rf_table is different from
normal rf_table in that it translate gaps in CodonSeq.
It is helpful to construct alignment containing
frameshift.
"""
ungap_seq = self._data.replace("-", "")
codon_lst = [ungap_seq[i:i+3] for i in self.rf_table]
relative_pos = [self.rf_table[0]]
for i in range(1, len(self.rf_table[1:])+1):
relative_pos.append(self.rf_table[i]-self.rf_table[i-1])
full_rf_table = []
codon_num = 0
for i in filter(lambda x: x%3==0, range(len(self._data))):
if self._data[i:i+3] == self.gap_char*3:
full_rf_table.append(i+0.0)
elif relative_pos[codon_num] == 0:
full_rf_table.append(i)
codon_num += 1
elif relative_pos[codon_num] in (-1, -2):
# check the gap status of previous codon
gap_stat = len(self._data[i-3:i].replace("-", ""))
if gap_stat == 3:
full_rf_table.append(i+relative_pos[codon_num])
elif gap_stat == 2:
full_rf_table.append(i+1+relative_pos[codon_num])
elif gap_stat == 1:
full_rf_table.append(i+2+relative_pos[codon_num])
codon_num += 1
elif relative_pos[codon_num] > 0:
full_rf_table.append(i+0.0)
try:
this_len = len(self._data[i:i+3].replace("-", ""))
relative_pos[codon_num] -= this_len
except IndexError:
# we probably reached the last codon
pass
return full_rf_table
def full_translate(self, codon_table=default_codon_table, stop_symbol="*"):
"""Apply full translation with gaps considered.
"""
full_rf_table = self.get_full_rf_table()
return self.translate(codon_table=codon_table, stop_symbol=stop_symbol,
rf_table=full_rf_table, ungap_seq=False)
def ungap(self, gap=None):
if hasattr(self.alphabet, "gap_char"):
if not gap:
gap = self.alphabet.gap_char
elif gap != self.alphabet.gap_char:
raise ValueError("Gap %s does not match %s from alphabet"
% (repr(gap), repr(self.alphabet.gap_char)))
alpha = _ungap(self.alphabet)
elif not gap:
raise ValueError("Gap character not given and not defined in "
"alphabet")
else:
alpha = self.alphabet # modify!
if len(gap) != 1 or not isinstance(gap, str):
raise ValueError("Unexpected gap character, %s" % repr(gap))
return CodonSeq(str(self._data).replace(gap, ""), alpha,
rf_table=self.rf_table)
@classmethod
def from_seq(cls, seq, alphabet=default_codon_alphabet, rf_table=None):
if rf_table is None:
return cls(seq._data, alphabet=alphabet)
else:
return cls(seq._data, alphabet=alphabet, rf_table=rf_table)
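# Added illustrative sketch (not part of the original module): a minimal, self-contained
# example of codon-level access and translation with CodonSeq.
def _codonseq_usage_sketch():
    """Demonstrate get_codon() and translate() on a simple ungapped CodonSeq."""
    example = CodonSeq("AAATTTGGG")        # default rf_table becomes [0, 3, 6]
    second_codon = example.get_codon(1)    # 'TTT'
    protein = example.translate()          # 'KFG'
    return second_codon, protein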
def _get_codon_list(codonseq):
"""get a list of codons according to full_rf_table for counting
(PRIVATE).
"""
#if not isinstance(codonseq, CodonSeq):
# raise TypeError("_get_codon_list accept a CodonSeq object "
# "({0} detected)".format(type(codonseq)))
full_rf_table = codonseq.get_full_rf_table()
codon_lst = []
for i, k in enumerate(full_rf_table):
if isinstance(k, int):
start = k
try:
end = int(full_rf_table[i+1])
except IndexError:
end = start+3
this_codon = str(codonseq[start:end])
if len(this_codon) == 3:
codon_lst.append(this_codon)
else:
codon_lst.append(str(this_codon.ungap()))
elif str(codonseq[int(k):int(k)+3]) == "---":
codon_lst.append("---")
else:
# this may be problematic, as normally no codon should
# fall into this condition
codon_lst.append(codonseq[int(k):int(k)+3])
return codon_lst
def cal_dn_ds(codon_seq1, codon_seq2, method="NG86",
codon_table=default_codon_table, k=1, cfreq=None):
"""Function to calculate the dN and dS of the given two CodonSeq
or SeqRecord that contain CodonSeq objects.
Available methods:
- NG86 - PMID: 3444411
- LWL85 - PMID: 3916709
- ML - PMID: 7968486
- YN00 - PMID: 10666704
Arguments:
- k - transition/transversion ratio
- cfreq - Current codon frequency vector; can only be specified
when you are using the ML method. Possible ways of
getting cfreq are: F1x4, F3x4 and F61.
"""
if all([isinstance(codon_seq1, CodonSeq),
isinstance(codon_seq2, CodonSeq)]):
pass
elif all([isinstance(codon_seq1, | |
# Repository: atten/django-docker-helpers
import inspect
import logging
import os
import textwrap
import typing as t
from collections import deque, namedtuple
from pprint import pformat
from django_docker_helpers.utils import import_from, shred, wf, run_env_once
from . import exceptions
from .backends import *
DEFAULT_PARSER_MODULE_PATH = 'django_docker_helpers.config.backends'
DEFAULT_PARSER_MODULES = (
'{0}.EnvironmentParser'.format(DEFAULT_PARSER_MODULE_PATH),
'{0}.MPTRedisParser'.format(DEFAULT_PARSER_MODULE_PATH),
'{0}.MPTConsulParser'.format(DEFAULT_PARSER_MODULE_PATH),
'{0}.RedisParser'.format(DEFAULT_PARSER_MODULE_PATH),
'{0}.ConsulParser'.format(DEFAULT_PARSER_MODULE_PATH),
'{0}.YamlParser'.format(DEFAULT_PARSER_MODULE_PATH),
)
def comma_str_to_list(raw_val: str) -> t.List[str]:
return list(filter(None, raw_val.split(',')))
ConfigReadItem = namedtuple('ConfigReadItem', ['variable_path', 'value', 'type', 'is_default', 'parser_name'])
class ConfigLoader:
"""
- provides a single interface to read from the specified config parsers in the order they are given;
- tracks the options accessed from parsers;
- prints the config option access log in a pretty-printed way.
Example:
::
env = {
'PROJECT__DEBUG': 'false'
}
parsers = [
EnvironmentParser(scope='project', env=env),
RedisParser('my/conf/service/config.yml', host=REDIS_HOST, port=REDIS_PORT),
YamlParser(config='./tests/data/config.yml', scope='project'),
]
configure = ConfigLoader(parsers=parsers)
DEBUG = configure('debug') # 'false'
DEBUG = configure('debug', coerce_type=bool) # False
"""
def __init__(self,
parsers: t.List[BaseParser],
silent: bool = False,
suppress_logs: bool = False,
keep_read_records_max: int = 1024):
"""
Initialization:
- takes a list of initialized parsers;
- only unique parser types should be passed in the ``parsers`` argument
(otherwise all parsers of the same type receive the same initial arguments in
:meth:`~django_docker_helpers.config.ConfigLoader.from_env`);
- the order of the parsers matters.
:param parsers: a list of initialized parsers
:param silent: don't raise exceptions if any read attempt failed
:param suppress_logs: don't display any exception warnings on screen
:param keep_read_records_max: maximum length of the read-records queue
"""
self.parsers = parsers
self.silent = silent
self.suppress_logs = suppress_logs
self.sentinel = object()
self.logger = logging.getLogger(self.__class__.__name__)
self.config_read_queue = deque(maxlen=keep_read_records_max)
self.colors_map = {
'title': '\033[1;35m',
'parser': '\033[0;33m',
'path': '\033[94m',
'type': '\033[1;33m',
'value': '\033[32m',
'reset': '\033[0m',
}
def enqueue(self,
variable_path: str,
parser: t.Optional[BaseParser] = None,
value: t.Any = None):
self.config_read_queue.append(ConfigReadItem(
variable_path,
shred(variable_path, value),
type(value).__name__,
not bool(parser),
str(parser),
))
def __call__(self,
variable_path: str,
default: t.Optional[t.Any] = None,
coerce_type: t.Optional[t.Type] = None,
coercer: t.Optional[t.Callable] = None,
required: bool = False,
**kwargs):
"""
A useful shortcut for method :meth:`~django_docker_helpers.config.ConfigLoader.get`
"""
return self.get(
variable_path, default=default,
coerce_type=coerce_type, coercer=coercer,
required=required,
**kwargs)
def get(self,
variable_path: str,
default: t.Optional[t.Any] = None,
coerce_type: t.Optional[t.Type] = None,
coercer: t.Optional[t.Callable] = None,
required: bool = False,
**kwargs):
"""
Tries to read a ``variable_path`` from each of the passed parsers.
It stops as soon as a read is successful and returns the retrieved value.
If none of the parsers contains a value for the specified path, it returns ``default``.
:param variable_path: a path to variable in config
:param default: a default value if ``variable_path`` is not present anywhere
:param coerce_type: cast a result to a specified type
:param coercer: perform the type casting with specified callback
:param required: raise ``RequiredValueIsEmpty`` if no ``default`` and no result
:param kwargs: additional options to all parsers
:return: **the first successfully read** value from the list of parser instances or ``default``
:raises config.exceptions.RequiredValueIsEmpty: if nothing is read, the ``required``
flag is set, and there's no ``default`` specified
"""
for p in self.parsers:
try:
val = p.get(
variable_path, default=self.sentinel,
coerce_type=coerce_type, coercer=coercer,
**kwargs
)
if val != self.sentinel:
self.enqueue(variable_path, p, val)
return val
except Exception as e:
if not self.silent:
raise
if self.suppress_logs:
continue
self.logger.error('Parser {0} cannot get key `{1}`: {2}'.format(
p.__class__.__name__,
variable_path,
str(e)
))
self.enqueue(variable_path, value=default)
if not default and required:
raise exceptions.RequiredValueIsEmpty(
'No default provided and no value read for `{0}`'.format(variable_path))
return default
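# Illustrative sketch (added example): typical calls against the `configure` loader built
# in the class docstring above; the option names below are hypothetical.
#
#     DEBUG = configure('debug', default=False, coerce_type=bool)
#     # Raises exceptions.RequiredValueIsEmpty when no parser provides a value
#     # and no default is given:
#     SECRET_KEY = configure('secret_key', required=True)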
@staticmethod
def import_parsers(parser_modules: t.Iterable[str]) -> t.Generator[t.Type[BaseParser], None, None]:
"""
Resolves and imports all modules specified in ``parser_modules``. Short names from the local scope
are supported (the scope is ``django_docker_helpers.config.backends``).
:param parser_modules: a list of dot-separated module paths
:return: a generator of [probably] :class:`~django_docker_helpers.config.backends.base.BaseParser`
Example:
::
parsers = list(ConfigLoader.import_parsers([
'EnvironmentParser',
'django_docker_helpers.config.backends.YamlParser'
]))
assert parsers == [EnvironmentParser, YamlParser]
"""
for import_path in parser_modules:
path_parts = import_path.rsplit('.', 1)
if len(path_parts) == 2:
mod_path, parser_class_name = path_parts
else:
mod_path = DEFAULT_PARSER_MODULE_PATH
parser_class_name = import_path
yield import_from(mod_path, parser_class_name)
@staticmethod
def load_parser_options_from_env(
parser_class: t.Type[BaseParser],
env: t.Optional[t.Dict[str, str]] = None) -> t.Dict[str, t.Any]:
"""
Extracts arguments from ``parser_class.__init__`` and populates them from environment variables.
Uses ``__init__`` argument type annotations for correct type casting.
.. note::
Environment variables should be prefixed with ``<UPPERCASEPARSERCLASSNAME>__``.
:param parser_class: a subclass of :class:`~django_docker_helpers.config.backends.base.BaseParser`
:param env: a dict with environment variables, default is ``os.environ``
:return: parser's ``__init__`` arguments dict mapping
Example:
::
env = {
'REDISPARSER__ENDPOINT': 'go.deep',
'REDISPARSER__HOST': 'my-host',
'REDISPARSER__PORT': '66',
}
res = ConfigLoader.load_parser_options_from_env(RedisParser, env)
assert res == {'endpoint': 'go.deep', 'host': 'my-host', 'port': 66}
"""
env = env or os.environ
sentinel = object()
spec: inspect.FullArgSpec = inspect.getfullargspec(parser_class.__init__)
environment_parser = EnvironmentParser(scope=parser_class.__name__.upper(), env=env)
stop_args = ['self']
safe_types = [int, bool, str]
init_args = {}
for arg_name in spec.args:
if arg_name in stop_args:
continue
type_hint = spec.annotations.get(arg_name)
coerce_type = None
if type_hint in safe_types:
coerce_type = type_hint
elif hasattr(type_hint, '__args__'):
if len(type_hint.__args__) == 1: # one type
if type_hint.__args__[0] in safe_types:
coerce_type = type_hint.__args__[0]
elif len(type_hint.__args__) == 2: # t.Optional
try:
_args = list(type_hint.__args__)
_args.remove(type(None))
if _args[0] in safe_types:
coerce_type = _args[0]
except ValueError:
pass
val = environment_parser.get(arg_name, sentinel, coerce_type=coerce_type)
if val is sentinel:
continue
init_args[arg_name] = val
return init_args
@staticmethod
def from_env(parser_modules: t.Optional[t.Union[t.List[str], t.Tuple[str]]] = DEFAULT_PARSER_MODULES,
env: t.Optional[t.Dict[str, str]] = None,
silent: bool = False,
suppress_logs: bool = False,
extra: t.Optional[dict] = None) -> 'ConfigLoader':
"""
Creates an instance of :class:`~django_docker_helpers.config.ConfigLoader`
with parsers initialized from environment variables.
By default it tries to initialize all bundled parsers.
Parsers may be customized with ``parser_modules`` argument or ``CONFIG__PARSERS`` environment variable.
Environment variable has a priority over the method argument.
:param parser_modules: a list of dot-separated module paths
:param env: a dict with environment variables, default is ``os.environ``
:param silent: passed to :class:`~django_docker_helpers.config.ConfigLoader`
:param suppress_logs: passed to :class:`~django_docker_helpers.config.ConfigLoader`
:param extra: pass extra arguments to *every* parser
:return: an instance of :class:`~django_docker_helpers.config.ConfigLoader`
Example:
::
env = {
'CONFIG__PARSERS': 'EnvironmentParser,RedisParser,YamlParser',
'ENVIRONMENTPARSER__SCOPE': 'nested',
'YAMLPARSER__CONFIG': './tests/data/config.yml',
'REDISPARSER__HOST': 'wtf.test',
'NESTED__VARIABLE': 'i_am_here',
}
loader = ConfigLoader.from_env(env=env)
assert [type(p) for p in loader.parsers] == [EnvironmentParser, RedisParser, YamlParser]
assert loader.get('variable') == 'i_am_here', 'Ensure env copied from ConfigLoader'
loader = ConfigLoader.from_env(parser_modules=['EnvironmentParser'], env={})
"""
env = env or os.environ
extra = extra or {}
environment_parser = EnvironmentParser(scope='config', env=env)
silent = environment_parser.get('silent', silent, coerce_type=bool)
suppress_logs = environment_parser.get('suppress_logs', suppress_logs, coerce_type=bool)
env_parsers = environment_parser.get('parsers', None, coercer=comma_str_to_list)
if not env_parsers and not parser_modules:
raise ValueError('Must specify `CONFIG__PARSERS` env var or `parser_modules`')
if env_parsers:
parser_classes = ConfigLoader.import_parsers(env_parsers)
else:
parser_classes = ConfigLoader.import_parsers(parser_modules)
parsers = []
for parser_class in parser_classes:
parser_options = ConfigLoader.load_parser_options_from_env(parser_class, env=env)
_init_args = inspect.getfullargspec(parser_class.__init__).args
# add extra args if the parser's __init__ can take them
if 'env' in _init_args:
parser_options['env'] = env
for k, v in extra.items():
if k in _init_args:
parser_options[k] = v
parser_instance = parser_class(**parser_options)
parsers.append(parser_instance)
return ConfigLoader(parsers=parsers, silent=silent, suppress_logs=suppress_logs)
def _colorize(self, name: str, value: str, use_color: bool = False) -> str:
if not use_color:
return value
color = self.colors_map.get(name, '')
if not color:
return value
reset = self.colors_map['reset']
parts = [color + p + reset for p in str(value).split('\n')]
return '\n'.join(parts)
@staticmethod
def _pformat(raw_obj: t.Union[str, t.Any], width: int = 50) -> str:
raw_str = str(raw_obj)
if len(raw_str) <= width:
return raw_obj
if isinstance(raw_obj, str):
return '\n'.join(textwrap.wrap(raw_str, width=width))
return pformat(raw_obj, width=width, compact=True)
@run_env_once
def print_config_read_queue(
self,
use_color: bool = False,
max_col_width: int = 50):
"""
Prints all read (in call order) options.
:param max_col_width: limit column width, ``50`` by default
:param use_color: use terminal colors
:return: nothing
"""
wf(self.format_config_read_queue(use_color=use_color, max_col_width=max_col_width))
wf('\n')
def format_config_read_queue(self,
use_color: bool = False,
max_col_width: int = 50) -> str:
"""
Prepares a string with pretty printed config read queue.
:param use_color: use terminal colors
:param max_col_width: limit column width, ``50`` by default
:return:
"""
try:
from terminaltables import SingleTable
except ImportError:
import warnings
warnings.warn('Cannot display config read queue. Install terminaltables first.')
return ''
col_names_order = ['path', 'value', 'type', 'parser']
pretty_bundles = [[self._colorize(name, name.capitalize(), use_color=use_color)
for name in col_names_order]]
for config_read_item in self.config_read_queue:
pretty_attrs = [
config_read_item.variable_path,
config_read_item.value,
config_read_item.type,
config_read_item.parser_name
]
pretty_attrs = [self._pformat(pa, max_col_width) for pa in pretty_attrs]
if config_read_item.is_default:
pretty_attrs[0] = '*' + pretty_attrs[0]
if use_color:
pretty_attrs = [self._colorize(column_name, pretty_attr, use_color=use_color)
for column_name, pretty_attr in zip(col_names_order, pretty_attrs)]
pretty_bundles.append(pretty_attrs)
table = SingleTable(pretty_bundles)
table.title = self._colorize('title', 'CONFIG READ QUEUE', use_color=use_color)
table.justify_columns[0] = 'right'
# table.inner_row_border = True
| |
example_dir = self.request.config.getini("pytester_example_dir")
if example_dir is None:
raise ValueError("pytester_example_dir is unset, can't copy examples")
example_dir = self.request.config.rootdir.join(example_dir)
for extra_element in self.request.node.iter_markers("pytester_example_path"):
assert extra_element.args
example_dir = example_dir.join(*extra_element.args)
if name is None:
func_name = self._name
maybe_dir = example_dir / func_name
maybe_file = example_dir / (func_name + ".py")
if maybe_dir.isdir():
example_path = maybe_dir
elif maybe_file.isfile():
example_path = maybe_file
else:
raise LookupError(
"{} cant be found as module or package in {}".format(
func_name, example_dir.bestrelpath(self.request.config.rootdir)
)
)
else:
example_path = example_dir.join(name)
if example_path.isdir() and not example_path.join("__init__.py").isfile():
example_path.copy(self.tmpdir)
return self.tmpdir
elif example_path.isfile():
result = self.tmpdir.join(example_path.basename)
example_path.copy(result)
return result
else:
raise LookupError(
f'example "{example_path}" is not found as a file or directory'
)
Session = Session
def getnode(self, config: Config, arg):
"""Return the collection node of a file.
:param _pytest.config.Config config:
A pytest config.
See :py:meth:`parseconfig` and :py:meth:`parseconfigure` for creating it.
:param py.path.local arg:
Path to the file.
"""
session = Session.from_config(config)
assert "::" not in str(arg)
p = py.path.local(arg)
config.hook.pytest_sessionstart(session=session)
res = session.perform_collect([str(p)], genitems=False)[0]
config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK)
return res
def getpathnode(self, path):
"""Return the collection node of a file.
This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to
create the (configured) pytest Config instance.
:param py.path.local path: Path to the file.
"""
config = self.parseconfigure(path)
session = Session.from_config(config)
x = session.fspath.bestrelpath(path)
config.hook.pytest_sessionstart(session=session)
res = session.perform_collect([x], genitems=False)[0]
config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK)
return res
def genitems(self, colitems: Sequence[Union[Item, Collector]]) -> List[Item]:
"""Generate all test items from a collection node.
This recurses into the collection node and returns a list of all the
test items contained within.
"""
session = colitems[0].session
result = [] # type: List[Item]
for colitem in colitems:
result.extend(session.genitems(colitem))
return result
def runitem(self, source):
"""Run the "test_func" Item.
The calling test instance (class containing the test method) must
provide a ``.getrunner()`` method which should return a runner which
can run the test protocol for a single item, e.g.
:py:func:`_pytest.runner.runtestprotocol`.
"""
# used from runner functional tests
item = self.getitem(source)
# the test class where we are called from wants to provide the runner
testclassinstance = self.request.instance
runner = testclassinstance.getrunner()
return runner(item)
def inline_runsource(self, source, *cmdlineargs) -> HookRecorder:
"""Run a test module in process using ``pytest.main()``.
This run writes "source" into a temporary file and runs
``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance
for the result.
:param source: The source code of the test module.
:param cmdlineargs: Any extra command line arguments to use.
:returns: :py:class:`HookRecorder` instance of the result.
"""
p = self.makepyfile(source)
values = list(cmdlineargs) + [p]
return self.inline_run(*values)
def inline_genitems(self, *args) -> Tuple[List[Item], HookRecorder]:
"""Run ``pytest.main(['--collectonly'])`` in-process.
Runs the :py:func:`pytest.main` function to run all of pytest inside
the test process itself like :py:meth:`inline_run`, but returns a
tuple of the collected items and a :py:class:`HookRecorder` instance.
"""
rec = self.inline_run("--collect-only", *args)
items = [x.item for x in rec.getcalls("pytest_itemcollected")]
return items, rec
def inline_run(
self, *args, plugins=(), no_reraise_ctrlc: bool = False
) -> HookRecorder:
"""Run ``pytest.main()`` in-process, returning a HookRecorder.
Runs the :py:func:`pytest.main` function to run all of pytest inside
the test process itself. This means it can return a
:py:class:`HookRecorder` instance which gives more detailed results
from that run than can be done by matching stdout/stderr from
:py:meth:`runpytest`.
:param args:
Command line arguments to pass to :py:func:`pytest.main`.
:param plugins:
Extra plugin instances the ``pytest.main()`` instance should use.
:param no_reraise_ctrlc:
Typically we reraise keyboard interrupts from the child run. If
True, the KeyboardInterrupt exception is captured.
:returns: A :py:class:`HookRecorder` instance.
"""
# (maybe a cpython bug?) the importlib cache sometimes isn't updated
# properly between file creation and inline_run (especially if imports
# are interspersed with file creation)
importlib.invalidate_caches()
plugins = list(plugins)
finalizers = []
try:
# Any sys.module or sys.path changes done while running pytest
# inline should be reverted after the test run completes to avoid
# clashing with later inline tests run within the same pytest test,
# e.g. just because they use matching test module names.
finalizers.append(self.__take_sys_modules_snapshot().restore)
finalizers.append(SysPathsSnapshot().restore)
# Important note:
# - our tests should not leave any other references/registrations
# laying around other than possibly loaded test modules
# referenced from sys.modules, as nothing will clean those up
# automatically
rec = []
class Collect:
def pytest_configure(x, config: Config) -> None:
rec.append(self.make_hook_recorder(config.pluginmanager))
plugins.append(Collect())
ret = pytest.main(list(args), plugins=plugins)
if len(rec) == 1:
reprec = rec.pop()
else:
class reprec: # type: ignore
pass
reprec.ret = ret
# Typically we reraise keyboard interrupts from the child run
# because it's our user requesting interruption of the testing.
if ret == ExitCode.INTERRUPTED and not no_reraise_ctrlc:
calls = reprec.getcalls("pytest_keyboard_interrupt")
if calls and calls[-1].excinfo.type == KeyboardInterrupt:
raise KeyboardInterrupt()
return reprec
finally:
for finalizer in finalizers:
finalizer()
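# Illustrative sketch (added example): a hypothetical test body using the fixture that
# exposes this class (conventionally named `testdir`).
#
#     def test_inline_run_sketch(testdir):
#         testdir.makepyfile("def test_ok(): assert True")
#         reprec = testdir.inline_run("-q")
#         reprec.assertoutcome(passed=1)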
def runpytest_inprocess(self, *args, **kwargs) -> RunResult:
"""Return result of running pytest in-process, providing a similar
interface to what self.runpytest() provides."""
syspathinsert = kwargs.pop("syspathinsert", False)
if syspathinsert:
self.syspathinsert()
now = timing.time()
capture = _get_multicapture("sys")
capture.start_capturing()
try:
try:
reprec = self.inline_run(*args, **kwargs)
except SystemExit as e:
ret = e.args[0]
try:
ret = ExitCode(e.args[0])
except ValueError:
pass
class reprec: # type: ignore
ret = ret
except Exception:
traceback.print_exc()
class reprec: # type: ignore
ret = ExitCode(3)
finally:
out, err = capture.readouterr()
capture.stop_capturing()
sys.stdout.write(out)
sys.stderr.write(err)
assert reprec.ret is not None
res = RunResult(
reprec.ret, out.splitlines(), err.splitlines(), timing.time() - now
)
res.reprec = reprec # type: ignore
return res
def runpytest(self, *args, **kwargs) -> RunResult:
"""Run pytest inline or in a subprocess, depending on the command line
option "--runpytest" and return a :py:class:`RunResult`."""
args = self._ensure_basetemp(args)
if self._method == "inprocess":
return self.runpytest_inprocess(*args, **kwargs)
elif self._method == "subprocess":
return self.runpytest_subprocess(*args, **kwargs)
raise RuntimeError(f"Unrecognized runpytest option: {self._method}")
def _ensure_basetemp(self, args):
args = list(args)
for x in args:
if str(x).startswith("--basetemp"):
break
else:
args.append("--basetemp=%s" % self.tmpdir.dirpath("basetemp"))
return args
def parseconfig(self, *args) -> Config:
"""Return a new pytest Config instance from given commandline args.
This invokes the pytest bootstrapping code in _pytest.config to create
a new :py:class:`_pytest.core.PluginManager` and call the
pytest_cmdline_parse hook to create a new
:py:class:`_pytest.config.Config` instance.
If :py:attr:`plugins` has been populated they should be plugin modules
to be registered with the PluginManager.
"""
args = self._ensure_basetemp(args)
import _pytest.config
config = _pytest.config._prepareconfig(args, self.plugins) # type: ignore[arg-type]
# we don't know what the test will do with this half-setup config
# object and thus we make sure it gets unconfigured properly in any
# case (otherwise capturing could still be active, for example)
self.request.addfinalizer(config._ensure_unconfigure)
return config
def parseconfigure(self, *args) -> Config:
"""Return a new pytest configured Config instance.
Returns a new :py:class:`_pytest.config.Config` instance like
:py:meth:`parseconfig`, but also calls the pytest_configure hook.
"""
config = self.parseconfig(*args)
config._do_configure()
return config
def getitem(self, source, funcname: str = "test_func") -> Item:
"""Return the test item for a test function.
Writes the source to a python file and runs pytest's collection on
the resulting module, returning the test item for the requested
function name.
:param source:
The module source.
:param funcname:
The name of the test function for which to return a test item.
"""
items = self.getitems(source)
for item in items:
if item.name == funcname:
return item
assert 0, "{!r} item not found in module:\n{}\nitems: {}".format(
funcname, source, items
)
def getitems(self, source) -> List[Item]:
"""Return all test items collected from the module.
Writes the source to a Python file and runs pytest's collection on
the resulting module, returning all test items contained within.
"""
modcol = self.getmodulecol(source)
return self.genitems([modcol])
def getmodulecol(self, source, configargs=(), withinit: bool = False):
"""Return the module collection node for ``source``.
Writes ``source`` to a file using :py:meth:`makepyfile` and then
runs the pytest collection on it, returning the collection node for the
test module.
:param source:
The source code of the module to collect.
:param configargs:
Any extra arguments to pass to :py:meth:`parseconfigure`.
:param withinit:
Whether to also write an ``__init__.py`` file to the same
directory to ensure it is a package.
"""
if isinstance(source, Path):
path = self.tmpdir.join(str(source))
assert not withinit, "not supported for paths"
else:
kw = {self._name: Source(source).strip()}
path = self.makepyfile(**kw)
        if withinit:
            self.makepyfile(__init__="#")
        self.config = config = self.parseconfigure(path, *configargs)
        return self.getnode(config, path)
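# Illustrative usage sketch (assumes a `pytester` fixture that provides an
# instance of the class above; the throwaway test module written via
# `makepyfile` is made up for the example).
def test_example_inprocess_run(pytester):
    # Write a throwaway test module into the temporary test directory.
    pytester.makepyfile(
        """
        def test_one():
            assert True
        """
    )
    # Collect in-process without running the tests.
    items, rec = pytester.inline_genitems()
    assert len(items) == 1
    # Run in-process and inspect the result object instead of parsing stdout.
    result = pytester.runpytest_inprocess("-q")
    assert result.ret == 0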
# coding=utf-8
import pytest
from django.test import TestCase, RequestFactory, Client
from django.utils import timezone
from django.urls import reverse
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.contrib.contenttypes.models import ContentType
from mock import patch
from molo.core.models import (
ArticlePage, CmsSettings, Main,
SiteLanguageRelation, Languages, SectionIndexPage, FooterIndexPage,
BannerIndexPage, TagIndexPage, BannerPage,
Timezone, Tag, ArticlePageTags, Site, LanguageRelation
)
from molo.core import constants
from molo.core.templatetags.core_tags import (
load_child_articles_for_section,
get_translation)
from molo.core.molo_wagtail_models import MoloPage
from molo.core.tests.base import MoloTestCaseMixin
from molo.core.tasks import promote_articles
from molo.core.wagtail_hooks import copy_translation_pages
from wagtail.images.tests.utils import Image, get_test_image_file
@pytest.mark.django_db
class TestModels(TestCase, MoloTestCaseMixin):
def setUp(self):
self.mk_main()
self.factory = RequestFactory()
self.main = Main.objects.all().first()
self.language_setting = Languages.objects.create(
site_id=self.main.get_site().pk)
self.english = SiteLanguageRelation.objects.create(
language_setting=self.language_setting,
locale='en', is_active=True)
LanguageRelation.objects.create(
page=self.main, language=self.english)
self.french = SiteLanguageRelation.objects.create(
language_setting=self.language_setting,
locale='fr', is_active=True)
LanguageRelation.objects.create(
page=self.main, language=self.french)
LanguageRelation.objects.create(
page=self.main, language=self.english)
LanguageRelation.objects.create(
page=self.banner_index, language=self.english)
LanguageRelation.objects.create(
page=self.tag_index, language=self.english)
# Create an image for running tests on
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
self.yourmind = self.mk_section(
self.section_index, title='Your mind')
self.yourmind_sub = self.mk_section(
self.yourmind, title='Your mind subsection')
self.mk_main2()
self.main2 = Main.objects.all().last()
self.language_setting2 = Languages.objects.create(
site_id=self.main2.get_site().pk)
self.english2 = SiteLanguageRelation.objects.create(
language_setting=self.language_setting2,
locale='en',
is_active=True)
self.spanish = SiteLanguageRelation.objects.create(
language_setting=self.language_setting2,
locale='es',
is_active=True)
LanguageRelation.objects.create(
page=self.main2, language=self.english2)
LanguageRelation.objects.create(
page=self.main2, language=self.spanish)
# Create an image for running tests on
self.image = Image.objects.create(
title="Test image",
file=get_test_image_file(),
)
self.image2 = Image.objects.create(
title="Test image 2",
file=get_test_image_file(),
)
self.yourmind2 = self.mk_section(
self.section_index2, title='Your mind')
self.yourmind_sub2 = self.mk_section(
self.yourmind2, title='Your mind subsection')
self.client = Client()
self.client.get("/")
self.site = self.main.get_site()
def test_multisite_one_root_page(self):
second_site = Site.objects.create(
hostname='kaios.mr.com', port=80, root_page=self.main,
is_default_site=False, site_name='kaios main')
self.assertEqual(self.main.get_site().pk, second_site.pk)
def test_copy_method_of_article_page_copies_over_languages(self):
self.assertFalse(
Languages.for_site(
self.main2.get_site()).languages.filter(locale='fr').exists())
article = self.mk_articles(self.yourmind, 1)[0]
LanguageRelation.objects.create(
page=article, language=self.english2)
self.mk_article_translation(article, self.french)
article2 = article.copy(to=self.yourmind2)
copy_translation_pages(article, article2)
self.assertTrue(
Languages.for_site(
self.main2.get_site()).languages.filter(locale='fr').exists())
self.assertFalse(
Languages.for_site(
self.main2.get_site()).languages.filter(
locale='fr').first().is_active)
def test_move_method_of_article_page_copies_over_languages(self):
self.assertFalse(
Languages.for_site(
self.main2.get_site()).languages.filter(locale='fr').exists())
article = self.mk_articles(self.yourmind, 1)[0]
fr_article = self.mk_article_translation(article, self.french)
fr_article.move(self.yourmind2)
self.assertTrue(
Languages.for_site(
self.main2.get_site()).languages.filter(locale='fr').exists())
self.assertFalse(
Languages.for_site(
self.main2.get_site()).languages.filter(
locale='fr').first().is_active)
def test_sections_method_of_main_gives_children_of_main_only(self):
sections = self.main.sections()
self.assertFalse(sections.child_of(self.main2).exists())
@pytest.mark.django_db(transaction=True)
def test_copy_method_of_section_index_wont_duplicate_index_pages(self):
LanguageRelation.objects.create(
page=SectionIndexPage.objects.child_of(self.main2).first(),
language=self.spanish)
self.assertEqual(
SectionIndexPage.objects.child_of(self.main2).count(), 1)
self.section_index.copy(to=self.main2)
self.assertEqual(
SectionIndexPage.objects.child_of(self.main2).count(), 1)
@pytest.mark.django_db(transaction=True)
def test_copy_method_of_tag_index_wont_duplicate_index_pages(self):
LanguageRelation.objects.create(
page=TagIndexPage.objects.child_of(self.main2).first(),
language=self.spanish)
self.assertEqual(
TagIndexPage.objects.child_of(self.main2).count(), 1)
self.tag_index.copy(to=self.main2)
self.assertEqual(
TagIndexPage.objects.child_of(self.main2).count(), 1)
@pytest.mark.django_db(transaction=True)
def test_copy_method_of_footer_index_wont_duplicate_index_pages(self):
LanguageRelation.objects.create(
page=FooterIndexPage.objects.child_of(self.main2).first(),
language=self.spanish)
self.assertEqual(
FooterIndexPage.objects.child_of(self.main2).count(), 1)
self.section_index.copy(to=self.main2)
self.assertEqual(
FooterIndexPage.objects.child_of(self.main2).count(), 1)
@pytest.mark.django_db(transaction=True)
def test_copy_method_of_banner_index_wont_duplicate_index_pages(self):
LanguageRelation.objects.create(
page=BannerIndexPage.objects.child_of(self.main2).first(),
language=self.spanish)
self.assertEqual(
BannerIndexPage.objects.child_of(self.main2).count(), 1)
self.section_index.copy(to=self.main2)
self.assertEqual(
BannerIndexPage.objects.child_of(self.main2).count(), 1)
def test_main_returns_bannerpages(self):
banner = BannerPage(title='test banner')
self.banner_index.add_child(instance=banner)
banner.save_revision().publish()
banner = BannerPage(title='test banner 2')
self.banner_index.add_child(instance=banner)
banner.save_revision().publish()
self.assertEqual(self.main.bannerpages().count(), 2)
def test_get_parent_section_for_article(self):
article = self.mk_article(self.yourmind_sub)
parent = article.get_parent_section()
self.assertEqual(parent.pk, self.yourmind_sub.pk)
def test_get_parent_section_for_section(self):
parent = self.yourmind_sub.get_parent_section()
self.assertEqual(parent.pk, self.yourmind.pk)
def test_get_top_level_parent(self):
title = 'title'
main_content_type, created = ContentType.objects.get_or_create(
model='main', app_label='core')
main = Main.objects.create(
title=title, slug=title, content_type=main_content_type,
path='00010011', depth=2, numchild=0, url_path='/home/',
)
SiteLanguageRelation.objects.create(
language_setting=Languages.for_site(main.get_site()),
locale='en', is_active=True)
french = SiteLanguageRelation.objects.create(
language_setting=Languages.for_site(main.get_site()),
locale='fr', is_active=True)
en_section = self.mk_section(
main, title="New Section", slug="new-section")
en_section2 = self.mk_section(
en_section, title="New Section 2", slug="new-section-2")
en_section3 = self.mk_section(
en_section2, title="New Section 3", slug="new-section-3")
en_section4 = self.mk_section(
en_section3, title="New Section 4", slug="new-section-4")
self.mk_section_translation(en_section, french)
self.mk_section_translation(en_section2, french)
fr_section3 = self.mk_section_translation(en_section3, french)
fr_section4 = self.mk_section_translation(en_section4, french)
parent = fr_section3.get_top_level_parent(locale='en')
self.assertEqual(parent.pk, en_section.pk)
self.assertEqual(fr_section3.depth, 5)
self.assertEqual(parent.depth, 3)
parent = fr_section4.get_top_level_parent(locale='en')
self.assertEqual(parent.pk, en_section.pk)
self.assertEqual(fr_section4.depth, 6)
self.assertEqual(parent.depth, 3)
parent = fr_section4.get_top_level_parent(locale='en', depth=4)
self.assertEqual(parent.pk, en_section2.pk)
self.assertEqual(fr_section4.depth, 6)
self.assertEqual(parent.depth, 4)
parent = fr_section4.get_top_level_parent(locale='en', depth=2)
self.assertEqual(parent.pk, main.pk)
self.assertEqual(parent.depth, 2)
parent = fr_section4.get_top_level_parent(locale='en', depth=-1)
self.assertEqual(parent, None)
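    # The depth values asserted above reflect the treebeard page tree used in
    # these tests: the hidden tree root is depth 1, a Main page is depth 2, a
    # top-level section is depth 3, and each nested subsection adds one level.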
def test_article_order(self):
now = timezone.now()
article1 = self.mk_article(
self.yourmind_sub, first_published_at=now)
self.mk_article(
self.yourmind_sub,
first_published_at=now + timezone.timedelta(hours=1))
# most recent first
self.assertEqual(
self.yourmind_sub.articles()[0].title, article1.title)
# swap published date
article1.first_published_at = now + timezone.timedelta(hours=4)
article1.save_revision().publish()
self.assertEqual(
self.yourmind_sub.articles()[0].title, article1.title)
def test_get_effective_image_for_sections(self):
en_section = self.mk_section(
self.section_index,
title="New Section", slug="new-section",
image=self.image)
self.assertEqual(
en_section.get_effective_image(), self.image)
        # image not set, so the inherited value is used
en_section2 = self.mk_section(
en_section, title="New Section 2", slug="new-section-2")
self.assertEqual(
en_section2.get_effective_image(), en_section.image)
        # image not set, so the inherited value is used
en_section3 = self.mk_section(
en_section2, title="New Section 3", slug="new-section-3")
self.assertEqual(
en_section3.get_effective_image(), en_section.image)
# set the image
en_section3.image = self.image2
self.assertEqual(
en_section3.get_effective_image(), self.image2)
        # if the translated section doesn't have
        # an image, it will inherit from the parent
fr_section3 = self.mk_section_translation(en_section3, self.french)
self.assertEqual(
fr_section3.get_effective_image(), en_section3.image)
fr_section2 = self.mk_section_translation(en_section2, self.french)
self.assertEqual(
fr_section2.get_effective_image(), en_section.image)
        # if the section doesn't have an image and there is nothing to inherit,
        # get_effective_image returns an empty value
en_section4 = self.mk_section(
self.section_index,
title="New Section 4", slug="new-section-4", )
self.assertEqual(
en_section4.get_effective_image(), '')
fr_section4 = self.mk_section_translation(en_section4, self.french)
self.assertEqual(
fr_section4.get_effective_image(), '')
def test_get_effective_image_for_articles(self):
section = self.mk_section(
self.section_index, title="Section", slug="section")
en_article1, en_article2 = self.mk_articles(section, 2)
fr_article1 = self.mk_article_translation(en_article1, self.french)
self.assertEqual(
en_article1.get_effective_image(), '')
self.assertEqual(
fr_article1.get_effective_image(), '')
en_article1.image = self.image
en_article1.save()
self.assertEqual(
en_article1.get_effective_image(), self.image)
        # if the image is not set, it should inherit from the main language article
self.assertEqual(
fr_article1.get_effective_image(), en_article1.image)
# if the translated article has an image it should return its image
fr_article1.image = self.image2
fr_article1.save()
self.assertEqual(
fr_article1.get_effective_image(), self.image2)
def test_number_of_child_sections(self):
new_section = self.mk_section(self.section_index)
self.mk_sections(new_section, count=12)
self.client.get('/')
response = self.client.get('/sections-main-1/test-section-0/')
self.assertContains(response, 'Test Section 11')
def test_number_of_child_articles_in_section(self):
new_section = self.mk_section(self.section_index)
self.mk_articles(new_section, count=12)
request = self.factory.get('/sections-main-1/test-section-0/')
request._wagtail_site = self.site
articles = load_child_articles_for_section(
{'request': request, 'locale_code': 'en'}, new_section, count=None)
self.assertEqual(len(articles), 12)
def test_parent_section(self):
new_section = self.mk_section(
self.section_index, title="New Section", slug="new-section")
new_section1 = self.mk_section(
new_section, title="New Section 1", slug="new-section-1")
self.assertEqual(
new_section1.get_parent_section('en'), new_section)
def test_article_service_aggregator(self):
new_section = self.mk_section(
self.section_index, title="New Section", slug="new-section",
is_service_aggregator=True)
with self.assertRaises(ValidationError):
self.mk_article(
new_section, title="New Section 1", slug="new-section-1",
featured_in_latest=True)
def test_section_service_aggregator(self):
with self.assertRaises(ValidationError):
self.mk_section(
self.section_index, title="New Section", slug="new-section",
is_service_aggregator=True, monday_rotation=True)
def test_commenting_closed_settings_fallbacks(self):
new_section = self.mk_section(
self.section_index, title="New Section", slug="new-section")
new_article = self.mk_article(new_section, title="New article")
# test fallback to section_index
self.section_index.commenting_state = constants.COMMENTING_CLOSED
self.section_index.save()
comment_settings = new_article.get_effective_commenting_settings()
self.assertEqual(
comment_settings['state'], constants.COMMENTING_CLOSED)
# test overriding settings in section
new_section.commenting_state = constants.COMMENTING_CLOSED
new_section.save()
comment_settings = new_article.get_effective_commenting_settings()
self.assertEqual(
comment_settings['state'], constants.COMMENTING_CLOSED)
# test overriding settings in article
new_article.commenting_state = constants.COMMENTING_DISABLED
new_article.save_revision().publish()
comment_settings = new_article.get_effective_commenting_settings()
self.assertEqual(
comment_settings['state'], constants.COMMENTING_DISABLED)
def test_commenting_allowed(self):
new_section = self.mk_section(
self.section_index, title="New Section", slug="new-section")
new_article = self.mk_article(
new_section, title="New article",
commenting_state=constants.COMMENTING_OPEN)
now = timezone.now()
# with commenting open
self.assertTrue(new_article.allow_commenting())
# with commenting disabled and no reopen_time given
new_article.commenting_state = constants.COMMENTING_DISABLED
self.assertFalse(new_article.allow_commenting())
# with commenting closed but past reopen time
new_article.commenting_state = constants.COMMENTING_CLOSED
new_article.commenting_open_time = now - timezone.timedelta(days=1)
self.assertTrue(new_article.allow_commenting())
# with commenting timestamped and within specified time
new_article.commenting_state = constants.COMMENTING_TIMESTAMPED
new_article.commenting_open_time = now - timezone.timedelta(days=1)
new_article.commenting_close_time = now + timezone.timedelta(days=1)
self.assertTrue(new_article.allow_commenting())
# with commenting closed and not yet reopen_time
new_article.commenting_state = constants.COMMENTING_CLOSED
new_article.commenting_open_time = now + timezone.timedelta(days=1)
self.assertFalse(new_article.allow_commenting())
def test_commenting_enabled(self):
article_1 = ArticlePage(
title="New article", commenting_state=constants.COMMENTING_OPEN)
self.assertTrue(article_1.is_commenting_enabled())
article_2 = ArticlePage(
title="New article", commenting_state=constants.COMMENTING_CLOSED)
self.assertTrue(article_2.is_commenting_enabled())
article_3 = ArticlePage(
title="New article",
commenting_state=constants.COMMENTING_DISABLED)
self.assertFalse(article_3.is_commenting_enabled())
def test_tags(self):
User.objects.create_superuser(
username='testuser', password='password', email='<EMAIL>')
self.client.login(username='testuser', password='password')
post_data = {
'title': 'this is a test article',
'slug': 'this-is-a-test-article',
'recommended_articles-INITIAL_FORMS': 0,
'recommended_articles-MAX_NUM_FORMS': 1000,
'recommended_articles-MIN_NUM_FORMS': 0,
'recommended_articles-TOTAL_FORMS': 0,
'nav_tags-INITIAL_FORMS': 0,
'nav_tags-MAX_NUM_FORMS': 1000,
'nav_tags-MIN_NUM_FORMS': 0,
'nav_tags-TOTAL_FORMS': 0,
'related_sections-INITIAL_FORMS': 0,
'related_sections-MAX_NUM_FORMS': 1000,
'related_sections-MIN_NUM_FORMS': 0,
'related_sections-TOTAL_FORMS': 0,
'body-count': 1,
'body-0-value': 'Hello',
'body-0-deleted': False,
'body-0-order': 1,
'body-0-type': 'paragraph',
'tags': 'love, war',
'action-publish': 'Publish',
'homepage_media-count': 0
}
self.client.post(
reverse('wagtailadmin_pages:add',
args=('core', 'articlepage', self.yourmind.id,)),
post_data)
post_data.update({
'title': 'this is a test article2',
'slug': 'this-is-a-test-article-2',
'tags': 'peace, war',
})
self.client.post(
reverse('wagtailadmin_pages:add',
args=('core', 'articlepage', self.yourmind.id,)),
post_data)
self.assertEqual(
ArticlePage.objects.filter(tags__name='war').count(), 2)
self.assertEqual(
ArticlePage.objects.filter(tags__name='love').count(), 1)
self.assertEqual(
ArticlePage.objects.filter(tags__name='peace').count(), 1)
def test_meta_data_tags(self):
User.objects.create_superuser(
username='testuser', password='password', email='<EMAIL>')
self.client.login(username='testuser', password='password')
post_data = {
'title': 'this is a test article',
'slug': 'this-is-a-test-article',
'recommended_articles-INITIAL_FORMS': 0,
'recommended_articles-MAX_NUM_FORMS': 1000,
'recommended_articles-MIN_NUM_FORMS': 0,
'recommended_articles-TOTAL_FORMS': 0,
'related_sections-INITIAL_FORMS': 0,
'related_sections-MAX_NUM_FORMS': 1000,
'related_sections-MIN_NUM_FORMS': 0,
'related_sections-TOTAL_FORMS': 0,
'nav_tags-INITIAL_FORMS': 0,
'nav_tags-MAX_NUM_FORMS': 1000,
'nav_tags-MIN_NUM_FORMS': 0,
'nav_tags-TOTAL_FORMS': 0,
'body-count': 1,
'body-0-value': 'Hello',
'body-0-deleted': False,
'body-0-order': 1,
'body-0-type': 'paragraph',
'metadata_tags': 'love, happiness',
'action-publish': 'Publish',
'homepage_media-count': 0
}
self.client.post(
reverse('wagtailadmin_pages:add',
args=('core', 'articlepage', self.yourmind.id,)),
post_data)
post_data.update({
'title': 'this is a test article2',
'slug': 'this-is-a-test-article-2',
'metadata_tags': 'peace, happiness',
})
self.client.post(
reverse('wagtailadmin_pages:add',
args=('core', 'articlepage', self.yourmind.id,)),
post_data)
self.assertEqual(
ArticlePage.objects.filter(
metadata_tags__name='happiness').count(), 2)
self.assertEqual(
ArticlePage.objects.filter(
metadata_tags__name='love').count(), 1)
self.assertEqual(
ArticlePage.objects.filter(
metadata_tags__name='peace').count(), 1)
def test_nav_tag_delete_updates_article(self):
"""
ArticlePageTags with no tags should not be saved
"""
tag_index = TagIndexPage.objects.child_of(self.main).first()
article = self.mk_article(
parent=self.yourmind, title='first_main_article')
article2 = self.mk_article(
parent=self.yourmind, title='second_main_article')
tag = Tag(title='New tag')
tag2 = Tag(title='Another New tag')
tag_index.add_child(instance=tag)
tag.save_revision().publish()
tag_index.add_child(instance=tag2)
tag2.save_revision().publish()
article.nav_tags.create(tag=tag)
article.save()
article2.nav_tags.create(tag=tag)
article2.nav_tags.create(tag=tag2)
article2.save()
self.assertEqual(article.nav_tags.get(tag=tag).tag,
article2.nav_tags.get(tag=tag).tag,
)
# delete the tag
tag.delete()
# test the nav_tags are deleted and removed from the articles
self.assertEqual(ArticlePageTags.objects.count(), 1)
self.assertFalse(article.nav_tags.filter(pk=1).exists())
        self.assertEqual(article2.nav_tags.get().tag, tag2)
def test_social_media(self):
User.objects.create_superuser(
username='testuser', password='password', email='<EMAIL>')
self.client.login(username='testuser', password='password')
self.mk_article(
self.yourmind, title="New article",
social_media_title='media title',
social_media_description='media description', )
self.mk_article(
self.yourmind, title="New article2",
social_media_title='media title',
social_media_image=self.image, )
self.assertEqual(
ArticlePage.objects.filter(
social_media_title='media title').count(), 2)
self.assertEqual(
ArticlePage.objects.filter(
social_media_description='media description').count(), 1)
self.assertEqual(
ArticlePage.objects.filter(
social_media_image=self.image).count(), 1)
response = self.client.get('/sections-main-1/your-mind/new-article/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'content="media title"')
def test_site_languages(self):
main = Main.objects.all().first()
self.english = SiteLanguageRelation.objects.create(
language_setting=Languages.for_site(main.get_site()),
locale='en',
            is_active=True)
# File: script_tools/parse_script.py
"""
This module parses a script and places all of the lines, episodes, characters, and scenes into the appropriate classes.
Author: <NAME>
"""
from containers.episode import Episode
from containers.line import Line
from containers.scene import Scene
from typing import List
import re
import pandas as pd
def parse_all_eps(
season_nums: List[int], episode_nums: List[int], debug: bool = False, script_dir: str = "./script_tools/scripts"
) -> List[Episode]:
"""
Parse all the episodes in the given seasons. That is, fetches all of the characters, lines, and scenes for each
episode.
Parameters
----------
    season_nums, episode_nums
        The seasons and episode numbers that will be parsed.
debug
If specified, prints some messages that may help with debugging.
Returns
-------
episodes
The episodes with all of the information pulled out.
Notes
-----
    If a season does not have a specified episode, it will be skipped. Hence, ``episode_nums`` should cover up to the largest
    episode number across all seasons. Episodes will also be skipped if there is no corresponding entry in ``formats.txt``.
"""
episodes = []
# Each episode can be parsed slightly differently. This pandas dataframe will provide the keys used to determine
# how to parse each episode.
formats = pd.read_csv("./formats.txt", sep=" ", comment="#")
for season_num in season_nums:
for episode_num in episode_nums:
# Need the formats on how we parse the characters and scenes.
episode_format = formats[(formats["season_num"] == season_num) & (formats["episode_num"] == episode_num)]
            # Some seasons don't have episodes 1-10, so try this and skip the episode if it is missing.
try:
character_format = str(episode_format["character_format"].values[0])
scene_format = str(episode_format["scene_format"].values[0])
except IndexError:
continue
key = f"s{season_num:02}e{episode_num:02}"
script_path = f"{script_dir}/{key}.txt"
# Initialize class instance. This does not yet parse it but merely sets up the initial variables.
episode = Episode(season_num, episode_num, key, script_path)
episode.character_format = character_format
episode.scene_format = scene_format
episodes.append(episode)
# Now go through each episode and parse the script.
for episode in episodes:
parse_episode(episode.script_path, episode, debug)
return episodes
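# Illustrative example of a hypothetical ``formats.txt`` (the real file shipped
# with the project may differ). It is space-separated and uses the
# character/scene format keys that the parsing functions below understand.
#
#   season_num episode_num character_format scene_format
#   1 1 CHARACTER_NAME: SCENE
#   1 2 **CHARACTER_NAME:** STARS
#   2 1 CHARACTER_NAME: INT/EXT/CUT
#   3 5 NONE NONE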
def parse_episode(fname: str, episode: Episode, debug: bool = False) -> None:
"""
Goes through an episode script and determines all of the lines, characters, and scenes.
Parameters
----------
fname
Path to the script.
episode
Episode instance that will be updated.
debug
If specified, prints some messages that may help with debugging.
Returns
-------
None. ``episode`` is updated directly.
"""
# There may be some episodes that don't have scripts yet. Skip these and print a message.
if episode.character_format == "NONE" and episode.scene_format == "NONE":
print(f"Script has been flagged as not existing for s{episode.season_num:02}"
f"e{episode.episode_num:02}. Skipping.")
return
# Start with a new scene.
episode.current_scene = Scene(episode.season_num, episode.episode_num)
with open(fname, "r") as f:
# Strictly speaking I don't need to loop over lines here to get the character
# lines. I could instead pass the entire file and use regex to pull out all the
# character lines. However, we want to pull out scenes
# chronologically. Hence it will be useful to iterate line-by-line.
for line in f:
# Ignore empty lines.
try:
_ = (line.split())[0]
except IndexError:
continue
# Parse the line to see if a character spoke it (and add to the appropriate character).
parse_character_line(line, episode, debug=debug)
# Add the final scene to the episode.
episode.scenes.append(episode.current_scene)
def parse_character_line(line: str, episode: Episode, debug: bool = False) -> None:
"""
Parses a single line of text from the script and adds the data to ``episode``.
"""
if debug:
print("Line {0}".format(line))
# The format of the character line will change slightly depending upon the episode and season. Separate the line
# into the character name and their spoken line.
spoken_line = regex_character_line(line, episode, debug=debug)
# A character didn't speak this line.
if spoken_line is None:
# However, it could be the case that we've hit a scene change.
scene_change = determine_if_scene_change(line, episode, debug=debug)
if scene_change:
# If so, add all of lines to the list and reset the tracking.
# Careful, maybe something happened and there weren't actually any lines added
# to this scene yet.
if len(episode.current_scene.lines) > 0:
episode.scenes.append(episode.current_scene)
episode.current_scene = Scene(episode.season_num, episode.episode_num)
return
# At this point, we have verified that a character spoke the line. Add some extra info for further tracking.
spoken_line.season_num = episode.season_num
spoken_line.episode_num = episode.episode_num
character_name = spoken_line.character_name
# episode.character_line is a dict["character_name": list of Lines].
# So let's check if we have already instantiated this character. If not, initialize.
if character_name not in episode.character_lines:
episode.character_lines[character_name] = []
# Update the spoken line.
episode.character_lines[character_name].append(spoken_line)
# Update the scene this character spoke in.
episode.current_scene.lines.append(spoken_line)
def determine_if_scene_change(line: str, episode: Episode, debug: bool = False) -> bool:
"""
Determines if ``line`` corresponds to a scene change. This determination is based on how a scene change is defined
as based on the format in ``formats.txt`` and stored in :py:attr:`~containers.episode.scene_format`.
"""
scene_change = False
if episode.scene_format == "SCENE":
if "Scene shift" in line or "Blackout" in line or "scene" in line.lower():
scene_change = True
elif episode.scene_format == "DASHES":
if "\- - -" in line or "\---" in line: # noqa: W605
scene_change = True
elif episode.scene_format == "STARS":
if "* * *" in line or "***" in line:
scene_change = True
elif episode.scene_format == "INT/EXT":
if "INT" in line or "EXT" in line or "Interior" in line or "Exterior" in line:
scene_change = True
elif episode.scene_format == "CUT":
if "CUT TO" in line:
scene_change = True
elif episode.scene_format == "INT/EXT/CUT":
if "INT" in line or "EXT" in line or "CUT TO" in line:
scene_change = True
return scene_change
def regex_character_line(line: str, episode: Episode, debug: bool = False) -> Line:
"""
Parses a single line spoken by a character to determine the name of the character speaking and the actual line
spoken.
Parameters
----------
line
The character name and the line that was spoken by the character.
episode
The episode instance that this line was spoken in.
debug : optional
If specified, prints out some messages that may be useful for debugging.
Returns
-------
spoken_line
The line alongside the name of the character bundled into a :py:class:`~containers.line.Line` instance.
"""
# These are all scene descriptions.
if line[0] == "[" or line[0] == "_" or "CUT TO" in line or "_CUT" in line or "INT" in line or "EXT" in line:
return None
# These could probably be bundled into a single function and be smarter. But eh.
if episode.character_format == "**CHARACTER_NAME:**":
spoken_line = parse_stars_character_line(line, debug=debug)
elif episode.character_format == "CHARACTER_NAME:":
spoken_line = parse_capital_character_line(line, debug=debug)
else:
print(f"Character format for s{episode.season_num:02}e{episode.episode_num:02} "
"is {episode.character_format}. This is not a recognised format.")
raise ValueError
return spoken_line
def parse_capital_character_line(line: str, debug: bool = False) -> Line:
"""
Parse a line where the line start with ``CHARACTER_NAME:``.
"""
# A line spoken by a character will start with "CHARACTER_NAME:".
    # Search for any word starting with a capital letter followed by a ":".
reg_exp = re.compile(r"([A-Z].*\:)")
character_line = reg_exp.split(line) # Split on this search.
if debug:
print("Character Line {0}".format(character_line))
# A line spoken by a character will return a list of the form...
# ['', CHARACTER_NAME:, <Spoken line>]
# Garbage lines will have length less than 3.
if len(character_line) < 3:
return None
# The character name has an extra ":" at the end. Eliminate it.
character_name = character_line[1][:-1]
    # Finally, strip any whitespace from the start and end of the name.
character_name = character_name.strip()
# To be a valid line, all letters must be upper case.
if character_name != character_name.upper():
return None
# The spoken line is the final element of `character_line`.
spoken_line = character_line[2]
# Now there is an annoying "\n" at the end of each line. Eliminate it...
spoken_line = (spoken_line.split("\n"))[0]
# Still a little bit of white space at the start and end.
spoken_line = spoken_line.strip()
# The webpage has an alphabet on it for navigation. Since these letters are capital
# letters, they've been captured by our method. In these instances, the
    # `spoken_line` is empty. So if the spoken line is empty, don't count anything.
if spoken_line == "":
return None
    # At this point, we're sure it was a line spoken by a character.
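# Illustrative check of how the regex used in parse_capital_character_line
# splits a typical script line; the sample line is made up.
if __name__ == "__main__":
    _demo_parts = re.compile(r"([A-Z].*\:)").split("MICHAEL: Sounds good to me.\n")
    # -> ['', 'MICHAEL:', ' Sounds good to me.\n']: element 1 is the speaker
    # tag (with the trailing ':') and element 2 is the spoken line, which is
    # exactly the indexing relied on above.
    print(_demo_parts)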
# File: airtable/airtable.py
# -*- coding: utf-8 -*-
"""Python Airtable Client Library
This is an Airtable client library for Python.
It wraps the functionality provided by the Airtable API, so you can easily
implement Airtable operations and substantially reduce implementation and
maintenance costs.
Create one factory instance per base, and one client instance per table.
Inspired by gtalarico/airtable-python-wrapper. It is also a great library, thanks.
(https://github.com/gtalarico/airtable-python-wrapper)
"""
import requests
import json
import posixpath
import time
from urllib.parse import quote
from urllib.parse import urlencode
import enum
import sys
from requests.auth import AuthBase
class SortDirection(enum.Enum):
"""ソート順の列挙型
sortオプションに渡す値です。
:param ASC: 昇順
:type ASC: string
:param DESC: 降順
:type DESC: string
"""
ASC = 'asc'
DESC = 'desc'
class AirtableSorter:
"""ソートの設定を構築するクラス
"""
def __init__(self):
"""コンストラクタ
"""
self.sort = []
pass
def append(self, field, direction=SortDirection.ASC):
"""ソートの設定を追加
チェーンメソッド方式で追加できます。
>>> sorter = AirtableSorter()
>>> sorter.append('FieldA').append('FieldB', SortDirection.DESC)
:param field: ソート対象のフィールド名
:type field: string
:param direction: ソート順, defaults to SortDirection.ASC
:type direction: SortDirection, optional
:return: self
:rtype: AirtableSorter
"""
if type(direction) is SortDirection:
direction_value = direction.value
else:
direction_value = direction
self.sort.append({
'field': field,
'direction': direction_value
})
#print(self.sort)
return self
def build(self):
"""sortのクエリパラメータを構築
appendで追加されたソート順にパラメータを構築します。
>>> query = sorter.build()
:return: クエリパラメータのオブジェクト
:rtype: dict
"""
query = {}
idx = 0
for item in self.sort:
#print(item)
field = item['field']
direction = item['direction']
query['sort[' + str(idx) + '][field]'] = field
query['sort[' + str(idx) + '][direction]'] = direction
idx += 1
return query
@classmethod
def make_params(self, params, sort):
"""クエリパラメータオブジェクトにsortを追加
リクエスト用のクエリパラメータを構築します。
クエリパラメータ用のオブジェクトは以下のクエリパラメータを設定します。
sort[0][field]={field}&sort[0][direction]={direction}&sort[1][field]={field}&sort[1][direction]={direction}...&sort[n][field]={field}&sort[n][direction]={direction}
        The sort parameter can be given in three forms:
        - AirtableSorter
        an AirtableSorter object already configured via append
        - a dict specifying a single field
>>>
{
'field': 'field0',
'direction': 'asc'
}
        - a list specifying multiple fields
>>>
[
{'field': 'field0', 'direction': 'asc'},
{'field': 'field1', 'direction': 'asc'},
{'field': 'field2', 'direction': 'asc'}
]
        :param params: object used to build the query parameters
        :type params: dict
        :param sort: sort settings
        :type sort: AirtableSorter|dict|list
        :return: query parameter object
:rtype: dict
"""
p = params
if type(sort) is AirtableSorter:
p.update(sort.build())
elif isinstance(sort, dict):
p['sort[0][field]'] = sort['field']
if 'direction' in sort:
p['sort[0][direction]'] = sort['direction']
else:
p['sort[0][direction]'] = SortDirection.ASC.value
elif isinstance(sort, list):
cnt = 0
for sort_item in sort:
if isinstance(sort_item, dict):
p['sort[' + str(cnt) + '][field]'] = sort_item['field']
if 'direction' in sort_item:
p['sort[' + str(cnt) + '][direction]'] = sort_item['direction']
else:
p['sort[' + str(cnt) + '][direction]'] = SortDirection.ASC.value
else:
p['sort[' + str(cnt) + '][field]'] = sort_item
p['sort[' + str(cnt) + '][direction]'] = SortDirection.ASC.value
cnt += 1
else:
pass
return p
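# Illustrative helper (the field names "Name" and "Age" are placeholders):
# shows the query dict that AirtableSorter produces for a two-field sort.
def _example_sorter_usage():
    sorter = AirtableSorter().append("Name").append("Age", SortDirection.DESC)
    params = sorter.build()
    # params == {'sort[0][field]': 'Name', 'sort[0][direction]': 'asc',
    #            'sort[1][field]': 'Age', 'sort[1][direction]': 'desc'}
    return params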
class AirtableResponse(object):
"""レスポンスクラス
:param object: objectを継承
:type object: object
"""
def __init__(self, records=[], offset=None, errors=[]):
"""コンストラクタ
:param records: HTTPレスポンスのrecords, defaults to []
:type records: list, optional
:param offset: HTTPレスポンスから返却されるページオフセット値, defaults to None
:type offset: string, optional
:param errors: HTTPレスポンスから返却されるエラー文言, defaults to []
:type errors: list, optional
"""
self._records = records
self._offset = offset
self._errors = errors
pass
@property
def records(self):
"""recordsのgetter
>>> print(r.records)
[
{'id': 'XXX', 'fields': {...}},
{'id': 'XXX', 'fields': {...}},
{'id': 'XXX', 'fields': {...}}
]
        :return: the records set in the constructor
:rtype: list
"""
return self._records
@property
def offset(self):
"""offsetのgetter
:return: コンストラクタにセットしたoffset
:rtype: string
"""
return self._offset
@property
def errors(self):
"""errorsのgetter
:return: コンストラクタにセットしたerrors
:rtype: list
"""
return self._errors
def size(self):
"""recordsの要素数を取得
:return: recordsの要素数(=レコード数)
:rtype: int
"""
if isinstance(self.records, list):
return len(self.records)
elif isinstance(self.records, dict):
return 1
else:
return 0
def get(self, index=None):
"""recordsを取得
0〜n件のレコードを返却。要素番号を指定した場合は、その要素のレコードを返却。
>>> print(r.get())
[
{'id': 'XXX', 'fields': {...}},
{'id': 'XXX', 'fields': {...}},
{'id': 'XXX', 'fields': {...}}
]
        :param index: index into records, defaults to None
        :type index: int, optional
        :return: 0 to n records
:rtype: list, dict
"""
if isinstance(self._records, dict):
return self._records
else:
if self.size() == 1:
return self._records[0]
elif self.size() > 1:
if index:
return self._records[index]
else:
return self._records
else:
return []
def get_list(self, index=None):
"""recordsを取得
0〜n件のレコードをlistで返却。要素番号を指定した場合は、その要素のレコードを返却。
>>> print(r.get())
[
{'id': 'XXX', 'fields': {...}},
{'id': 'XXX', 'fields': {...}},
{'id': 'XXX', 'fields': {...}}
]
        :param index: index into records, defaults to None
        :type index: int, optional
        :return: 0 to n records
:rtype: list
"""
records = self.get(index)
if isinstance(records, list):
return records
else:
return [records]
def get_ids(self):
"""レコードIDのリストを取得
>>> print(r.get_ids())
['XXX', 'XXX', 'XXX']
:return: レコードIDの一次元配列
:rtype: list
"""
if self.size() == 1:
return self.get()['id']
elif self.size() > 1:
return [record['id'] for record in self.get()]
else:
return []
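# Illustrative helper (record ids and field values are made up): shows how
# AirtableResponse wraps the "records" list returned by the Airtable API.
def _example_response_usage():
    r = AirtableResponse(records=[
        {"id": "recAAA", "fields": {"Name": "foo"}},
        {"id": "recBBB", "fields": {"Name": "bar"}},
    ])
    assert r.size() == 2
    assert r.get_ids() == ["recAAA", "recBBB"]
    return r.get_list()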
class AirtableAuth(AuthBase):
"""Airtableの認証クラス
:param AuthBase: AuthBaseクラスを継承
:type AuthBase: requests.auth.AuthBase
"""
def __init__(self, api_key):
"""コンストラクタ
:param api_key: AirtableのAPIキー
:type api_key: string
"""
self.api_key = api_key
def __call__(self, r):
"""リクエスト送信時に呼び出され、認証ヘッダーを付与する
"""
r.headers['Authorization'] = 'Bearer ' + self.api_key
return r
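# Illustrative helper (the API key is a placeholder): AirtableAuth plugs into a
# requests.Session so that every request carries the Bearer token, which is how
# AirtableClient uses it below.
def _example_auth_usage():
    session = requests.Session()
    session.auth = AirtableAuth(api_key="keyXXXXXXXXXXXXXX")
    return session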
class AirtableClient(object):
"""Airtableクライアントクラス
:param object: objectクラスを継承
:type object: object
"""
_VERSION = 'v0'
_API_BASE_URL = 'https://api.airtable.com'
_API_URL = posixpath.join(_API_BASE_URL, _VERSION)
_API_LIMIT = 1.0 / 5 # 5 per second
_MAX_RECORDS_PER_REQUEST = 10
def __init__(self, base_id, table_name, api_key, debug=False):
"""コンストラクタ
:param base_id: AirtableのBASE ID
:type base_id: string
:param table_name: Airtableのテーブル名
:type table_name: string
:param api_key: AirtableのAPIキー
:type api_key: string
:param debug: デバッグモードのフラグ(True:ON/False:OFF), defaults to False
:type debug: bool, optional
"""
session = requests.Session()
session.auth = AirtableAuth(api_key=api_key)
self.session = session
self.debug = debug
self.BASE_URL = posixpath.join(self._API_URL, base_id, quote(table_name))
pass
def _make_single_condition(self, field, value):
"""filterByFormulaのfield=value条件式を1つ構築して返却
:param field: フィールド名
:type field: string
:param value: 検索値
:type value: string
:return: {field}=valueの文字列
:rtype: string
"""
return '{' + str(field) + '}="' + str(value) + '"'
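    # Example (hypothetical field and value): _make_single_condition("Name", "foo")
    # returns the filterByFormula snippet '{Name}="foo"'.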
def _make_params(self, formula=None, offset=None, sort=None, max_records=None, fields=None, view=None):
"""リクエストパラメータを構築
:param formula: filterByFormula値, defaults to None
:type formula: string, optional
:param offset: offset値, defaults to None
:type offset: string, optional
:param sort: sort値, defaults to None
:type sort: AirtableSorter|dict|list, optional
:param max_records: maxRecords値, defaults to None ※未指定の場合はデフォルトで100件
:type max_records: int, optional
:param fields: fields値, defaults to None
:type fields: list, optional
:param view: view値, defaults to None
:type view: string, optional
:return: リクエストパラメータのオブジェクト
:rtype: dict
"""
p = {}
if formula:
p['filterByFormula'] = formula
if offset:
p['offset'] = offset
if sort:
p = AirtableSorter.make_params(p, sort)
if max_records:
p['maxRecords'] = max_records
if fields:
p['fields'] = []
for field in fields:
p['fields'].append(field)
if view:
p['view'] = view
return p
def _process_response_error(self, response):
"""HTTPレスポンスのエラー処理
:param response: レスポンスオブジェクト
:type response: requests.Response
:raises exc: HTTPErrorをキャッチした場合は送出
:return: HTTPレスポンスボディのJSONオブジェクト
:rtype: dict
"""
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exc:
err_msg = str(exc)
try:
error_dict = response.json()
except ValueError:
pass
else:
if "error" in error_dict:
err_msg += " [Error: {}]".format(error_dict["error"])
exc.args = (*exc.args, err_msg)
raise exc
else:
return response.json()
def _process_response(self, response):
"""HTTPレスポンスの事後処理
:param response: レスポンスオブジェクト
:type response: requests.Response
:return: HTTPレスポンスボディのJSONオブジェクト
:rtype: dict
"""
result_dict = self._process_response_error(response)
if self.debug:
print(result_dict)
if 'error' in result_dict:
return {'records': [], 'error': result_dict['error']}
else:
return result_dict
def _request(self, method, url, params=None, json_data=None):
"""HTTPリクエスト送信
:param method: HTTPメソッド
:type method: string
:param url: リクエストURL
:type url: string
:param params: リクエストパラメータオブジェクト, defaults to None
:type params: dict, optional
:param json_data: リクエストJSONデータオブジェクト, defaults to None
:type json_data: dict, optional
:return: HTTPレスポンスボディのJSONオブジェクト
:rtype: dict
"""
response = self.session.request(method, url, params=params, json=json_data)
if self.debug:
print(response.url)
return self._process_response(response)
def _get(self, formula=None, offset=None, sort=None, max_records=None, fields=None, view=None):
"""GETリクエスト送信
:param formula: filterByFormula値, defaults to None
:type formula: string, optional
:param offset: offset値, defaults to None
:type offset: string, optional
:param sort: sort値, defaults to None
:type sort: AirtableSorter|dict|list, optional
:param max_records: maxRecords値, defaults to None ※未指定の場合はデフォルトで100件
:type max_records: int, optional
:param fields: fields値, defaults to None
:type fields: list, optional
:param view: view値, defaults to None
:type view: string, optional
:return: HTTPレスポンスボディのJSONオブジェクト
:rtype: dict
"""
url = self.BASE_URL
p = self._make_params(formula, offset, sort, max_records, fields, view)
return self._request('get', url, params=p)
def _post(self, data):
"""POSTリクエスト送信
:param data: リクエストJSONデータオブジェクト
:type data: dict
:return: HTTPレスポンスボディのJSONオブジェクト
:rtype: dict
"""
url = self.BASE_URL
if self.debug:
print(data)
return self._request('post', url, json_data=data)
def _patch(self, id, data):
"""PATCHリクエスト送信
:param id: レコードID
:type id: string
:param data: リクエストJSONデータオブジェクト
:type data: dict
:return: HTTPレスポンスボディのJSONオブジェクト
:rtype: dict
"""
url = posixpath.join(self.BASE_URL, id)
return self._request('patch', url, json_data=data)
def _delete(self, id):
"""DELETEリクエスト送信
:param id: レコードID
:type id: string
:return: HTTPレスポンスボディのJSONオブジェクト
:rtype: dict
"""
url = posixpath.join(self.BASE_URL, id)
return self._request('delete', url)
def _chunk(self, iterable, length):
"""チャンク処理(分割処理)
:param iterable: 処理対象のイテラブルオブジェクト
:type iterable: object
:param length: チャンクサイズ
:type length: int
:yield: [description]
:rtype: [type]
"""
for i in range(0, len(iterable), length):
yield iterable[i : i + length]
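    # Example (illustrative): list(self._chunk([1, 2, 3, 4, 5], 2)) yields
    # [[1, 2], [3, 4], [5]] -- larger operations can be split this way into
    # batches of at most _MAX_RECORDS_PER_REQUEST records.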
def _build_batch_records(self, fields_list):
"""一括処理用のレコードリストを構築
:param fields_list: fieldsのリスト
:type fields_list: list
:return: recordsにセットするリスト
:rtype: list
"""
return [{"fields": fields} for fields in fields_list]
(JJ suitable) (NNS '
'accommodations)))))))) (. .)))',
'pos_tags': ['IN',
'DT',
'NN',
',',
'PRP',
'VBP',
'JJ',
'PRP',
'MD',
'VB',
'PRP',
'JJ',
'NNS',
'.'],
'text': "In the meantime, I'm sure we could find you suitable "
'accommodations.',
'tokens': ['In',
'the',
'meantime',
',',
'I',
"'m",
'sure',
'we',
'could',
'find',
'you',
'suitable',
'accommodations',
'.']}],
[[my army will find you boat],
[i be sure, we could find you suitable accommodation]]
Output:
[[],
[(7d9ea9023b66a0ebc167f0dbb6ea8cd75d7b46f9, 25edad6781577dcb3ba715c8230416fb0d4c45c4, {'Co_Occurrence': 1.0})],
[(8540897b645962964fd644242d4cc0032f024e86, 25edad6781577dcb3ba715c8230416fb0d4c45c4, {'Synchronous': 1.0})]]
"""
if output_format not in ["Relation", "triplet"]:
raise NotImplementedError("Error: extract_relations_from_parsed_result only supports Relation or triplet.")
return self.relation_extractor.extract_from_parsed_result(
parsed_result, para_eventualities, output_format=output_format, in_order=in_order, **kw
)
def extract_relations_from_text(self, text, output_format="Relation", in_order=True, annotators=None, **kw):
""" Extract relations from a raw text and extracted eventualities
:param text: a raw text
:type text: str
:param output_format: which format to return, "Relation" or "triplet"
:type output_format: str (default = "Relation")
:param in_order: whether the returned order follows the input token order
:type in_order: bool (default = True)
:param annotators: annotators for corenlp, please refer to https://stanfordnlp.github.io/CoreNLP/annotators.html
:type annotators: Union[List, None] (default = None)
:param kw: other parameters
:type kw: Dict[str, object]
:return: the extracted relations
:rtype: Union[List[List[aser.relation.Relation]], List[List[Dict[str, object]]], List[aser.relation.Relation], List[Dict[str, object]]]
.. highlight:: python
.. code-block:: python
Input:
"My army will find your boat. In the meantime, I'm sure we could find you suitable accommodations."
Output:
[[],
[(7d9ea9023b66a0ebc167f0dbb6ea8cd75d7b46f9, 25edad6781577dcb3ba715c8230416fb0d4c45c4, {'Co_Occurrence': 1.0})],
[(8540897b645962964fd644242d4cc0032f024e86, 25edad6781577dcb3ba715c8230416fb0d4c45c4, {'Synchronous': 1.0})]]
"""
if output_format not in ["Relation", "triplet"]:
raise NotImplementedError("Error: extract_relations_from_text only supports Relation or triplet.")
parsed_result = self.parse_text(text, annotators=annotators)
para_eventualities = self.extract_eventualities_from_parsed_result(parsed_result)
return self.extract_relations_from_parsed_result(
parsed_result, para_eventualities, output_format=output_format, in_order=in_order, **kw
)
def extract_from_parsed_result(
self,
parsed_result,
eventuality_output_format="Eventuality",
relation_output_format="Relation",
in_order=True,
**kw
):
""" Extract both eventualities and relations from a parsed result
:param parsed_result: the parsed result returned by corenlp
:type parsed_result: List[Dict[str, object]]
:param eventuality_output_format: which format to return eventualities, "Eventuality" or "json"
:type eventuality_output_format: str (default = "Eventuality")
:param relation_output_format: which format to return relations, "Relation" or "triplet"
:type relation_output_format: str (default = "Relation")
:param in_order: whether the returned order follows the input token order
:type in_order: bool (default = True)
:param kw: other parameters
:type kw: Dict[str, object]
:return: the extracted eventualities and relations
:rtype: Tuple[Union[List[List[aser.eventuality.Eventuality]], List[List[Dict[str, object]]], List[aser.eventuality.Eventuality], List[Dict[str, object]]], Union[List[List[aser.relation.Relation]], List[List[Dict[str, object]]], List[aser.relation.Relation], List[Dict[str, object]]]]
.. highlight:: python
.. code-block:: python
Input:
[{'dependencies': [(1, 'nmod:poss', 0),
(3, 'nsubj', 1),
(3, 'aux', 2),
(3, 'dobj', 5),
(3, 'punct', 6),
(5, 'nmod:poss', 4)],
'lemmas': ['my', 'army', 'will', 'find', 'you', 'boat', '.'],
'mentions': [],
'ners': ['O', 'O', 'O', 'O', 'O', 'O', 'O'],
'parse': '(ROOT (S (NP (PRP$ My) (NN army)) (VP (MD will) (VP (VB find) (NP '
'(PRP$ your) (NN boat)))) (. .)))',
'pos_tags': ['PRP$', 'NN', 'MD', 'VB', 'PRP$', 'NN', '.'],
'text': 'My army will find your boat.',
'tokens': ['My', 'army', 'will', 'find', 'your', 'boat', '.']},
{'dependencies': [(2, 'case', 0),
(2, 'det', 1),
(6, 'nmod:in', 2),
(6, 'punct', 3),
(6, 'nsubj', 4),
(6, 'cop', 5),
(6, 'ccomp', 9),
(6, 'punct', 13),
(9, 'nsubj', 7),
(9, 'aux', 8),
(9, 'iobj', 10),
(9, 'dobj', 12),
(12, 'amod', 11)],
'lemmas': ['in',
'the',
'meantime',
',',
'I',
'be',
'sure',
'we',
'could',
'find',
'you',
'suitable',
'accommodation',
'.'],
'mentions': [],
'ners': ['O',
'O',
'O',
'O',
'O',
'O',
'O',
'O',
'O',
'O',
'O',
'O',
'O',
'O'],
'parse': '(ROOT (S (PP (IN In) (NP (DT the) (NN meantime))) (, ,) (NP (PRP '
"I)) (VP (VBP 'm) (ADJP (JJ sure) (SBAR (S (NP (PRP we)) (VP (MD "
'could) (VP (VB find) (NP (PRP you)) (NP (JJ suitable) (NNS '
'accommodations)))))))) (. .)))',
'pos_tags': ['IN',
'DT',
'NN',
',',
'PRP',
'VBP',
'JJ',
'PRP',
'MD',
'VB',
'PRP',
'JJ',
'NNS',
'.'],
'text': "In the meantime, I'm sure we could find you suitable "
'accommodations.',
'tokens': ['In',
'the',
'meantime',
',',
'I',
"'m",
'sure',
'we',
'could',
'find',
'you',
'suitable',
'accommodations',
'.']}],
[[my army will find you boat],
[i be sure, we could find you suitable accommodation]]
Output:
([[my army will find you boat],
[i be sure, we could find you suitable accommodation]],
[[],
[(7d9ea9023b66a0ebc167f0dbb6ea8cd75d7b46f9, 25edad6781577dcb3ba715c8230416fb0d4c45c4, {'Co_Occurrence': 1.0})],
[(8540897b645962964fd644242d4cc0032f024e86, 25edad6781577dcb3ba715c8230416fb0d4c45c4, {'Synchronous': 1.0})]])
"""
if eventuality_output_format not in ["Eventuality", "json"]:
raise NotImplementedError("Error: extract_eventualities only supports Eventuality or json.")
if relation_output_format not in ["Relation", "triplet"]:
raise NotImplementedError("Error: extract_relations only supports Relation or triplet.")
if not isinstance(parsed_result, (list, tuple, dict)):
raise NotImplementedError
if isinstance(parsed_result, dict):
is_single_sent = True
parsed_result = [parsed_result]
else:
is_single_sent = False
para_eventualities = self.extract_eventualities_from_parsed_result(
parsed_result, output_format="Eventuality", in_order=True, **kw
)
para_relations = self.extract_relations_from_parsed_result(
parsed_result, para_eventualities, output_format="Relation", in_order=True, **kw
)
if in_order:
if eventuality_output_format == "json":
para_eventualities = [[eventuality.encode(encoding=None) for eventuality in sent_eventualities] \
for sent_eventualities in para_eventualities]
if relation_output_format == "triplet":
para_relations = [list(chain.from_iterable([relation.to_triplet() for relation in sent_relations])) \
for sent_relations in para_relations]
if is_single_sent:
return para_eventualities[0], para_relations[0]
else:
return para_eventualities, para_relations
else:
eid2eventuality = dict()
for eventuality in chain.from_iterable(para_eventualities):
eid = eventuality.eid
if eid not in eid2eventuality:
eid2eventuality[eid] = deepcopy(eventuality)
else:
eid2eventuality[eid].update(eventuality)
if eventuality_output_format == "Eventuality":
eventualities = sorted(eid2eventuality.values(), key=lambda e: e.eid)
elif eventuality_output_format == "json":
eventualities = sorted(
[eventuality.encode(encoding=None) for eventuality in eid2eventuality.values()],
key=lambda e: e["eid"]
)
rid2relation = dict()
for relation in chain.from_iterable(para_relations):
if relation.rid not in rid2relation:
rid2relation[relation.rid] = deepcopy(relation)
else:
rid2relation[relation.rid].update(relation)
if relation_output_format == "Relation":
para_relations = sorted(rid2relation.values(), key=lambda r: r.rid)
elif relation_output_format == "triplet":
para_relations = sorted(chain.from_iterable([relation.to_triplets() for relation in rid2relation.values()]))
return eventualities, para_relations
def extract_from_text(
self,
text,
eventuality_output_format="Eventuality",
relation_output_format="Relation",
in_order=True,
annotators=None,
**kw
):
""" Extract both eventualities and relations from a raw text
:param text: a raw text
:type text: str
:param eventuality_output_format: which format to return eventualities, "Eventuality" or "json"
:type eventuality_output_format: str (default = "Eventuality")
:param relation_output_format: which format to return relations, "Relation" or "triplet"
:type relation_output_format: str (default = "Relation")
:param in_order: whether the returned order follows the input token order
:type in_order: bool (default = True)
:param annotators: annotators for corenlp, please refer to https://stanfordnlp.github.io/CoreNLP/annotators.html
:type annotators: Union[List, None] (default = None)
:param kw: other parameters
:type kw: Dict[str, object]
:return: the extracted eventualities and relations
:rtype: :rtype: Tuple[Union[List[List[aser.eventuality.Eventuality]], List[List[Dict[str, object]]], List[aser.eventuality.Eventuality], List[Dict[str, object]]], Union[List[List[aser.relation.Relation]], List[List[Dict[str, object]]], List[aser.relation.Relation], List[Dict[str, object]]]]
.. highlight:: python
.. code-block:: python
Input:
"My army will find your boat. In the meantime, I'm sure we could find you suitable accommodations."
Output:
([[my army will find you boat],
[i be sure, we could find you suitable accommodation]],
[[],
[(7d9ea9023b66a0ebc167f0dbb6ea8cd75d7b46f9, 25edad6781577dcb3ba715c8230416fb0d4c45c4, {'Co_Occurrence': 1.0})],
[(8540897b645962964fd644242d4cc0032f024e86, 25edad6781577dcb3ba715c8230416fb0d4c45c4, {'Synchronous': 1.0})]])
"""
if eventuality_output_format not in ["Eventuality", "json"]:
raise NotImplementedError("Error: extract_eventualities only supports Eventuality or json.")
if relation_output_format not in ["Relation", "triplet"]:
raise NotImplementedError("Error: extract_relations only supports Relation or triplet.")
parsed_result = self.parse_text(text, annotators=annotators)
return self.extract_from_parsed_result(
parsed_result,
eventuality_output_format=eventuality_output_format,
relation_output_format=relation_output_format,
in_order=in_order,
**kw
)
class SeedRuleASERExtractor(BaseASERExtractor):
""" ASER Extractor based on rules to extract both eventualities and relations (for ASER v1.0)
"""
def __init__(self, corenlp_path="", corenlp_port=0, **kw):
if "annotators" not in kw:
kw["annotators"] = list(ANNOTATORS)
if "parse" in kw["annotators"]:
kw["annotators"].pop("parse")
if "depparse" not in kw["annotators"]:
kw["annotator"].append("depparse")
super().__init__(corenlp_path, corenlp_port, **kw)
from .rule import CLAUSE_WORDS
self.eventuality_extractor = SeedRuleEventualityExtractor(
corenlp_path=self.corenlp_path, corenlp_port=self.corenlp_port, skip_words=CLAUSE_WORDS, **kw
)
self.relation_extractor = SeedRuleRelationExtractor(**kw)
class DiscourseASERExtractor(BaseASERExtractor):
""" ASER Extractor based on discourse parsing to extract both eventualities and relations (for ASER v2.0)
"""
def __init__(self, corenlp_path="", corenlp_port=0, **kw):
if "annotators" not in kw:
kw["annotators"] = list(ANNOTATORS)
if "depparse" in kw["annotators"]:
kw["annotator"].pop("depparse")
if "parse" not in kw["annotators"]:
kw["annotators"].append("parse")
super().__init__(corenlp_path, corenlp_port, **kw)
self.eventuality_extractor = DiscourseEventualityExtractor(
corenlp_path=self.corenlp_path, corenlp_port=self.corenlp_port, **kw
)
self.relation_extractor = DiscourseRelationExtractor(**kw)
def extract_from_parsed_result(
self,
parsed_result,
eventuality_output_format="Eventuality",
relation_output_format="Relation",
in_order=True,
**kw
):
""" Extract both eventualities and relations from a parsed result
:param parsed_result: the parsed result returned by corenlp
:type parsed_result: List[Dict[str, object]]
:param eventuality_output_format: which format to return eventualities, "Eventuality" or "json"
:type eventuality_output_format: str (default = "Eventuality")
:param relation_output_format: which format to return relations, "Relation" or "triplet"
:type relation_output_format: str (default = "Relation")
:param in_order: whether the returned order follows the input token order
:type in_order: bool (default = True)
:param kw: other parameters (e.g., syntax_tree_cache)
:type kw: Dict[str, object]
:return: the extracted eventualities and relations
        :rtype: Tuple[Union[List[List[aser.eventuality.Eventuality]], List[List[Dict[str, object]]], List[aser.eventuality.Eventuality], List[Dict[str, object]]], Union[List[List[aser.relation.Relation]], List[List[Dict[str,
find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
self.id = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Action_Reference':
obj_ = cybox_core.ActionReferenceType.factory()
obj_.build(child_)
self.Action_Reference.append(obj_)
# end class ActionEquivalenceType
class ActionEquivalenceListType(GeneratedsSuper):
"""The ActionEquivalenceListType captures a list of Action
Equivalences."""
subclass = None
superclass = None
def __init__(self, Action_Equivalence=None):
if Action_Equivalence is None:
self.Action_Equivalence = []
else:
self.Action_Equivalence = Action_Equivalence
def factory(*args_, **kwargs_):
if ActionEquivalenceListType.subclass:
return ActionEquivalenceListType.subclass(*args_, **kwargs_)
else:
return ActionEquivalenceListType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Action_Equivalence(self): return self.Action_Equivalence
def set_Action_Equivalence(self, Action_Equivalence): self.Action_Equivalence = Action_Equivalence
def add_Action_Equivalence(self, value): self.Action_Equivalence.append(value)
def insert_Action_Equivalence(self, index, value): self.Action_Equivalence[index] = value
def hasContent_(self):
if (
self.Action_Equivalence
):
return True
else:
return False
def export(self, write, level, namespace_='maecPackage:', name_='ActionEquivalenceListType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(write, level, pretty_print)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(write, level, already_processed, namespace_, name_='ActionEquivalenceListType')
if self.hasContent_():
write('>%s' % (eol_, ))
self.exportChildren(write, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(write, level, pretty_print)
write('</%s%s>%s' % (namespace_, name_, eol_))
else:
write('/>%s' % (eol_, ))
def exportAttributes(self, write, level, already_processed, namespace_='maecPackage:', name_='ActionEquivalenceListType'):
pass
def exportChildren(self, write, level, namespace_='maecPackage:', name_='ActionEquivalenceListType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Action_Equivalence_ in self.Action_Equivalence:
Action_Equivalence_.export(write, level, 'maecPackage:', name_='Action_Equivalence', pretty_print=pretty_print)
def build(self, node):
self.__sourcenode__ = node
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Action_Equivalence':
obj_ = ActionEquivalenceType.factory()
obj_.build(child_)
self.Action_Equivalence.append(obj_)
# end class ActionEquivalenceListType
class CapturedProtocolListType(GeneratedsSuper):
"""The CapturedProtocolListType specifies a list of network protocols
that a malware analysis environment may capture or interact
with."""
subclass = None
superclass = None
def __init__(self, Protocol=None):
if Protocol is None:
self.Protocol = []
else:
self.Protocol = Protocol
def factory(*args_, **kwargs_):
if CapturedProtocolListType.subclass:
return CapturedProtocolListType.subclass(*args_, **kwargs_)
else:
return CapturedProtocolListType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Protocol(self): return self.Protocol
def set_Protocol(self, Protocol): self.Protocol = Protocol
def add_Protocol(self, value): self.Protocol.append(value)
def insert_Protocol(self, index, value): self.Protocol[index] = value
def hasContent_(self):
if (
self.Protocol
):
return True
else:
return False
def export(self, write, level, namespace_='maecPackage:', name_='CapturedProtocolListType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(write, level, pretty_print)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(write, level, already_processed, namespace_, name_='CapturedProtocolListType')
if self.hasContent_():
write('>%s' % (eol_, ))
self.exportChildren(write, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(write, level, pretty_print)
write('</%s%s>%s' % (namespace_, name_, eol_))
else:
write('/>%s' % (eol_, ))
def exportAttributes(self, write, level, already_processed, namespace_='maecPackage:', name_='CapturedProtocolListType'):
pass
def exportChildren(self, write, level, namespace_='maecPackage:', name_='CapturedProtocolListType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Protocol_ in self.Protocol:
Protocol_.export(write, level, 'maecPackage:', name_='Protocol', pretty_print=pretty_print)
def build(self, node):
self.__sourcenode__ = node
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Protocol':
obj_ = CapturedProtocolType.factory()
obj_.build(child_)
self.Protocol.append(obj_)
# end class CapturedProtocolListType
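# Illustrative sketch (not from the original bindings): building a
# CapturedProtocolListType from an XML fragment. It assumes the module-level
# helpers generated by generateDS (Tag_pattern_, find_attr_value_, etc.)
# behave as in standard generateDS output; the wrapper element name is
# hypothetical, but the 'Protocol' child tag matches buildChildren above.
#
#   from lxml import etree
#
#   xml_fragment = (
#       '<Captured_Protocols>'
#       '<Protocol layer7_protocol="HTTP" layer4_protocol="TCP" port_number="80"/>'
#       '</Captured_Protocols>'
#   )
#   node = etree.fromstring(xml_fragment)
#   protocol_list = CapturedProtocolListType.factory()
#   protocol_list.build(node)
#   assert protocol_list.get_Protocol()[0].get_port_number() == 80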
class CapturedProtocolType(GeneratedsSuper):
"""The CapturedProtocolType specifies the details of a network protocol
that may be captured or otherwise manipulated in the malware
analysis environment.The layer7_protocol field specifies the
name of the Layer 7 network protocol (OSI model) captured or
manipulated by the analysis environment.The layer4_protocol
field specifies the name of the Layer 4 network protocol (OSI
model) captured or manipulated by the analysis environment.The
port_number field specifies the port number for this network
protocol that is captured or manipulated by the analysis
environment.The interaction_level field specifies the relative
level of interaction that the analysis environment has with the
specified network protocol."""
subclass = None
superclass = None
def __init__(self, layer7_protocol=None, port_number=None, interaction_level=None, layer4_protocol=None):
self.layer7_protocol = _cast(None, layer7_protocol)
self.port_number = _cast(int, port_number)
self.interaction_level = _cast(None, interaction_level)
self.layer4_protocol = _cast(None, layer4_protocol)
pass
def factory(*args_, **kwargs_):
if CapturedProtocolType.subclass:
return CapturedProtocolType.subclass(*args_, **kwargs_)
else:
return CapturedProtocolType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_layer7_protocol(self): return self.layer7_protocol
def set_layer7_protocol(self, layer7_protocol): self.layer7_protocol = layer7_protocol
def get_port_number(self): return self.port_number
def set_port_number(self, port_number): self.port_number = port_number
def get_interaction_level(self): return self.interaction_level
def set_interaction_level(self, interaction_level): self.interaction_level = interaction_level
def get_layer4_protocol(self): return self.layer4_protocol
def set_layer4_protocol(self, layer4_protocol): self.layer4_protocol = layer4_protocol
    def hasContent_(self):
        # CapturedProtocolType carries only XML attributes, no child elements.
        return False
def export(self, write, level, namespace_='maecPackage:', name_='CapturedProtocolType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(write, level, pretty_print)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(write, level, already_processed, namespace_, name_='CapturedProtocolType')
if self.hasContent_():
write('>%s' % (eol_, ))
self.exportChildren(write, level + 1, namespace_, name_, pretty_print=pretty_print)
write('</%s%s>%s' % (namespace_, name_, eol_))
else:
write('/>%s' % (eol_, ))
def exportAttributes(self, write, level, already_processed, namespace_='maecPackage:', name_='CapturedProtocolType'):
if self.layer7_protocol is not None and 'layer7_protocol' not in already_processed:
already_processed.add('layer7_protocol')
write(' layer7_protocol=%s' % (quote_attrib(self.layer7_protocol), ))
if self.port_number is not None and 'port_number' not in already_processed:
already_processed.add('port_number')
write(' port_number="%s"' % self.gds_format_integer(self.port_number, input_name='port_number'))
if self.interaction_level is not None and 'interaction_level' not in already_processed:
already_processed.add('interaction_level')
write(' interaction_level=%s' % (quote_attrib(self.interaction_level), ))
if self.layer4_protocol is not None and 'layer4_protocol' not in already_processed:
already_processed.add('layer4_protocol')
write(' layer4_protocol=%s' % (quote_attrib(self.layer4_protocol), ))
def exportChildren(self, write, level, namespace_='maecPackage:', name_='CapturedProtocolType', fromsubclass_=False, pretty_print=True):
pass
def build(self, node):
self.__sourcenode__ = node
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('layer7_protocol', node)
if value is not None and 'layer7_protocol' not in already_processed:
already_processed.add('layer7_protocol')
self.layer7_protocol = value
value = find_attr_value_('port_number', node)
if value is not None and 'port_number' not in already_processed:
already_processed.add('port_number')
try:
self.port_number = int(value)
except ValueError as exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
if self.port_number <= 0:
raise_parse_error(node, 'Invalid PositiveInteger')
value = find_attr_value_('interaction_level', node)
if value is not None and 'interaction_level' not in already_processed:
already_processed.add('interaction_level')
self.interaction_level = value
value = find_attr_value_('layer4_protocol', node)
if value is not None and 'layer4_protocol' not in already_processed:
already_processed.add('layer4_protocol')
self.layer4_protocol = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class CapturedProtocolType
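# Illustrative sketch (not from the original bindings): serializing a
# CapturedProtocolType back to XML through the generated export() API, which
# takes a write callable as its first argument. quote_attrib and
# gds_format_integer are assumed to be the usual generateDS helpers; the
# attribute values are hypothetical.
#
#   import io
#
#   protocol = CapturedProtocolType(
#       layer7_protocol='HTTP', layer4_protocol='TCP',
#       port_number=80, interaction_level='honeypot',
#   )
#   buffer = io.StringIO()
#   protocol.export(buffer.write, level=0, pretty_print=True)
#   print(buffer.getvalue())
#   # -> <maecPackage:CapturedProtocolType layer7_protocol="HTTP" ... />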
class ObjectEquivalenceListType(GeneratedsSuper):
"""The ObjectEquivalenceListType captures a list of Object
Equivalences."""
subclass = None
superclass = None
def __init__(self, Object_Equivalence=None):
if Object_Equivalence is None:
self.Object_Equivalence = []
else:
self.Object_Equivalence = Object_Equivalence
def factory(*args_, **kwargs_):
if ObjectEquivalenceListType.subclass:
return ObjectEquivalenceListType.subclass(*args_, **kwargs_)
else:
return ObjectEquivalenceListType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Object_Equivalence(self): return self.Object_Equivalence
def set_Object_Equivalence(self, Object_Equivalence): self.Object_Equivalence = Object_Equivalence
def add_Object_Equivalence(self, value): self.Object_Equivalence.append(value)
def insert_Object_Equivalence(self, index, value): self.Object_Equivalence[index] = value
def hasContent_(self):
if (
self.Object_Equivalence
):
return True
else:
return False
def export(self, write, level, namespace_='maecPackage:', name_='ObjectEquivalenceListType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(write, level, pretty_print)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(write, level, already_processed, namespace_, name_='ObjectEquivalenceListType')
if self.hasContent_():
write('>%s' % (eol_, ))
self.exportChildren(write, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(write, level, pretty_print)
write('</%s%s>%s' % (namespace_, name_, eol_))
else:
write('/>%s' % (eol_, ))
def exportAttributes(self, write, level, already_processed, namespace_='maecPackage:', name_='ObjectEquivalenceListType'):
pass
def exportChildren(self, write, level, namespace_='maecPackage:', name_='ObjectEquivalenceListType', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Object_Equivalence_ in self.Object_Equivalence:
Object_Equivalence_.export(write, level, 'maecPackage:', name_='Object_Equivalence', pretty_print=pretty_print)
def build(self, node):
self.__sourcenode__ = node
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
pass
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Object_Equivalence':
obj_ = ObjectEquivalenceType.factory()
obj_.build(child_)
self.Object_Equivalence.append(obj_)
# end class ObjectEquivalenceListType
class ObjectEquivalenceType(maec_bundle_schema.ObjectReferenceListType):
"""The ObjectEquivalenceType relates the Objects that are equivalent to
each other, e.g., those that were found for the same Malware
Subject when using different analysis tools.The required id
field specifies the ID for the Object Equivalence, and must be
of the format specified by the ObjectEquivalenceIDPattern type."""
subclass = None
superclass = maec_bundle_schema.ObjectReferenceListType
def __init__(self, Object_Reference=None, id=None):
super(ObjectEquivalenceType, self).__init__(Object_Reference, )
self.id = _cast(None, id)
pass
def factory(*args_, **kwargs_):
if ObjectEquivalenceType.subclass:
return ObjectEquivalenceType.subclass(*args_, **kwargs_)
else:
return ObjectEquivalenceType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_id(self): return self.id
def set_id(self, id): self.id = id
def hasContent_(self):
if (
super(ObjectEquivalenceType, self).hasContent_()
):
return True
else:
            return False
in the list.")
facet_chain = [end_facets[-1]]
other_facets = chain_facets.copy()
other_facets.remove(facet_chain[0])
for i in range(len(other_facets)):
options = []
for facet in other_facets:
# See if the facet connects to the last facet in the chain
if len(set(facet.sites) & set(facet_chain[-1].sites)) == 2:
# Check the amount of links this next facet has
leftover_facets = other_facets.copy()
leftover_facets.remove(facet)
number_links = 0
for leftover_facet in leftover_facets:
if len(set(facet.sites) & set(leftover_facet.sites)) \
== 2:
number_links += 1
options.append((facet, number_links))
if len(options) == 1:
facet_chain.append(options[0][0])
other_facets.remove(options[0][0])
else:
for option in options:
if option[1] == 1:
facet_chain.append(option[0])
other_facets.remove(option[0])
break
if len(facet_chain) < chain_length:
print('WARNING: Could not connect all facets.')
return facet_chain
def find_facet_links(self, share_edge=False):
"""
Find the non-equivalent links between facets of the cage
molecule. The facets can be connected by sharing an edge, or a site.
Args:
share_edge (bool): Only return links between facets that share an edge.
Returns:
(List of (cage.Facet, cage.Facet) Tuples) - The
non-equivalent facet links of the Cage.
"""
# Find all links, i.e. possible combinations of surface facets
links = list(combinations(self.facets, 2))
# Find the links that share a site (this automatically finds
# those that share an edge as well).
site_sharing_links = []
for link in links:
cross_section = set(link[0].sites) & set(link[1].sites)
if cross_section:
# In case the user only wants edge-sharing paths, check that
if share_edge:
if len(cross_section) == 2:
site_sharing_links.append(link)
# Else just add the path to the list
else:
site_sharing_links.append(link)
# Find the site sharing paths that are non equivalent
noneq_links = []
for link in site_sharing_links:
# Check to see if the path is equivalent with a path in the List of
# non-equivalent paths
nonequivalent = True
for noneq_link in noneq_links:
for symm in self.symmops:
link_center = (link[0].center + link[1].center) / 2
noneq_link_center = sum((noneq_link[0].center,
noneq_link[1].center)) / 2
symm_link_center = symm.operate(link_center)
connection_vector = symm_link_center - noneq_link_center
if np.linalg.norm(connection_vector) < 1e-2:
nonequivalent = False
if nonequivalent:
noneq_links.append(link)
return noneq_links
def find_noneq_chain_links(self, facets=tuple, symm_tol=SYMMETRY_TOLERANCE,
verbose=False):
"""
Find the links between the facets of the chain that connects a
set of non equivalent facets.
Args:
facets (tuple): Tuple of Facets which are to be used for the
chain. In case no facets are provided, the full list of
non-equivalent facets will be used.
symm_tol (float): Tolerance for the equivalence condition, i.e.
how much the distance between the centers is allowed to be
after a symmetry operation.
verbose (bool): Print information about the analysis procedure.
This is mainly useful when the result is not as expected.
Returns:
(*List of (cage.Facet, cage.Facet) Tuples*) --
The links between the Facets in the chain of non-equivalent
Facets.
"""
facet_chain = self.find_noneq_facet_chain(facets=facets,
symm_tol=symm_tol,
verbose=verbose)
chain_links = []
for index in range(len(facet_chain) - 1):
chain_links.append((facet_chain[index], facet_chain[index + 1]))
return chain_links
def find_farthest_facet(self, point):
"""
Find the Facet of the Molecule that is the farthest away from the point
provided.
Args:
point ((3,) numpy.ndarray): Point provided by user.
Returns:
(*cage.Facet*)
"""
distance = 0
furthest_facet = None
for facet in self.facets:
newdistance = np.linalg.norm(point - facet.center)
if newdistance > distance:
furthest_facet = facet
distance = newdistance
return furthest_facet
def find_closest_facet(self, point):
"""
Find the Facet of the Molecule that is the closest to the point
provided.
Args:
point ((3,) numpy.ndarray): Point provided by user.
Returns:
(*cage.Facet*)
"""
distance = 1e6
closest_facet = None
for facet in self.facets:
newdistance = np.linalg.norm(point - facet.center)
if newdistance < distance:
closest_facet = facet
distance = newdistance
return closest_facet
class OccupiedCage(Cage):
"""
A Cage Molecule that has one or more cations docked on it.
"""
CATIONS = (pmg.Element('Li'), pmg.Element('Na'), pmg.Element('K'),
pmg.Element('Mg'))
def __init__(self, species, coords, charge=0, spin_multiplicity=None,
validate_proximity=False, site_properties=None):
"""
Initialize an OccupiedCage instance. The geometric center of the anion
is automatically centered on the origin.
Args:
species (List of pymatgen.Specie): List of atomic species. Possible
kinds of input include a list of dict of elements/species and
occupancies, a List of elements/specie specified as actual
Element/Specie, Strings ("Fe", "Fe2+") or atomic numbers
(1,56).
coords (List of (3,) numpy.ndarray): List of cartesian coordinates
of each species.
charge (float): Charge for the molecule. Defaults to 0.
validate_proximity (bool): Whether to check if there are sites
that are less than 1 Ang apart. Defaults to False.
site_properties (dict): Properties associated with the sites as
a dict of sequences, e.g., {"magmom":[5,5,5,5]}. The
sequences have to be the same length as the atomic species
and fractional_coords. Defaults to None for no properties.
Returns:
(*cage.Cage*)
"""
super(OccupiedCage, self).__init__(species, coords, charge,
spin_multiplicity,
validate_proximity,
site_properties)
self.center()
self._docks = []
@property
def docks(self):
return self._docks
@property
def facets(self):
return self._facets
def center(self, point=None):
"""
Center the OccupiedCage around a point by updating the sites, i.e. find
the coordinates for the sites so that the geometric center **of the
anion** is moved to the point provided. In case no point is provided,
the anion is centered around the origin.
Note: Running this method will reset the facets and symmetry
information to None.
Args:
point ((3,) numpy.ndarray): Point around which to center the
molecule.
"""
anion_center = self.anion_center
if point is not None:
anion_center -= point
# Find the new coordinates
new_coords = np.array(self.cart_coords) - anion_center
# Update the sites
sites = []
for i in range(len(self.species)):
prop = None
if self.site_properties:
prop = {k: v[i] for k, v in self.site_properties.items()}
sites.append(pmg.Site(self.species[i], new_coords[i],
properties=prop))
self._sites = sites
self._facets = None
self._symmops = None
self._pointgroup = None
self._facet_dict = None
def add_dock(self, dock, cation=None, docking_point=None):
"""
Add a docking site to the OccupiedCage. If the chemical symbol of the
cation is provided, the cation is appended to the OccupiedCage. In case
the cation is equal to *None*, the cation is assumed to be present and
the facet is simply designated as a dock.
        Note: If a cation is appended to the molecule, running this method will
reset the facets and symmetry information to None.
Args:
dock (cage.Facet): The Facet on which the cation is docked.
cation (str): The chemical symbol of the cation element.
docking_point ((3,) numpy.ndarray): Docking coordinates of the
cation.
"""
        # Check if the dock is one of the facets in the OccupiedCage
if dock not in self.facets:
raise ValueError("Docking facet not found in the facet list of the"
" OccupiedCage.")
if not cation:
self._docks.append(dock)
else:
if docking_point:
self.append(pmg.Element(cation), docking_point)
self.set_charge_and_spin(self.charge,
self.spin_multiplicity - 1)
else:
cation_coord = dock.center + 2 * dock.normal
self.append(pmg.Element(cation), cation_coord)
self.set_charge_and_spin(self.charge,
self.spin_multiplicity - 1)
self._docks.append(dock)
# TODO Add some more checks
@classmethod
def from_cage_and_facets(cls, cage, facets, docking_points=(),
cation='Li'):
"""
Initialize an OccupiedCage from a Cage object and a tuple of facets.
Args:
cage (cage.Cage): The anion on which the cations are docked.
facets (tuple): Tuple of cage.Facets on which the cations are
docked.
docking_points (tuple): Tuple of (3,) numpy.ndarray coordinates
that define the docking coordinates of the corresponding
docking Facets.
cation(str): Chemical symbol of the cation. In case *None* is
given, the docking sites are considered to already have a
cation present.
Returns:
(*cage.OccupiedCage*)
"""
occ_cage = cls(species=cage.species, coords=cage.cart_coords,
charge=cage.charge,
spin_multiplicity=cage.spin_multiplicity,
validate_proximity=True,
site_properties=cage.site_properties)
# Add the docked cations to the Cage
for index in range(len(facets)):
try:
occ_cage.add_dock(facets[index],
docking_point=docking_points[index],
cation=cation)
except IndexError:
occ_cage.add_dock(facets[index], cation=cation)
return occ_cage
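    # Illustrative sketch (not part of the original class): a typical way to
    # dock a cation on a facet of an existing Cage. Variable names are
    # hypothetical; `anion` is assumed to be a Cage whose surface facets have
    # already been found.
    #
    #   anion.find_surface_facets()
    #   occupied = OccupiedCage.from_cage_and_facets(
    #       anion, facets=(anion.facets[0],), cation='Li'
    #   )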
@classmethod
def from_poscar(cls, filename):
"""
Initialize an OccupiedCage from a VASP POSCAR file.
Args:
filename:
Returns:
"""
pass # TODO
# @classmethod
# def from_file(cls, filename):
# """
# Initialize an OccupiedCage from a file.
#
# Args:
# filename:
#
# Returns:
#
# """
# pass #TODO
def remove_surface_facet(self, facet):
"""
Remove a surface facet from the list of facets of an OccupiedCage.
Args:
facet (cage.Facet): The facet which is to be removed from the
molecule.
"""
surface_facets = self.facets
if surface_facets:
            surface_facets.remove(facet)
            self._facets = surface_facets
else:
print('Surface Facets have not been set up yet.')
def find_surface_facets(self, ignore=None):
"""
Find the surface facets of the OccupiedCage, minus the facets which
have a docked cation.
Args:
ignore (Tuple of Elements/Species): The | |
# test/unit/test_comparer.py
# -*- coding: utf-8 -*-
import pytest
from unittest.mock import Mock, patch, call
from sqlalchemydiff.comparer import (
_compile_errors,
_diff_dicts,
_discard_ignores,
_discard_ignores_by_name,
_get_columns,
_get_columns_info,
_get_common_tables,
_get_foreign_keys,
_get_foreign_keys_info,
_get_indexes,
_get_indexes_info,
_get_info_dict,
_get_inspectors,
_get_primary_keys,
_get_primary_keys_info,
_get_table_data,
_get_tables,
_get_tables_data,
_get_tables_diff,
_get_tables_info,
_make_result,
_process_type,
_process_types,
compare,
CompareResult,
InspectorFactory,
TablesInfo,
)
from test import assert_items_equal
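# Illustrative sketch (not part of the original test module): what the
# `compare` API exercised below looks like in real use. The database URIs are
# placeholders; `result.info` and `result.errors` are the structures these
# tests assert on.
#
#   result = compare(
#       'mysql+mysqlconnector://root:@localhost/db_left',
#       'mysql+mysqlconnector://root:@localhost/db_right',
#       ignores=['table_to_skip'],
#   )
#   if result.errors:
#       print(result.errors)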
@pytest.fixture
def mock_inspector_factory():
with patch.object(InspectorFactory, 'from_uri') as from_uri:
from_uri.side_effect = [
Mock(name="Left Inspector From Factory"),
Mock(name="Right Inspector From Factory")
]
yield
@pytest.mark.usefixtures("mock_inspector_factory")
class TestCompareCallsChain(object):
"""This test class makes sure the `compare` function inside process
works as expected.
"""
@pytest.fixture
def _get_inspectors_mock(self):
with patch('sqlalchemydiff.comparer._get_inspectors') as m:
m.return_value = [
Mock(name="Left Inspector"),
Mock(name="Right Inspector"),
]
yield m
@pytest.fixture
def _get_tables_data_mock(self):
with patch('sqlalchemydiff.comparer._get_tables_data') as m:
yield m
@pytest.fixture
def _compile_errors_mock(self):
with patch('sqlalchemydiff.comparer._compile_errors') as m:
def info_side_effect(info):
"""Using this side effect is enough to verify that we
                pass the final version of `info` to the `_compile_errors`
function, and that the function actually does something,
which in the mocked version is adding the '_err' key/val.
"""
errors = info.copy()
errors['_err'] = True
return errors
m.side_effect = info_side_effect
yield m
@pytest.fixture
def _get_tables_info_mock(self):
with patch('sqlalchemydiff.comparer._get_tables_info') as m:
m.return_value = TablesInfo(
left=Mock(name="Tables Left"),
right=Mock(name="Tables Right"),
left_only=Mock(name="Tables Only Left"),
right_only=Mock(name="Tables Only Right"),
common=['common_table_A', 'common_table_B'],
)
yield m
@pytest.fixture
def _get_enums_data_mock(self):
with patch('sqlalchemydiff.comparer._get_enums_data') as m:
m.return_value = []
yield m
def test_compare_calls_chain(
self, _get_tables_info_mock, _get_tables_data_mock,
_get_enums_data_mock, _compile_errors_mock):
"""By inspecting `info` and `errors` at the end, we automatically
check that the whole process works as expected. What this test
leaves out is the verifications about inspectors.
"""
_get_tables_data_mock.return_value = {
'common_table_A': {
'data': 'some-data-A',
},
'common_table_B': {
'data': 'some-data-B',
},
}
tables_info = _get_tables_info_mock.return_value
result = compare(
"left_uri", "right_uri", ignores=['ignore_me'])
expected_info = {
'uris': {
'left': "left_uri",
'right': "right_uri",
},
'tables': {
'left': tables_info.left,
'left_only': tables_info.left_only,
'right': tables_info.right,
'right_only': tables_info.right_only,
'common': tables_info.common,
},
'tables_data': {
'common_table_A': {
'data': 'some-data-A',
},
'common_table_B': {
'data': 'some-data-B',
},
},
'enums': {
'left_only': [],
'right_only': [],
'common': [],
'diff': [],
},
}
expected_errors = expected_info.copy()
expected_errors['_err'] = True
assert expected_info == result.info
assert expected_errors == result.errors
def test__get_tables_info_called_with_correct_inspectors(
self, _get_inspectors_mock, _get_tables_info_mock,
_get_tables_data_mock, _get_enums_data_mock,
_compile_errors_mock):
left_inspector, right_inspector = _get_inspectors_mock.return_value
compare("left_uri", "right_uri", ignores=['ignore_me'])
_get_inspectors_mock.assert_called_once_with("left_uri", "right_uri")
_get_tables_info_mock.assert_called_once_with(
left_inspector, right_inspector, set(['ignore_me']))
@pytest.mark.usefixtures("mock_inspector_factory")
class TestCompareInternals(object):
# FIXTURES
@pytest.fixture
def _get_table_data_mock(self):
with patch('sqlalchemydiff.comparer._get_table_data') as m:
yield m
@pytest.fixture
def _diff_dicts_mock(self):
with patch('sqlalchemydiff.comparer._diff_dicts') as m:
yield m
@pytest.fixture
def _get_foreign_keys_mock(self):
with patch('sqlalchemydiff.comparer._get_foreign_keys') as m:
yield m
@pytest.fixture
def _get_primary_keys_mock(self):
with patch('sqlalchemydiff.comparer._get_primary_keys') as m:
yield m
@pytest.fixture
def _get_indexes_mock(self):
with patch('sqlalchemydiff.comparer._get_indexes') as m:
yield m
@pytest.fixture
def _get_columns_mock(self):
with patch('sqlalchemydiff.comparer._get_columns') as m:
yield m
@pytest.fixture
def _process_types_mock(self):
with patch('sqlalchemydiff.comparer._process_types') as m:
yield m
@pytest.fixture
def _process_type_mock(self):
with patch('sqlalchemydiff.comparer._process_type') as m:
yield m
@pytest.fixture
def _get_foreign_keys_info_mock(self):
with patch('sqlalchemydiff.comparer._get_foreign_keys_info') as m:
yield m
@pytest.fixture
def _get_primary_keys_info_mock(self):
with patch('sqlalchemydiff.comparer._get_primary_keys_info') as m:
yield m
@pytest.fixture
def _get_indexes_info_mock(self):
with patch('sqlalchemydiff.comparer._get_indexes_info') as m:
yield m
@pytest.fixture
def _get_columns_info_mock(self):
with patch('sqlalchemydiff.comparer._get_columns_info') as m:
yield m
@pytest.fixture
def _get_constraints_info_mock(self):
with patch('sqlalchemydiff.comparer._get_constraints_info') as m:
yield m
# TESTS
def test__get_inspectors(self):
left_inspector_mock, right_inspector_mock = Mock(), Mock()
InspectorFactory.from_uri.side_effect = [
left_inspector_mock, right_inspector_mock]
left_inspector, right_inspector = _get_inspectors(
"left_uri", "right_uri")
assert (
[call("left_uri"), call("right_uri")] ==
InspectorFactory.from_uri.call_args_list
)
assert left_inspector_mock == left_inspector
assert right_inspector_mock == right_inspector
def test__get_tables(self):
left_inspector, right_inspector = Mock(), Mock()
left_inspector.get_table_names.return_value = ['B', 'ignore_me', 'A']
right_inspector.get_table_names.return_value = ['C', 'D', 'ignore_me']
tables_left, tables_right = _get_tables(
left_inspector, right_inspector, set(['ignore_me'])
)
assert ['A', 'B'] == tables_left
assert ['C', 'D'] == tables_right
def test__get_tables_diff(self):
tables_left = ['B', 'A', 'Z', 'C']
tables_right = ['D', 'Z', 'C', 'F']
tables_left_only, tables_right_only = _get_tables_diff(
tables_left, tables_right)
assert ['A', 'B'] == tables_left_only
assert ['D', 'F'] == tables_right_only
def test__get_common_tables(self):
tables_left = ['B', 'A', 'Z', 'C']
tables_right = ['D', 'Z', 'C', 'F']
tables_common = _get_common_tables(tables_left, tables_right)
assert ['C', 'Z'] == tables_common
def test__get_tables_info(self):
left_inspector, right_inspector = Mock(), Mock()
left_inspector.get_table_names.return_value = [
'B', 'ignore_me', 'A', 'C']
right_inspector.get_table_names.return_value = [
'D', 'C', 'ignore_me', 'Z']
tables_info = _get_tables_info(
left_inspector, right_inspector, set(['ignore_me']))
assert ['A', 'B', 'C'] == tables_info.left
assert ['C', 'D', 'Z'] == tables_info.right
assert ['A', 'B'] == tables_info.left_only
assert ['D', 'Z'] == tables_info.right_only
assert ['C'] == tables_info.common
def test__get_info_dict(self):
tables_info = TablesInfo(
left=['A', 'B', 'C'], right=['C', 'D', 'Z'],
left_only=['A', 'B'], right_only=['D', 'Z'], common=['C'])
info = _get_info_dict('left_uri', 'right_uri', tables_info)
expected_info = {
'uris': {
'left': 'left_uri',
'right': 'right_uri',
},
'tables': {
'left': ['A', 'B', 'C'],
'left_only': ['A', 'B'],
'right': ['C', 'D', 'Z'],
'right_only': ['D', 'Z'],
'common': ['C'],
},
'tables_data': {},
'enums': {},
}
assert expected_info == info
def test__get_tables_data(self, _get_table_data_mock):
_get_table_data_mock.side_effect = [
{'table_data': 'data_A'},
{'table_data': 'data_B'},
]
left_inspector, right_inspector, ignore_manager = (
Mock(), Mock(), Mock()
)
tables_common = ['common_table_A', 'common_table_B']
tables_data = _get_tables_data(
tables_common, left_inspector, right_inspector, ignore_manager)
expected_tables_data = {
'common_table_A': {'table_data': 'data_A'},
'common_table_B': {'table_data': 'data_B'},
}
assert expected_tables_data == tables_data
assert [
call(
left_inspector, right_inspector, 'common_table_A',
ignore_manager
),
call(
left_inspector, right_inspector, 'common_table_B',
ignore_manager
),
] == _get_table_data_mock.call_args_list
def test__make_result(self):
info = {'info': 'dict'}
errors = {'errors': 'dict'}
result = _make_result(info, errors)
assert isinstance(result, CompareResult)
assert info == result.info
assert errors == result.errors
def test__diff_dicts(self):
left = {
'a': 'value-a',
'b': 'value-b-left',
'c': 'value-common',
}
right = {
'b': 'value-b-right',
'c': 'value-common',
'd': 'value-d',
}
expected_result = {
'left_only': ['value-a'],
'right_only': ['value-d'],
'common': ['value-common'],
'diff': [
{'key': 'b',
'left': 'value-b-left',
'right': 'value-b-right'}
],
}
result = _diff_dicts(left, right)
assert expected_result == result
def test__get_foreign_keys_info(
self, _diff_dicts_mock, _get_foreign_keys_mock):
_get_foreign_keys_mock.side_effect = [
[{'name': 'fk_left_1'}, {'name': 'fk_left_2'}],
[{'name': 'fk_right_1'}]
]
left_inspector, right_inspector = Mock(), Mock()
result = _get_foreign_keys_info(
left_inspector, right_inspector, 'table_A', [])
_diff_dicts_mock.assert_called_once_with(
{
'fk_left_1': {'name': 'fk_left_1'},
'fk_left_2': {'name': 'fk_left_2'}
},
{
'fk_right_1': {'name': 'fk_right_1'}
}
)
assert _diff_dicts_mock.return_value == result
def test__get_foreign_keys_info_ignores(
self, _diff_dicts_mock, _get_foreign_keys_mock):
_get_foreign_keys_mock.side_effect = [
[{'name': 'fk_left_1'}, {'name': 'fk_left_2'}],
[{'name': 'fk_right_1'}, {'name': 'fk_right_2'}]
]
left_inspector, right_inspector = Mock(), Mock()
ignores = ['fk_left_1', 'fk_right_2']
result = _get_foreign_keys_info(
left_inspector, right_inspector, 'table_A', ignores)
_diff_dicts_mock.assert_called_once_with(
{
'fk_left_2': {'name': 'fk_left_2'}
},
{
'fk_right_1': {'name': 'fk_right_1'}
}
)
assert _diff_dicts_mock.return_value == result
def test__get_foreign_keys(self):
inspector = Mock()
result = _get_foreign_keys(inspector, 'table_A')
inspector.get_foreign_keys.assert_called_once_with('table_A')
assert inspector.get_foreign_keys.return_value == result
def test__get_primary_keys_info(
self, _diff_dicts_mock, _get_primary_keys_mock):
_get_primary_keys_mock.side_effect = [
{'constrained_columns': ['pk_left_1', 'pk_left_2']},
{'constrained_columns': ['pk_right_1']}
]
left_inspector, right_inspector = Mock(), Mock()
result = _get_primary_keys_info(
left_inspector, right_inspector, 'table_A', [])
_diff_dicts_mock.assert_called_once_with(
{'pk_left_1': 'pk_left_1', 'pk_left_2': 'pk_left_2'},
{'pk_right_1': 'pk_right_1'}
)
assert _diff_dicts_mock.return_value == result
def test__get_primary_keys_info_ignores(
self, _diff_dicts_mock, _get_primary_keys_mock):
_get_primary_keys_mock.side_effect = [
{'constrained_columns': ['pk_left_1', 'pk_left_2']},
{'constrained_columns': ['pk_right_1', 'pk_right_2']},
]
left_inspector, right_inspector = Mock(), Mock()
ignores = ['pk_left_1', 'pk_right_2']
result = _get_primary_keys_info(
left_inspector, right_inspector, 'table_A', ignores)
_diff_dicts_mock.assert_called_once_with(
{'pk_left_2': 'pk_left_2'},
{'pk_right_1': 'pk_right_1'}
)
assert _diff_dicts_mock.return_value == result
def test__get_primary_keys_info_with_pk_constraint_name(
self, _diff_dicts_mock, _get_primary_keys_mock):
_get_primary_keys_mock.side_effect = [
{'name': 'left', 'constrained_columns': ['pk_left_1']},
{'name': 'right', 'constrained_columns': ['pk_right_1']}
]
left_inspector, right_inspector = Mock(), Mock()
result = _get_primary_keys_info(
left_inspector, right_inspector, 'table_A', [])
_diff_dicts_mock.assert_called_once_with(
{
'left': {'name': 'left',
'constrained_columns': ['pk_left_1']}
},
{
'right': {'name': 'right',
'constrained_columns': ['pk_right_1']}
}
)
assert _diff_dicts_mock.return_value == result
def test__get_primary_keys_info_ignores_with_pk_constraint_name(
self, _diff_dicts_mock, _get_primary_keys_mock):
_get_primary_keys_mock.side_effect = [
{'name': 'left_1', 'constrained_columns': ['pk_left_1']},
{'name': 'right_1', 'constrained_columns': ['pk_right_1']},
]
left_inspector, right_inspector = Mock(), Mock()
ignores = ['left_1', 'left_2', 'right_2']
result = _get_primary_keys_info(
left_inspector, right_inspector, 'table_A', ignores)
_diff_dicts_mock.assert_called_once_with(
dict(),
{
'right_1': {'name': 'right_1',
'constrained_columns': ['pk_right_1']},
}
)
assert _diff_dicts_mock.return_value == result
def test__get_primary_keys(self):
inspector = Mock()
result = _get_primary_keys(inspector, 'table_A')
inspector.get_pk_constraint.assert_called_once_with('table_A')
assert inspector.get_pk_constraint.return_value == result
def test__get_indexes_info(
self, _diff_dicts_mock, _get_indexes_mock):
_get_indexes_mock.side_effect = [
[{'name': 'index_left_1'}, {'name': 'index_left_2'}],
[{'name': 'index_right_1'}]
]
left_inspector, right_inspector = Mock(), Mock()
result = _get_indexes_info(
left_inspector, right_inspector, 'table_A', [])
_diff_dicts_mock.assert_called_once_with(
{
'index_left_1': {'name': 'index_left_1'},
'index_left_2': {'name': 'index_left_2'}
},
{
'index_right_1': {'name': 'index_right_1'}
}
)
assert _diff_dicts_mock.return_value == result
def test__get_indexes_info_ignores(
self, _diff_dicts_mock, _get_indexes_mock):
_get_indexes_mock.side_effect = [
[{'name': 'index_left_1'}, {'name': 'index_left_2'}],
[{'name': 'index_right_1'}, {'name': 'index_right_2'}]
]
left_inspector, right_inspector = Mock(), Mock()
ignores = ['index_left_1', 'index_right_2']
result = _get_indexes_info(
left_inspector, right_inspector, 'table_A', ignores)
_diff_dicts_mock.assert_called_once_with(
{
'index_left_2': {'name': 'index_left_2'}
},
{
'index_right_1': {'name': 'index_right_1'}
}
)
assert _diff_dicts_mock.return_value == result
def test__get_indexes(self):
inspector = Mock()
result = _get_indexes(inspector, 'table_A')
inspector.get_indexes.assert_called_once_with('table_A')
assert inspector.get_indexes.return_value == result
def test__get_columns_info(
self, _diff_dicts_mock, _get_columns_mock, _process_types_mock):
_get_columns_mock.side_effect = [
[{'name': 'columns_left_1'}, {'name': 'columns_left_2'}],
[{'name': 'columns_right_1'}]
]
def process_types_side_effect(columns):
columns['_processed'] = True
_process_types_mock.side_effect = process_types_side_effect
left_inspector, right_inspector = Mock(), Mock()
result = _get_columns_info(
left_inspector, right_inspector, 'table_A', [])
_diff_dicts_mock.assert_called_once_with(
{
| |
still be fine. By disabling automatic Windows updates you will have
greater control of when that happens.
Would you like to disable automatic Windows updates?
''' ),
buttons=[
('Disable', 1),
('Skip', 2),
('Quit', False)
]
).run()
if result == 2:
return True
if not result:
return result
# Based on Debloat-Windows-10 optimize windows update script
# https://github.com/W4RH4WK/Debloat-Windows-10/blob/master/scripts/optimize-windows-update.ps1
wuau_key = r'SOFTWARE\Wow6432Node\Policies\Microsoft\Windows\WindowsUpdate\AU'
with winreg.CreateKeyEx(winreg.HKEY_LOCAL_MACHINE, wuau_key) as key:
winreg.SetValueEx(key, 'NoAutoUpdate', 0, winreg.REG_DWORD, 0)
winreg.SetValueEx(key, 'AUOptions', 0, winreg.REG_DWORD, 2)
winreg.SetValueEx(key, 'ScheduledInstallDay', 0, winreg.REG_DWORD, 0)
winreg.SetValueEx(key, 'ScheduledInstallTime', 0, winreg.REG_DWORD, 3)
delivery_optimization_key = r'SOFTWARE\Policies\Microsoft\Windows\DeliveryOptimization'
with winreg.CreateKeyEx(winreg.HKEY_LOCAL_MACHINE, delivery_optimization_key) as key:
winreg.SetValueEx(key, 'DODownloadMode', 0, winreg.REG_DWORD, 0)
return True
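# Optional verification sketch (not part of the original installer): reading
# back the values written above with the standard winreg API to confirm the
# policy keys were created. The key path and value names mirror those set in
# the function above; the helper name is hypothetical.
#
#   def _check_windows_update_policy():
#       wuau_key = r'SOFTWARE\Wow6432Node\Policies\Microsoft\Windows\WindowsUpdate\AU'
#       with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, wuau_key) as key:
#           no_auto_update, _ = winreg.QueryValueEx(key, 'NoAutoUpdate')
#           au_options, _ = winreg.QueryValueEx(key, 'AUOptions')
#       return no_auto_update == 0 and au_options == 2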
def install_monitoring(base_directory):
base_directory = Path(base_directory)
result = button_dialog(
title='Monitoring installation',
text=(
'''
This next step is optional but recommended. It will install Prometheus,
Grafana and Windows Exporter so you can easily monitor your machine's
resources, Geth, Teku and your validator(s).
It will download the official Prometheus binary distribution from GitHub,
it will download the official Grafana binary distribution from their
official website and it will download the official Windows Exporter binary
distribution from GitHub.
Once installed locally, it will create a service that will automatically
start Prometheus, Grafana and Windows Exporter on reboot or if they crash.
''' ),
buttons=[
('Install', 1),
('Skip', 2),
('Quit', False)
]
).run()
if result == 2:
return True
if not result:
return result
if not install_prometheus(base_directory):
return False
if not install_windows_exporter(base_directory):
return False
if not install_grafana(base_directory):
return False
# Show message on how to use monitoring
result = button_dialog(
title='Monitoring has been installed successfully',
text=(
f'''
Everything needed for basic monitoring has been installed correctly.
You can access your Grafana server on: http://localhost:3000/
There is already an administrator user with the username: admin . You can
login with the default password: <PASSWORD> . On first login, you will be asked
to change your password.
Once logged in, you should be able to see various dashboards for Geth,
Teku and your system resources.
''' ),
buttons=[
('Keep going', True),
('Quit', False)
]
).run()
return result
def install_prometheus(base_directory):
# Install Prometheus as a service
nssm_binary = get_nssm_binary()
if not nssm_binary:
return False
# Check for existing service
prometheus_service_exists = False
prometheus_service_name = 'prometheus'
service_details = get_service_details(nssm_binary, prometheus_service_name)
if service_details is not None:
prometheus_service_exists = True
if prometheus_service_exists:
result = button_dialog(
title='Prometheus service found',
text=(
f'''
The prometheus service seems to have already been created. Here are some
details found:
Display name: {service_details['parameters'].get('DisplayName')}
Status: {service_details['status']}
Binary: {service_details['install']}
App parameters: {service_details['parameters'].get('AppParameters')}
App directory: {service_details['parameters'].get('AppDirectory')}
Do you want to skip installing prometheus and its service?
''' ),
buttons=[
('Skip', 1),
('Install', 2),
('Quit', False)
]
).run()
if not result:
return result
if result == 1:
return True
# User wants to proceed, make sure the prometheus service is stopped first
subprocess.run([
str(nssm_binary), 'stop', prometheus_service_name])
# Check if prometheus is already installed
prometheus_path = base_directory.joinpath('bin', 'prometheus')
prometheus_binary_file = prometheus_path.joinpath('prometheus.exe')
prometheus_found = False
prometheus_version = 'unknown'
if prometheus_binary_file.is_file():
try:
process_result = subprocess.run([
str(prometheus_binary_file), '--version'
], capture_output=True, text=True)
prometheus_found = True
process_output = process_result.stdout
result = re.search(r'prometheus, version (?P<version>[^ ]+)', process_output)
if result:
prometheus_version = result.group('version').strip()
except FileNotFoundError:
pass
install_prometheus_binary = True
if prometheus_found:
result = button_dialog(
title='Prometheus binary distribution found',
text=(
f'''
The prometheus binary distribution seems to have already been installed.
Here are some details found:
Version: {prometheus_version}
Location: {prometheus_path}
Do you want to skip installing the prometheus binary distribution?
''' ),
buttons=[
('Skip', 1),
('Install', 2),
('Quit', False)
]
).run()
if not result:
return result
install_prometheus_binary = (result == 2)
if install_prometheus_binary:
# Getting latest Prometheus release files
prometheus_gh_release_url = GITHUB_REST_API_URL + PROMETHEUS_LATEST_RELEASE
headers = {'Accept': GITHUB_API_VERSION}
try:
response = httpx.get(prometheus_gh_release_url, headers=headers,
follow_redirects=True)
except httpx.RequestError as exception:
log.error(f'Cannot get latest Prometheus release from Github. '
f'Exception {exception}')
return False
if response.status_code != 200:
log.error(f'Cannot get latest Prometheus release from Github. '
f'Status code {response.status_code}')
return False
release_json = response.json()
if 'assets' not in release_json:
log.error('No assets found in Github release for Prometheus.')
return False
binary_asset = None
for asset in release_json['assets']:
if 'name' not in asset:
continue
if 'browser_download_url' not in asset:
continue
file_name = asset['name']
file_url = asset['browser_download_url']
if file_name.endswith('windows-amd64.zip'):
binary_asset = {
'file_name': file_name,
'file_url': file_url
}
break
if binary_asset is None:
log.error('No prometheus binary distribution found in Github release')
return False
# Downloading latest Prometheus binary distribution archive
download_path = base_directory.joinpath('downloads')
download_path.mkdir(parents=True, exist_ok=True)
url_file_name = binary_asset['file_name']
zip_url = binary_asset['file_url']
prometheus_archive_path = download_path.joinpath(url_file_name)
prometheus_archive_hash = hashlib.sha256()
if prometheus_archive_path.is_file():
prometheus_archive_path.unlink()
try:
with open(prometheus_archive_path, 'wb') as binary_file:
log.info(f'Downloading prometheus archive {url_file_name}...')
with httpx.stream('GET', zip_url, follow_redirects=True) as http_stream:
if http_stream.status_code != 200:
log.error(f'Cannot download prometheus archive {zip_url}.\n'
f'Unexpected status code {http_stream.status_code}')
return False
for data in http_stream.iter_bytes():
binary_file.write(data)
prometheus_archive_hash.update(data)
except httpx.RequestError as exception:
log.error(f'Exception while downloading prometheus archive. Exception {exception}')
return False
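        # Note (added sketch, not in the original script): the SHA256 digest
        # accumulated above is available at this point and could be logged or
        # compared against a published checksum before extracting, e.g.:
        #
        #   log.info(f'SHA256 of {url_file_name}: {prometheus_archive_hash.hexdigest()}')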
# Unzip prometheus archive
archive_members = None
log.info(f'Extracting prometheus archive {url_file_name}...')
with ZipFile(prometheus_archive_path, 'r') as zip_file:
archive_members = zip_file.namelist()
zip_file.extractall(download_path)
# Remove download leftovers
prometheus_archive_path.unlink()
if archive_members is None or len(archive_members) == 0:
log.error('No files found in prometheus archive. We cannot continue.')
return False
# Move all those extracted files into their final destination
if prometheus_path.is_dir():
shutil.rmtree(prometheus_path)
prometheus_path.mkdir(parents=True, exist_ok=True)
archive_extracted_dir = download_path.joinpath(Path(archive_members[0]).parts[0])
with os.scandir(archive_extracted_dir) as it:
for diritem in it:
shutil.move(diritem.path, prometheus_path)
# Make sure prometheus was installed properly
prometheus_found = False
if prometheus_binary_file.is_file():
try:
process_result = subprocess.run([
str(prometheus_binary_file), '--version'
], capture_output=True, text=True)
prometheus_found = True
process_output = process_result.stdout
result = re.search(r'prometheus, version (?P<version>[^ ]+)', process_output)
if result:
prometheus_version = result.group('version').strip()
except FileNotFoundError:
pass
if not prometheus_found:
log.error(f'We could not find the prometheus binary distribution from the installed '
f'archive in {prometheus_path}. We cannot continue.')
return False
else:
log.info(f'Prometheus version {prometheus_version} installed.')
# Check if prometheus directory already exists
prometheus_datadir = base_directory.joinpath('var', 'lib', 'prometheus')
if prometheus_datadir.is_dir():
prometheus_datadir_size = sizeof_fmt(get_dir_size(prometheus_datadir))
result = button_dialog(
title='Prometheus data directory found',
text=(
f'''
An existing prometheus data directory has been found. Here are some details
found:
Location: {prometheus_datadir}
Size: {prometheus_datadir_size}
Do you want to remove this directory first and start from nothing?
''' ),
buttons=[
('Remove', 1),
('Keep', 2),
('Quit', False)
]
).run()
if not result:
return result
if result == 1:
shutil.rmtree(prometheus_datadir)
# Setup prometheus directory
prometheus_datadir.mkdir(parents=True, exist_ok=True)
# Setup prometheus config file
prometheus_config_path = base_directory.joinpath('etc', 'prometheus')
if not prometheus_config_path.is_dir():
prometheus_config_path.mkdir(parents=True, exist_ok=True)
prometheus_config_file = prometheus_config_path.joinpath('prometheus.yml')
if prometheus_config_file.is_file():
prometheus_config_file.unlink()
with open(str(prometheus_config_file), 'w', encoding='utf8') as config_file:
config_file.write(PROMETHEUS_CONFIG_WINDOWS)
# Setup prometheus service
log_path = base_directory.joinpath('var', 'log')
log_path.mkdir(parents=True, exist_ok=True)
prometheus_stdout_log_path = log_path.joinpath('prometheus-service-stdout.log')
prometheus_stderr_log_path = log_path.joinpath('prometheus-service-stderr.log')
if prometheus_stdout_log_path.is_file():
prometheus_stdout_log_path.unlink()
if prometheus_stderr_log_path.is_file():
prometheus_stderr_log_path.unlink()
    # Copy the default arguments so the module-level constant is not mutated.
    prometheus_arguments = list(PROMETHEUS_ARGUMENTS)
prometheus_arguments.append('--config.file="' + str(prometheus_config_file) + '"')
prometheus_arguments.append('--storage.tsdb.path="' + str(prometheus_datadir) + '"')
parameters = {
'DisplayName': PROMETHEUS_SERVICE_DISPLAY_NAME,
'AppRotateFiles': '1',
'AppRotateSeconds': '86400',
'AppRotateBytes': '10485760',
'AppStdout': str(prometheus_stdout_log_path),
'AppStderr': str(prometheus_stderr_log_path)
}
if not create_service(nssm_binary, prometheus_service_name, prometheus_binary_file,
prometheus_arguments, parameters):
log.error('There was an issue creating the prometheus service. We cannot continue.')
return False
log.info('Starting prometheus service...')
process_result = subprocess.run([
str(nssm_binary), 'start', prometheus_service_name
])
delay = 15
log.info(f'We are giving {delay} seconds for the prometheus service to start properly.')
time.sleep(delay)
# Verify proper Prometheus service installation
service_details = get_service_details(nssm_binary, prometheus_service_name)
if not service_details:
log.error('We could not find the prometheus service we just created. '
'We cannot continue.')
return False
    if service_details['status'] != WINDOWS_SERVICE_RUNNING:
result = button_dialog(
title='Prometheus service not running properly',
text=(
f'''
The prometheus service we just created seems to have issues. Here are some
details found:
Display name: {service_details['parameters'].get('DisplayName')}
Status: {service_details['status']}
Binary: {service_details['install']}
App parameters: {service_details['parameters'].get('AppParameters')}
App directory: {service_details['parameters'].get('AppDirectory')}
We cannot proceed if the prometheus service cannot be started properly.
Make sure to check the logs and fix any issue found there. You can see the
logs in:
{prometheus_stderr_log_path}
''' ),
buttons=[
('Quit', False)
]
).run()
# Stop the service to prevent indefinite restart attempts
subprocess.run([
str(nssm_binary), 'stop', prometheus_service_name])
log.info(
f'''
To examine your prometheus service logs, inspect the following file:
{prometheus_stderr_log_path}
'''
)
return False
# Iterate over the logs and output them for around 10 seconds
err_log_read_index = 0
for i in range(2):
err_log_text = ''
with open(prometheus_stderr_log_path, 'r', encoding='utf8') as log_file:
log_file.seek(err_log_read_index)
err_log_text = log_file.read()
err_log_read_index = log_file.tell()
err_log_length = len(err_log_text)
if err_log_length > 0:
print(err_log_text, end='')
time.sleep(5)
# Do a simple query on Prometheus to see if it's working properly
local_prometheus_query_url = 'http://localhost:9090/api/v1/query'
params = | |
chromophore to a particular position and orientation
from pymol import cmd
cmd.translate(vector=phenolCOMPosition, selection=selection)
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
def generateRandomCoordinates(steps, standardError):
from numpy import array, float
from random import random
from math import sqrt
# Generate random coordinates in a sphere
randomCoordinates = []
# Make the first coordinate be at zero error
randomCoordinates.append([0.0, 0.0, 0.0])
i = 0
while i < steps:
if random() > 0.5:
signX = +1.0
else:
signX = -1.0
if random() > 0.5:
signY = +1.0
else:
signY = -1.0
if random() > 0.5:
signZ = +1.0
else:
signZ = -1.0
randX = random()*signX
randY = random()*signY
randZ = random()*signZ
if sqrt(randX**2 + randY**2 + randZ**2) <= 1:
randCoords = array([randX, randY, randZ], float)
randCoords = randCoords*standardError
randomCoordinates.append(randCoords)
i = i + 1
return randomCoordinates
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
def generateNormalRandCoordinates(steps, standardError, firstCoordNoError=True, sigma=1.0):
# sigma is the standard deviation at which to accept random numbers
from RandomArray import normal
from numpy import array, float
from random import random
from math import sqrt
# Generate random coordinates in a sphere
randomCoordinates = []
# Make the first coordinate be at zero error
if firstCoordNoError == True:
randomCoordinates.append([0.0, 0.0, 0.0])
i = 0
while i < steps:
randX = normal(0.0,standardError)
randY = normal(0.0,standardError)
randZ = normal(0.0,standardError)
if randX*randX + randY*randY + randZ*randZ <= \
standardError*standardError*sigma*sigma:
randCoords = array([randX, randY, randZ], float)
randomCoordinates.append(randCoords)
i = i + 1
return randomCoordinates
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
def generateNormalRandomAngles(steps, standardError, length, \
firstAngleNoError=True, sigma=1.0):
from RandomArray import normal
from numpy import arctan, pi, sqrt
randomLengths = []
# Make the first coordinate be at zero error
if firstAngleNoError == True:
randomLengths.append(0.0)
i = 0
while i < steps:
randLength = normal(0.0,standardError)
if sqrt(randLength*randLength) <= standardError*sigma:
randomLengths.append(randLength)
i = i + 1
i = 0
randomAngles = []
while i < len(randomLengths):
randomAngle = arctan(randomLengths[i]/length)
randomAngles.append(randomAngle)
i = i + 1
return randomAngles
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
def randSign():
from random import random
rand = random()
if rand >= 0.5:
sign = +1.0
else:
sign = -1.0
return sign
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
def MonteCarloMove5(identifier, croSelection, tyr203Selection, \
standardError, steps, state=1):
# The first energies returned are with no positional error
from numpy import array, float
from yaehmop2 import tightbind, energyGap, makeYaehmopInput
from pymol import cmd
# Generate random coordinates in a sphere
randomCoordinatesCro = generateNormalRandCoordinates(steps, standardError)
# Calculate the energies of the unperturbed state
i = 0
homoEnergies = []
lumoEnergies = []
selectionForCalculation = croSelection + ' or ' + tyr203Selection
# Send out the state for an energy calculation
filename = makeYaehmopInput(identifier + '_' + str(i), \
selectionForCalculation, charge=-1)
tightbind(filename)
[HOMOEnergy, LUMOEnergy] = energyGap(filename)
homoEnergies.append(HOMOEnergy)
lumoEnergies.append(LUMOEnergy)
# Use the random coordinates to move the selection around
i = 1
temp_cro_Name = 'temp_cro'
temp_tyr_Name = 'temp_tyr'
while i < len(randomCoordinatesCro):
randCroCoord = randomCoordinatesCro[i]
# Generate a copy of the chromophore selection
cmd.create(temp_cro_Name, croSelection)
cmd.create(temp_tyr_Name, tyr203Selection)
cmd.select(temp_cro_Name, temp_cro_Name)
cmd.select(temp_tyr_Name, temp_tyr_Name)
# Move the temporary selection
moveRelative(temp_cro_Name, randCroCoord, state=state)
selectionForCalculation = temp_cro_Name + ' or ' +\
temp_tyr_Name
# Send out the state for an energy calculation
filename = makeYaehmopInput(identifier + '_' + str(i), \
selectionForCalculation, charge=-1)
tightbind(filename)
[HOMOEnergy, LUMOEnergy] = energyGap(filename)
homoEnergies.append(HOMOEnergy)
lumoEnergies.append(LUMOEnergy)
# Delete the copy of the chromophore selection
cmd.delete(temp_cro_Name)
cmd.delete(temp_tyr_Name)
i = i + 1
return [homoEnergies, lumoEnergies]
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
def atomicNumber(atomSymbol):
    # Returns the atomic number of an atom with a given symbol.
    # Will return 0.0 if the symbol is not recognized.
    atomicNumber = 0.0
if atomSymbol == 'C':
atomicNumber = 6
if atomSymbol == 'O':
atomicNumber = 8
if atomSymbol == 'N':
atomicNumber = 7
if atomSymbol == 'H':
atomicNumber = 1
return atomicNumber
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
def centerOfElectronDensity(selection):
from pymol import cmd
model = cmd.get_model(selection)
x, y , z = 0.0, 0.0 , 0.0
electronCount = 0.0
for a in model.atom:
aNumber = atomicNumber(a.symbol)
x+= a.coord[0]*aNumber
y+= a.coord[1]*aNumber
z+= a.coord[2]*aNumber
electronCount+= aNumber
return [x/electronCount, y/electronCount, z/electronCount]
# ---------------------------------------------------------------------------- #
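# centerOfElectronDensity above computes the atomic-number-weighted centroid
#     r_center = sum_i(Z_i * r_i) / sum_i(Z_i),
# the electron-count analogue of the centre of mass returned by centerOfMass
# below, which weights each coordinate by atomic mass instead of atomic number.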
# ---------------------------------------------------------------------------- #
def centerOfMass(selection):
from pymol import cmd
x, y , z = 0.0, 0.0 , 0.0
totalMass = 0.0
model = cmd.get_model(selection)
for a in model.atom:
mass = atomicMass(a.symbol)
x+= a.coord[0]*mass
y+= a.coord[1]*mass
z+= a.coord[2]*mass
totalMass += mass
return [x/totalMass, y/totalMass, z/totalMass]
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
def atomicMass(atomSymbol):
    # Returns the atomic mass of an atom with a given symbol.
    # Will return an atomic mass of 0.0 if the symbol is not recognized.
atomicMass = 0.0
if atomSymbol == 'C':
atomicMass = 12.0107
if atomSymbol == 'O':
atomicMass = 15.9994
if atomSymbol == 'N':
atomicMass = 14.0067
if atomSymbol == 'H':
atomicMass = 1.00794
if atomSymbol == 'S':
atomicMass = 32.065
return atomicMass
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
def calculateAxesAndDistancesCro(croSelection):
from pymol import cmd
from numpy import cross
from math import sqrt
eCenterCro=centerOfElectronDensity(croSelection)
selection = "OH"
cmd.select(selection, "name OH and " + croSelection)
OHcoords = cmd.get_model(selection).atom[0].coord
e1Axis = vecLink(eCenterCro, OHcoords)
CCroOH = sqrt(dotProduct(e1Axis, e1Axis))
e1Axis = normalize(e1Axis)
selection = "CB2"
cmd.select(selection, "name CB2 and " + croSelection)
CB2coords = cmd.get_model(selection).atom[0].coord
e2Axis = vecLink(eCenterCro, CB2coords)
CCroCB = sqrt(dotProduct(e2Axis, e2Axis))
e2Axis = normalize(e2Axis)
e3Axis = normalize(cross(e1Axis, e2Axis))
return [eCenterCro, e1Axis, e2Axis, e3Axis, CCroOH, CCroCB]
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
def calculateAxesAndDistancesTyr203(tyr203Selection):
from pymol import cmd
from numpy import cross
from math import sqrt
eCenterTyr203 = centerOfElectronDensity(tyr203Selection)
selection = "OH"
cmd.select(selection, "name OH and " + tyr203Selection)
OHcoords = cmd.get_model(selection).atom[0].coord
t1Axis = vecLink(eCenterTyr203, OHcoords)
C203OH = sqrt(dotProduct(t1Axis, t1Axis))
t1Axis = normalize(t1Axis)
selection = "CE2"
cmd.select(selection, "name CE2 and " + tyr203Selection)
CE2coords = cmd.get_model(selection).atom[0].coord
t2Axis = vecLink(eCenterTyr203, CE2coords)
C203CE2 = sqrt(dotProduct(t2Axis, t2Axis))
t2Axis = normalize(t2Axis)
t3Axis = normalize(cross(t1Axis, t2Axis))
return [eCenterTyr203, t1Axis, t2Axis, t3Axis, C203OH, C203CE2]
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
def MonteCarloMoveTilt(identifier, croSelection, tyr203Selection, \
standardError, steps, sigma=3.0, state=1):
    # Calculate the variation of the energy levels of the Citrine chromophore
    # under randomly generated positional and tilt errors
from numpy import array, float
from yaehmop2 import tightbind, energyGap, makeYaehmopInput
from pymol import cmd
from buz_rotate_cmd import buz_rotate_cmd
# Calculate eCenterCro, e1, e2, e3 axes and CCroOH and CCroCB distances for chromophore
[eCenterCro, e1Axis, e2Axis, e3Axis, CCroOH, CCroCB] = \
calculateAxesAndDistancesCro(croSelection)
# Calculate eCenter203, t1, t2, t3 axes and C203OH and C203CE2 distances for tyrosine 203
[eCenter203, t1Axis, t2Axis, t3Axis, C203OH, C203CE2] = \
calculateAxesAndDistancesTyr203(tyr203Selection)
# Generate gaussian distributed random coordinates for the center of mass
# of the chromophore and tyrosine 203
randomCoordinatesCro = generateNormalRandCoordinates(steps, standardError, \
sigma=sigma)
randomCoordinatesTyr203 = generateNormalRandCoordinates(steps, \
standardError, sigma=sigma)
# Generate random tilt angles for chromophore; theta1 and theta2
# and second theta1; theta1_2
randomAnglesTheta1_1 = generateNormalRandomAngles(steps, standardError, \
CCroOH, firstAngleNoError=True, sigma=sigma)
randomAnglesTheta2 = generateNormalRandomAngles(steps, standardError, \
CCroCB, firstAngleNoError=True, sigma=sigma)
randomAnglesTheta1_2 = generateNormalRandomAngles(steps, standardError, \
CCroOH, firstAngleNoError=True, sigma=sigma)
# Generate random tilt angles for tyrosine 203; phi1 and phi2
# and second phi1; phi1_2
randomAnglesPhi1_1 = generateNormalRandomAngles(steps, standardError, \
C203OH, firstAngleNoError=True, sigma=sigma)
randomAnglesPhi2 = generateNormalRandomAngles(steps, standardError, \
C203CE2,firstAngleNoError=True, sigma=sigma)
randomAnglesPhi1_2 = generateNormalRandomAngles(steps, standardError, \
C203OH, firstAngleNoError=True, sigma=sigma)
# Monte Carlo loop
    # Remember that the first coordinate and angle set carries no error
i = 0
homoEnergies = []
lumoEnergies = []
temp_cro_Name = 'temp_cro'
temp_tyr_Name = 'temp_tyr'
while i < steps+1:
# Generate a copy of the chromophore selection
cmd.create(temp_cro_Name, croSelection)
cmd.create(temp_tyr_Name, tyr203Selection)
cmd.select(temp_cro_Name, temp_cro_Name)
cmd.select(temp_tyr_Name, temp_tyr_Name)
theta1_1 = randomAnglesTheta1_1[i]
theta2 = randomAnglesTheta2[i]
theta1_2 = randomAnglesTheta1_2[i]
phi1_1 = randomAnglesPhi1_1[i]
phi2 = randomAnglesPhi2[i]
phi1_2 = randomAnglesPhi1_2[i]
randCroCoord = randomCoordinatesCro[i]
randTyrCoord = randomCoordinatesTyr203[i]
# Rotate chromophore by theta1 about e3
buz_rotate_cmd(temp_cro_Name, eCenterCro, e3Axis, theta1_1)
# Rotate chromophore by theta2 about e1
buz_rotate_cmd(temp_cro_Name, eCenterCro, e1Axis, theta2)
# Rotate chromophore by theta1_2 about e2
buz_rotate_cmd(temp_cro_Name, eCenterCro, e2Axis, theta1_2)
# Rotate tyrosine 203 by phi1 about t3
buz_rotate_cmd(temp_tyr_Name, eCenter203, t3Axis, phi1_1)
# Rotate tyrosine 203 by phi2 about t1
buz_rotate_cmd(temp_tyr_Name, eCenter203, t1Axis, phi2)
# Rotate tyrosine 203 by phi1_2 about t2
buz_rotate_cmd(temp_tyr_Name, eCenter203, t2Axis, phi1_2)
# Move center of mass of tyrosine 203 to randomly generated coordinates
moveRelative(temp_tyr_Name, randTyrCoord, state=state)
# Move center of mass of chromophore to randomly generated coordinates
moveRelative(temp_cro_Name, randCroCoord, state=state)
# Send geometry to yaehmop2 for energy level calculation
selectionForCalculation = temp_cro_Name + ' or ' + temp_tyr_Name
filename = makeYaehmopInput(identifier + '_' + str(i), \
selectionForCalculation, charge=-1)
tightbind(filename)
# Read in energy levels and save
[HOMOEnergy, LUMOEnergy] = energyGap(filename)
homoEnergies.append(HOMOEnergy)
lumoEnergies.append(LUMOEnergy)
# Delete the temporary chromophore and tyrosine 203 selection
cmd.delete(temp_cro_Name)
cmd.delete(temp_tyr_Name)
# Repeat
i = i + 1
return [homoEnergies, lumoEnergies]
# ---------------------------------------------------------------------------- #
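# A hedged usage sketch for MonteCarloMoveTilt (not part of the original
# module). The identifier, selection names and error values below are
# assumptions; in a real session the selections must already exist in PyMOL and
# the yaehmop2 helpers must be importable.
def exampleMonteCarloMoveTilt():
    homo, lumo = MonteCarloMoveTilt('citrine_run1', 'cro_sel', 'tyr203_sel',
                                    standardError=0.2, steps=100, sigma=3.0)
    # Element 0 of each list is the unperturbed reference energy.
    return homo, lumo
# ---------------------------------------------------------------------------- #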
# ---------------------------------------------------------------------------- #
def generateRandomCoordsAndAngles(standardError, CCroOH, CCroCB, C203OH, \
C203CE2, CD1CD2):
    from RandomArray import normal
    from numpy import arctan
randCroX = normal(0.0,standardError)
randCroY = normal(0.0,standardError)
randCroZ = normal(0.0,standardError)
randTyrX = normal(0.0,standardError)
randTyrY = normal(0.0,standardError)
randTyrZ = normal(0.0,standardError)
randCroCoords = [randCroX, randCroY, randCroZ]
randTyrCoords = [randTyrX, randTyrY, randTyrZ]
randLengthTheta1_1 = normal(0.0,standardError)
randLengthTheta2_1 = normal(0.0,standardError)
randLengthTheta1_2 = normal(0.0,standardError)
randLengthPhi1_1 = normal(0.0,standardError)
randLengthPhi2_1 = normal(0.0,standardError)
randLengthPhi1_2 = normal(0.0,standardError)
croPhenolLength = normal(0.0,standardError)
theta1_1 = arctan(randLengthTheta1_1/CCroOH)
theta2_1 = arctan(randLengthTheta2_1/CCroCB)
theta1_2 = arctan(randLengthTheta1_2/CCroOH)
phi1_1 = arctan(randLengthPhi1_1/C203OH)
phi2_1 = arctan(randLengthPhi2_1/C203CE2)
phi1_2 = arctan(randLengthPhi1_2/C203OH)
croPhenolAngle = arctan(croPhenolLength/(0.5*CD1CD2))
thetaAngles = [theta1_1, theta2_1, theta1_2]
phiAngles = [phi1_1, phi2_1, phi1_2]
return [randCroCoords, randTyrCoords, thetaAngles, phiAngles, \
croPhenolAngle]
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
def perturbChromophoreAndTyr203(temp_cro_Name, temp_tyr_Name, randCroCoord, \
randTyrCoord, thetaAngles, phiAngles, eCenterCro, eCenter203, e1Axis, e2Axis,\
e3Axis, t1Axis, t2Axis, t3Axis, CG2Coord, nCG2CB2Vec, croPhenolAngle):
from buz_rotate_cmd import buz_rotate_cmd
from pymol import cmd
# Rotate the chromophore phenol by croPhenolAngle
name = selectChromophorePhenol(temp_cro_Name)
buz_rotate_cmd(name, CG2Coord, nCG2CB2Vec, croPhenolAngle)
[theta1_1, theta2, theta1_2] = thetaAngles
[phi1_1, phi2, phi1_2] = phiAngles
# Rotate chromophore by theta1 about e3
buz_rotate_cmd(temp_cro_Name, eCenterCro, e3Axis, theta1_1)
# Rotate chromophore by theta2 about e1
buz_rotate_cmd(temp_cro_Name, eCenterCro, e1Axis, theta2)
# Rotate chromophore by theta1_2 about e2
buz_rotate_cmd(temp_cro_Name, eCenterCro, e2Axis, theta1_2)
# Rotate tyrosine 203 by phi1 about t3
buz_rotate_cmd(temp_tyr_Name, eCenter203, t3Axis, phi1_1)
# Rotate tyrosine 203 by phi2 about t1
buz_rotate_cmd(temp_tyr_Name, eCenter203, t1Axis, phi2)
# Rotate tyrosine 203 by phi1_2 about t2
buz_rotate_cmd(temp_tyr_Name, eCenter203, t2Axis, phi1_2)
# Move center of mass of tyrosine 203 to randomly generated coordinates
cmd.translate(selection=temp_tyr_Name, \
vector=[randTyrCoord[0], randTyrCoord[1], randTyrCoord[2]])
# Move center of mass of chromophore to randomly generated coordinates
cmd.translate(selection=temp_cro_Name, \
vector=[randCroCoord[0], randCroCoord[1], randCroCoord[2]])
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
def insideErrorVolume(temp_cro_Name, temp_tyr_Name, croSelection, \
tyr203Selection, standardError, sigmaCoE, sigmaAtom):
from pymol import cmd
from math import sqrt
    structureOK =
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 <NAME> <<EMAIL>>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
from io import StringIO
from io import BytesIO
import smtplib
from email import encoders
from email.message import EmailMessage
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from jinja2 import Environment, BaseLoader
from luxon.core.logger import GetLogger
from luxon.utils.encoding import if_bytes_to_unicode
from luxon.utils.html5 import strip_tags
log = GetLogger(__name__)
jinja = Environment(loader=BaseLoader)
def _render_template(template, **kwargs):
template = jinja.from_string(template)
return template.render(**kwargs)
def parse_attachment(message_part):
"""Function to parse attachment from MIME message part.
Args:
message_part (obj): part in the MIME message object tree.
Returns:
        A StringIO object (for string payloads) or a BytesIO object (for byte
        payloads), annotated with attachment metadata. If the part is not an
        attachment, None is returned.
"""
content_disposition = message_part.get("Content-Disposition", None)
content_id = message_part.get("Content-ID", None)
if content_disposition:
dispositions = content_disposition.strip().split(";")
if (content_disposition and
(dispositions[0].lower() == "attachment" or
dispositions[0].lower() == "inline")):
file_data = message_part.get_payload(decode=True)
if isinstance(file_data, str):
attachment = StringIO(file_data)
else:
attachment = BytesIO(file_data)
attachment.content_type = message_part.get_content_type()
attachment.size = len(file_data)
attachment.name = None
attachment.create_date = None
attachment.mod_date = None
attachment.read_date = None
attachment.disposition = dispositions[0]
attachment.id = content_id
for param in dispositions[1:]:
name, value = param.strip().split("=")
name = name.lower()
value = value.replace('"', "")
value = value.replace("'", "")
if name == "filename":
attachment.name = value
elif name == "create-date":
attachment.create_date = value # TODO: datetime
elif name == "modification-date":
attachment.mod_date = value # TODO: datetime
elif name == "read-date":
attachment.read_date = value # TODO: datetime
return attachment
return None
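# A minimal usage sketch for parse_attachment (not part of the original
# module): build a small multipart message carrying one base64-encoded
# attachment and walk its parts. The payload and filename are illustrative.
def _example_parse_attachment():
    msg = MIMEMultipart()
    part = MIMEBase('application', 'octet-stream')
    part.set_payload(b'hello')
    encoders.encode_base64(part)
    part.add_header('Content-Disposition', 'attachment', filename='hello.bin')
    msg.attach(part)
    for message_part in msg.walk():
        attachment = parse_attachment(message_part)
        if attachment is not None:
            # attachment.name == 'hello.bin' and attachment.size == 5 here.
            return attachment
    return None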
class ParseContent(object):
"""Utility Class to Parse the content of a MIME message.
Args:
msg (obj): object of python email library's EmailMessage class.
html_template (str): Name of jinja2 template to use to render HTML.
text_template (str): Name of jinja2 template to use to render text.
"""
def __init__(self, msg, html_template=None, text_template=None):
self._text = None
self._html = None
self._html_template = html_template
self._text_template = text_template
self._attachments = []
self._inline = []
if msg.is_multipart():
for part in msg.walk():
attachment = parse_attachment(part)
                # if this part is an attachment, collect it; otherwise treat it as message body
if attachment:
self._attachments.append(attachment)
else:
body = part.get_payload(decode=True) # decode
if body is not None:
if 'html' in part.get_content_type():
self._html = if_bytes_to_unicode(body)
elif 'text/plain' in part.get_content_type():
self._text = if_bytes_to_unicode(body)
# not multipart - i.e. plain text, no attachments,
# keeping fingers crossed
else:
if 'text/html' == msg.get_content_type():
self._html = if_bytes_to_unicode(msg.get_payload(decode=True))
elif 'text/plain' == msg.get_content_type():
self._text = if_bytes_to_unicode(msg.get_payload(decode=True))
def _render(self, template, body, **kwargs):
if template is None:
return body
if body is None:
return None
return _render_template(template, body=body, **kwargs)
@property
def attachments(self):
"""Returns list of file attachment objects"""
return self._attachments
def html(self, **kwargs):
"""Returns text as rendered by the html jinja2 template.
If no html was found, None is returned.
"""
# NOTE(cfrademan): If html found render from html template.
# Will return none otherwise.
return self._render(self._html_template,
body=self._html,
**kwargs)
def text(self, **kwargs):
"""Returns text as rendered by the text jinja2 template.
        If no plain text was found but HTML was, the tag-stripped HTML rendered
        through the HTML template is returned instead.
"""
# NOTE(cfrademan): If no html found, try render from text.
# However this will use the text template if possible.
if self._text is None and self._html is not None:
return self._render(self._html_template,
body=strip_tags(self._html),
**kwargs)
        # NOTE(cfrademan): If plain text was found, render it from the text template.
return self._render(self._text_template,
body=self._text,
**kwargs)
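# A minimal usage sketch for ParseContent (not part of the original module):
# parse an HTML-only message and render it through an inline jinja2 template.
# The template string and message body below are assumptions.
def _example_parse_content():
    msg = EmailMessage()
    msg.set_content('<p>Hello <b>world</b></p>', subtype='html')
    parsed = ParseContent(msg, html_template='<html>{{ body }}</html>')
    rendered_html = parsed.html()
    # No plain-text part exists, so text() falls back to the tag-stripped HTML.
    rendered_text = parsed.text()
    return rendered_html, rendered_text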
def new_message(subject=None, email_from=None, email_to=None, old_msg=None,
multipart=True):
"""Utility function to generate an email message.
    Either generates a new message from scratch, or updates a previous
    message.
Args:
subject (str): Email Subject.
email_from(str): Sender email address.
email_to(str): Recipient email address.
old_msg (obj): object of python email library's EmailMessage class to
be updated.
multipart (bool): Whether or not to create MIMEMultipart msg.
Returns:
obj of type MIMEMultipart if multipart is True, else EmailMessage.
"""
if multipart is True:
new_msg = MIMEMultipart('related')
else:
new_msg = EmailMessage()
if email_from is not None:
new_msg['From'] = email_from
else:
new_msg['From'] = old_msg['From']
if email_to is not None:
new_msg['To'] = email_to
else:
new_msg['To'] = old_msg['To']
if subject is not None:
new_msg['Subject'] = subject
else:
new_msg['Subject'] = old_msg['Subject']
return new_msg
def format_msg(msg, html_template=None, text_template=None,
subject=None,
email_from=None,
email_to=None,
multipart=None,
attachments=True,
**kwargs):
"""Utility function to generate an email message with content rendered
from jinja2 templates.
    If multipart is not specified, it defaults to whether the original message is multipart.
Args:
msg (obj): object of python email library's EmailMessage class.
html_template (str): Name of jinja2 template to use to render HTML.
text_template (str): Name of jinja2 template to use to render text.
subject (str): Email Subject.
email_from(str): Sender email address.
email_to(str): Recipient email address.
multipart (bool): Whether or not to create MIMEMultipart msg.
attachments (bool): Whether or not to include attachments.
kwargs (kwargs): Keyword Args used to render the templates.
Returns:
Python email message object.
"""
# NOTE(cfrademan): if multipart is set use that otherwise
# use original message value.
if multipart is None:
multipart = msg.is_multipart()
parse_body = ParseContent(msg, html_template, text_template)
new_msg = new_message(subject=subject,
email_from=email_from,
email_to=email_to,
old_msg=msg,
multipart=multipart)
html = parse_body.html(**kwargs)
text = parse_body.text(**kwargs)
if multipart is False:
if html is not None:
new_msg.set_content(html, subtype='html')
else:
new_msg.set_content(text)
else:
body = MIMEMultipart('alternative')
# NOTE(cfrademan): Attach parts into message container.
# According to RFC 2046, the last part of a multipart message,
# in this case the HTML message, is best and preferred.
if text is not None:
mime_text = MIMEText(text, 'plain')
body.attach(mime_text)
if html is not None:
mime_html = MIMEText(html, 'html')
body.attach(mime_html)
new_msg.attach(body)
if multipart is True and attachments is True:
for file in parse_body.attachments:
main_type, sub_type = file.content_type.split('/')
attachment = MIMEBase(main_type, sub_type)
with file as f:
attachment.set_payload(f.read())
encoders.encode_base64(attachment)
attachment.add_header('Content-Disposition', file.disposition,
filename=file.name)
if file.id is not None:
attachment.add_header('Content-ID', file.id)
new_msg.attach(attachment)
return new_msg
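# A minimal usage sketch for format_msg (not part of the original module). The
# templates and addresses below are assumptions; any extra keyword arguments
# would be forwarded to the jinja2 templates when the body is rendered.
def _example_format_msg(original_msg):
    return format_msg(original_msg,
                      html_template='<html><body>{{ body }}</body></html>',
                      text_template='{{ body }}',
                      subject='Re-rendered message',
                      email_from='sender@example.com',
                      email_to='recipient@example.com')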
class SMTPClient(object):
"""Utility Class for sending email via SMTP server.
Example usage:
.. code:: python
with SMTPClient(email, server) as server:
result = server.send(rcpt, subject=subject, body=body, msg=msg)
Args:
email (str): Sender email address.
server (str): IP address of SMTP server.
port (int): TCP port of SMTP server.
tls (bool): Whether or not to use TLS.
auth (tuple): (username, password) to use for authentication.
Attributes:
smtp (obj): object of class smtplib.SMTP.
email (str): Sender email address.
"""
def __init__(self, email, server, port=587, tls=False, auth=None):
        self.smtp = smtplib.SMTP(server, port)
        if tls is True:
            if self.smtp.starttls()[0] != 220:
                raise Exception('Start TLS failed')
if auth is not None:
username, password = auth
self.smtp.login(username, password)
self.email = email
def send(self, email=None, subject=None, body=None, msg=None):
"""Method to send Email.
Args:
email (str): Recipient Email address.
subject (str): Email Subject.
body (str): Email Body
msg (obj): object of python email library's EmailMessage class.
Returns:
Bool: True if sending was successful, else False.
"""
if msg is None:
msg = EmailMessage()
if email is not None:
if 'To' in msg:
del msg['To']
msg['To'] = email
if 'From' in msg:
del msg['From']
msg['From'] = self.email
        if
return
coef, cov = curve_fit(fun3, x_p, y_nm)
sigma_p = np.sqrt(np.diag(cov))
text = 'Y(nm) = %.3Ex%s+%.3Ex%s+%.7fx+%.4f'
cali_formula = text%(coef[0],s3,coef[1],s2,coef[2],coef[3])
self.results.setText(cali_formula)
text_p = 'Y(nm) = %.3Ex$^3$+%.3Ex$^2$+%.7fx+%.4f'
text_plot = text_p%(coef[0],coef[1],coef[2],coef[3])
elif poly_order == 4:
if x_p.size < 6:
                print 'Please input at least 6 pairs of x and wavelength values in the table above.'
return
coef, cov = curve_fit(fun4, x_p, y_nm)
sigma_p = np.sqrt(np.diag(cov))
text = 'Y(nm) = %.3Ex%s+%.3Ex%s+%.3Ex%s+%.7fx+%.4f'
cali_formula = text%(coef[0],s4,coef[1],s3,coef[2],s2,coef[3],coef[4])
self.results.setText(cali_formula)
text_p = 'Y(nm) = %.3Ex$^4$+%.3Ex$^3$+%.3Ex$^2$+%.7fx+%.4f'
text_plot = text_p%(coef[0],coef[1],coef[2],coef[3],coef[4])
f_fit = np.polyval(coef,x_p)
res = y_nm-f_fit
        sse = np.sum((res)**2)                                # sum of squared residuals
        ssr = np.sum((f_fit-np.mean(y_nm))**2)                # regression sum of squares
        ssy = np.sum((y_nm)**2)-(np.sum(y_nm)**2/y_nm.size)   # total sum of squares about the mean
        rs = ssr/ssy                                          # coefficient of determination (R-squared)
        rms = (sse/(x_p.size-poly_order-1))**0.5              # RMS error with (n - order - 1) degrees of freedom
        sigma_p = np.sqrt(np.diag(cov))                       # 1-sigma uncertainties of the fitted coefficients
        # sigma_t = np.sqrt(sse/(x_p.size-poly_order-1))
        # ssx = np.sum((x_p)**2)-(np.sum(x_p)**2/x_p.size)
        # sigma_a = sigma_t/(ssx**0.5)
        # sigma_b = sigma_t*((1./x_p.size)+(np.mean(x_p)**2/ssx))**0.5
for res_idx in range(res.size):
item_res = QtGui.QTableWidgetItem('%.3f'%(res[res_idx]))
self.tableWidget.setItem(res_idx, 2, item_res)
for para_idx in range(coef.size):
if coef.size == 2:
if para_idx+1 == 1:
para_text = 'coef %d = %.7f %s %.7f'%(para_idx+1,coef[para_idx],pm,sigma_p[para_idx])
else:
para_text = 'coef %d = %.4f %s %.4f'%(para_idx+1,coef[para_idx],pm,sigma_p[para_idx])
            else: # for fits of order >= 2: all but the last two coefficients are printed in scientific notation
if coef.size-(para_idx+1) >= 2:
para_text = 'coef %d = %.3E %s %.3E'%(para_idx+1,coef[para_idx],pm,sigma_p[para_idx])
else:
if coef.size-(para_idx+1) == 1:
para_text = 'coef %d = %.7f %s %.7f'%(para_idx+1,coef[para_idx],pm,sigma_p[para_idx])
else:
para_text = 'coef %d = %.4f %s %.4f'%(para_idx+1,coef[para_idx],pm,sigma_p[para_idx])
self.results.append(para_text)
self.results.append('R-squared = %.6f'%(rs))
self.results.append('RMS = %.4f'%(rms))
fig_cali, ax_cali = plt.subplots()
plt.gcf().canvas.set_window_title('Wavelength calibration curve')
plt.xlabel('Pixel (x)')
plt.ylabel('Wavelength (nm)')
plt.minorticks_on()
plt.tight_layout()
plt.plot(x_p,y_nm,'ko')
plt.plot(x_p,f_fit,'r-',label=r'%s R$^2$=%.6f'%(text_plot,rs))
plt.legend(loc='best')
plt.show()
#save result
if save_action:
file = open(filename , "w")
file.write('# Wavelength calibration data\n')
file.write('# box_y1 box_y2 flag \n')
file.write(' %d %d %d\n'%(box[0],box[1],box[2]))
file.write('# X_pixel Wavelength(nm) Residual \n')
for i in range(x_p.size):
line = ' %s %s %.3f\n'%(str(x_p[i]),str(y_nm[i]),res[i])#2
file.write(line)
file.write('# Results (Polynomial Regression, Order = %d) : \n'%(poly_order))
file.write('# 1. Coefficients : \n')
for i in range(coef.size):
file.write(' %.8E'%(coef[i]))
file.write('\n')
file.write('# 2. Standard error of coefficient : \n')
for i in range(sigma_p.size):
file.write(' %.8E'%(sigma_p[i]))
file.write('\n')
file.write('# 3. R-squared : \n')
file.write(' %.6f\n'%(rs))
file.write('# 4. RMS : \n')
file.write(' %.6f\n'%(rms))
file.close()
print r'Saved in folder: Save\Wavelength_calibration'
########################################################################
def Load_calibration_data(self):
global box
global x_p
global y_nm
global coef
global poly_order
global sigma_p,rs,rms
pm = '+/-'
        s4 = u'\u2074' # superscript four
        s3 = u'\u00B3' # superscript three
        s2 = u'\u00B2' # superscript two
x_p = np.array([],dtype=float)
y_nm = np.array([],dtype=float)
res = np.array([],dtype=float)
path_cali_data, _ = QtGui.QFileDialog.getOpenFileName(None, "Open csv file...",os.getcwd(), "text file (*.csv)")
if path_cali_data == "": return
file = open(path_cali_data , "r")
data = file.readlines()
file.close()
if len(data) > 45 :
            print 'The selected file does not look like a wavelength-calibration file.'
return
        for item_idx in range(30): # clear any previously entered table rows
item1 = QtGui.QTableWidgetItem('')
self.tableWidget.setItem(item_idx, 0,item1)
item2 = QtGui.QTableWidgetItem('')
self.tableWidget.setItem(item_idx, 1,item2)
item3 = QtGui.QTableWidgetItem('')
self.tableWidget.setItem(item_idx, 2,item3)
box = np.asarray(data[2].split(' ')[1:4],dtype=int)#3rd line;first element is ''
for i in range(len(data)-13): #size of x_p
line_list = data[4+i].split(' ')#5th line
x_p = np.append(x_p,float(line_list[1]))
y_nm = np.append(y_nm,float(line_list[2]))
res = np.append(res,float(line_list[3]))
coef = np.asarray(data[len(data)-7].split(' ')[1::],dtype=float)
poly_order = coef.size-1
for idx in range(x_p.size):
item_x = QtGui.QTableWidgetItem(str(x_p[idx]))
item_w = QtGui.QTableWidgetItem(str(y_nm[idx]))
item_r = QtGui.QTableWidgetItem(str(res[idx]))
self.tableWidget.setItem(idx, 0,item_x)
self.tableWidget.setItem(idx, 1,item_w)
self.tableWidget.setItem(idx, 2,item_r)
self.lineEdit_2.setText('%d'%(int(box[0])+1))
self.lineEdit_3.setText('%d'%(int(box[1])))
self.lineEdit_2.setReadOnly(True)
self.lineEdit_3.setReadOnly(True)
self.pushButton_4.setText("Unlock")
self.pushButton_4.setStyleSheet('color: red')
#######################################################################
from scipy.optimize import curve_fit
def fun(x,a,b):
return a*x+b
def fun2(x,a,b,c):
return a*x**2+b*x+c
def fun3(x,a,b,c,d):
return a*x**3+b*x**2+c*x+d
def fun4(x,a,b,c,d,e):
return a*x**4+b*x**3+c*x**2+d*x+e
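        # The nested helpers above define the candidate polynomial models
        #     Y(x) = a*x + b,  a*x**2 + b*x + c,  ...,  a*x**4 + b*x**3 + c*x**2 + d*x + e,
        # and curve_fit returns the best-fit coefficients together with their
        # covariance matrix; the square roots of its diagonal (sigma_p below)
        # are the 1-sigma uncertainties of the individual coefficients.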
if poly_order == 1:
coef, cov = curve_fit(fun, x_p, y_nm)
sigma_p = np.sqrt(np.diag(cov))
text = 'Y(nm) = %.7fx+%.4f'
cali_formula = text%(coef[0],coef[1])
self.results.setText(cali_formula)
elif poly_order == 2:
coef, cov = curve_fit(fun2, x_p, y_nm)
sigma_p = np.sqrt(np.diag(cov))
text = 'Y(nm) = %.3Ex%s+%.7fx+%.4f'
cali_formula = text%(coef[0],s2,coef[1],coef[2])
self.results.setText(cali_formula)
elif poly_order == 3:
coef, cov = curve_fit(fun3, x_p, y_nm)
sigma_p = np.sqrt(np.diag(cov))
text = 'Y(nm) = %.3Ex%s+%.3Ex%s+%.7fx+%.4f'
cali_formula = text%(coef[0],s3,coef[1],s2,coef[2],coef[3])
self.results.setText(cali_formula)
elif poly_order == 4:
coef, cov = curve_fit(fun4, x_p, y_nm)
sigma_p = np.sqrt(np.diag(cov))
text = 'Y(nm) = %.3Ex%s+%.3Ex%s+%.3Ex%s+%.7fx+%.4f'
cali_formula = text%(coef[0],s4,coef[1],s3,coef[2],s2,coef[3],coef[4])
self.results.setText(cali_formula)
f_fit = np.polyval(coef,x_p)
res = y_nm-f_fit
        sse = np.sum((res)**2)                                # sum of squared residuals
        ssr = np.sum((f_fit-np.mean(y_nm))**2)                # regression sum of squares
        ssy = np.sum((y_nm)**2)-(np.sum(y_nm)**2/y_nm.size)   # total sum of squares about the mean
        rs = ssr/ssy                                          # coefficient of determination (R-squared)
rms = (sse/(x_p.size-poly_order-1))**0.5
sigma_p = np.sqrt(np.diag(cov))
for para_idx in range(coef.size):
if coef.size == 2:
if para_idx+1 == 1:
para_text = 'coef %d = %.7f %s %.7f'%(para_idx+1,coef[para_idx],pm,sigma_p[para_idx])
else:
para_text = 'coef %d = %.4f %s %.4f'%(para_idx+1,coef[para_idx],pm,sigma_p[para_idx])
else:
if coef.size-(para_idx+1) >= 2:
para_text = 'coef %d = %.3E %s %.3E'%(para_idx+1,coef[para_idx],pm,sigma_p[para_idx])
else:
if coef.size-(para_idx+1) == 1:
para_text = 'coef %d = %.7f %s %.7f'%(para_idx+1,coef[para_idx],pm,sigma_p[para_idx])
else:
para_text = 'coef %d = %.4f %s %.4f'%(para_idx+1,coef[para_idx],pm,sigma_p[para_idx])
self.results.append(para_text)
self.results.append('R-squared = %.6f'%(rs))
self.results.append('RMS = %.4f'%(rms))
########################################################################
def plot_cali_profile(self):
global intensity_box
if path == "":
print 'Please load spectral image.'
return
if coef.size < 2:
print 'Please do the wavelength-calibration or load the calibration data.'
return
img_name = path.split('/')[len(path.split('/'))-1]
savepath = 'Save//Profile_value'
if not os.path.exists(savepath): os.makedirs(savepath)
filename = 'Save//Profile_value//%s_profile_value.csv'%(img_name)
#Check profile value file exist or not
if os.path.isfile(filename):
info = "The profile value of image %s has been saved before.\n\
Do you want to overwrite existing profile?\n\
Yes : Overwrite the existing file.\n\
No : Save as new file.\n\
Ignore: Just show profile and don't save anything."%(img_name)
result = QtGui.QMessageBox.question(self.scrollArea,
"Overwrite or not...",info,
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No | QtGui.QMessageBox.Ignore, QtGui.QMessageBox.Yes)
if result == QtGui.QMessageBox.Yes:
save_action = True
elif result == QtGui.QMessageBox.Ignore:
save_action = False
else :
save_action = True
path_save, _ = QtGui.QFileDialog.getSaveFileName(None, "Profile value Save as...",os.getcwd(), "text (*.csv)") #
if path_save == "": return
filename = path_save
else:
save_action = True
plot_x, intensity_box = intensitybox(path,box)
x_wave = xwave(plot_x) #coef in global
fig_cali_p, ax_cali_p = plt.subplots()
plt.gcf().canvas.set_window_title("%s Profile in wavelength(Y=%d to %d)"%(img_name,int(box[0])+1,int(box[1])))
plt.xlim(x_wave.min(),x_wave.max())
plt.xlabel('Wavelength (nm)')
plt.ylabel('Intensity (counts)')
plt.minorticks_on()
plt.grid(True)
plt.tight_layout()
plt.plot(x_wave,intensity_box,'r-')
plt.locator_params(axis='x', tight=True,nbins=20) #set ticks range
plt.show()
if save_action: #save value or not
            save_data = np.concatenate((x_wave.reshape(x_wave.size,1), intensity_box.reshape(x_wave.size,1)), axis=1) # combine the two arrays into (x, y) column pairs
np.savetxt(filename,save_data,delimiter=',',fmt='%.4f')
print r'Saved to folder: Save\Profile_value'
img = Image.open(path)
xsize, ysize = img.size
pix_area = img.crop((0,box[0],xsize,box[1]))
f_spimg, ax_spimg = plt.subplots()
ax_spimg = plt.subplot2grid((4,1), (0, 0),rowspan=3)
plt.gcf().canvas.set_window_title("%s Profile in wavelength with image(Y=%d to %d)"%(img_name,int(box[0])+1,int(box[1])))
ax_spimg.set_xlabel('Wavelength (nm)')
ax_spimg.set_ylabel('Intensity (counts)')
plt.xlim(x_wave.min(),x_wave.max())
plt.minorticks_on()
plt.plot(x_wave,intensity_box,'r-')
plt.locator_params(axis='x',nbins=20) #set ticks range
plt.subplot2grid((4,1), (3, 0),rowspan=1,sharex=ax_spimg)
plt.axis("off")
if img.mode[0] == 'I' or img.mode[0] == 'F' or img.mode[0] == 'L' :
pix_area = pix_area.convert('F').resize((xsize,2),Image.BILINEAR)#zoom in (32-bit floating)
pix = np.asarray([pix_area.getdata()],np.float64).reshape(2,xsize)
pix = pix[:,np.argsort(x_wave)]
plt.imshow(pix,aspect='equal',extent=(x_wave.min(),x_wave.max(), 65, 0),interpolation='bilinear',cmap='Greys_r')#aspect='auto''nearest''bilinear',aspect='equal',extent=(0, xsize, 1000, 0)
else:
pix_area = pix_area.resize((xsize,2),Image.BILINEAR)#zoom in BILINEAR BICUBIC
pix = np.asarray([pix_area.getdata()],dtype=np.uint8).reshape(2,xsize,3)
pix = pix[:,np.argsort(x_wave),:]
plt.imshow(pix,aspect='equal',extent=(x_wave.min(),x_wave.max(), 100, 0),interpolation='bilinear')# bilinear bicubic aspect='auto''nearest''bilinear',aspect='equal',extent=(0, xsize, 1000, 0)
f_spimg.tight_layout()
plt.subplots_adjust(left=0.08, right=0.95, top=0.95, bottom=0.05)
plt.show()
chk_psi = self.checkBox_11.isChecked() #image in profile shape
if chk_psi == True:
fig_psi, ax_psi = plt.subplots()
plt.gcf().canvas.set_window_title("%s in profile shape(Y=%d to %d)"%(img_name,int(box[0])+1,int(box[1])))
plt.xlabel('Wavelength (nm)')
plt.ylabel('Intensity (counts)')
plt.minorticks_on()
high = np.asarray(intensity_box,dtype=int)
if img.mode[0] == 'I'or img.mode[0] == 'F' or img.mode[0] == 'L':
pix_area = pix_area.convert('F').resize((xsize,255),Image.BILINEAR)#zoom in (32-bit floating)
pix = np.asarray([pix_area.getdata()],np.uint8).reshape(255,xsize)
pix[pix < 0] = pix[pix < 0]+2**16
if high.max() > 255:
high = high/(65535/255) #change linear scale by 65535/255
for ii in range(xsize): #set to white for out of profile intensity region
pix[high[ii]+1:255,ii] = [0]# need change linear scale by 65535/255
pix = pix[:,np.argsort(x_wave)]
plt.imshow(pix,aspect='equal',extent=(x_wave.min(),x_wave.max(), 255, 0),interpolation='bilinear',cmap='Greys_r')#aspect='auto''nearest''bilinear',aspect='equal',extent=(0, xsize, 1000, 0)
else:
pix_area = pix_area.resize((xsize,255),Image.BILINEAR)#zoom in (32-bit floating)
pix = np.asarray([pix_area.getdata()],dtype=np.uint8).reshape(255,xsize,3)
for ii in range(xsize): #set to white for out of region
pix[high[ii]+1:255,ii] = [254,254,254]#
pix = pix[:,np.argsort(x_wave),:]
plt.imshow(pix,aspect='equal',extent=(x_wave.min(),x_wave.max(), 255, 0),interpolation='bilinear')#aspect='auto''nearest''bilinear',aspect='equal',extent=(0, xsize, 1000, 0)
plt.gca().invert_yaxis()
plt.tight_layout()
plt.show()
########################################################################
########################################################################
def Load_profile_value_plot_nw(self):
global profile_loc
profile_loc = np.array([],dtype=str)
self.scrollArea_lpvp = QtGui.QScrollArea()
self.scrollArea_lpvp.resize(490, 330)
self.scrollArea_lpvp.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)#AlwaysOn
self.scrollArea_lpvp.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.scrollArea_lpvp.setWindowTitle('Load Profile(s) and Plot')
self.scrollArea_lpvp.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(12)
self.nw_lpvp = QtGui.QWidget()
self.nw_lpvp.resize(480, 320)
self.scrollArea_lpvp.setWidget(self.nw_lpvp)
        self.pushButton_adp
# Copyright 2019, The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EncodingStageInterface, its adaptive extension, and their implementations.
The interfaces are designed to support encoding and decoding that may happen
in different locations, including possibly different TensorFlow `Session`
objects, without the implementer needing to understand how any communication is
realized. Example scenarios include
* Both encoding and decoding can happen in the same location, such as for
experimental evaluation of efficiency, and no communication is necessary.
* Both encoding and decoding can happen in different locations, but run in the
same `Session`, such as distributed datacenter training. The communication
between locations is handled by TensorFlow.
* Encoding and decoding can happen on multiple locations, and communication
between them needs to happen outside of `TensorFlow`, such as encoding the
state of a model which is sent to a mobile device to be later decoded and used
for inference.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import enum
import six
import tensorflow as tf
nest = tf.contrib.framework.nest
INITIAL_STATE_SCOPE_SUFFIX = '_initial_state'
UPDATE_STATE_SCOPE_SUFFIX = '_update_state'
GET_PARAMS_SCOPE_SUFFIX = '_get_params'
ENCODE_SCOPE_SUFFIX = '_encode'
DECODE_SCOPE_SUFFIX = '_decode'
class StateAggregationMode(enum.Enum):
"""Enum of available modes of aggregation for state.
This enum serves as a declaration of how the `state_update_tensors` returned
by the `encode` method of `StatefulEncodingStageInterface` should be
aggregated, before being passed to the `update_state` method.
This is primarily relevant for the setting where the encoding happens in
multiple locations, and a function of the encoded objects needs to be computed
at a central node. The implementation of these modes can differ depending on
the context. For instance, aggregation of these values in a star topology will
  look different from a multi-tier aggregation, which needs to know how an
  intermediary representation is to be merged.
List of available values:
* `SUM`: Summation.
* `MIN`: Minimum.
* `MAX`: Maximum.
  * `STACK`: Stacking along a new dimension. This can be necessary for computing
    an arbitrary function of a collection of those values, such as a percentile.
"""
SUM = 1
MIN = 2
MAX = 3
STACK = 4
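# A minimal sketch (not part of this module) of how an aggregator might combine
# a list of per-client tensors according to StateAggregationMode. The helper
# name is hypothetical; tf.add_n, tf.reduce_min, tf.reduce_max and tf.stack are
# standard TensorFlow ops.
def _aggregate_state_update_tensors_example(mode, tensors):
  """Aggregates a list of `Tensor` objects according to `mode`."""
  if mode == StateAggregationMode.SUM:
    return tf.add_n(tensors)
  elif mode == StateAggregationMode.MIN:
    return tf.reduce_min(tf.stack(tensors), axis=0)
  elif mode == StateAggregationMode.MAX:
    return tf.reduce_max(tf.stack(tensors), axis=0)
  elif mode == StateAggregationMode.STACK:
    return tf.stack(tensors)
  raise ValueError('Unknown aggregation mode: %s' % mode)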
@six.add_metaclass(abc.ABCMeta)
class EncodingStageInterface(object):
"""Interface for the core of encoding logic.
This core interface should support encoding being executed in a variety of
contexts. For instance,
* Both encoding and decoding can happen in the same location, such as for
experimental evaluation of efficiency.
* Both encoding and decoding can happen in different locations, but run in the
same `Session`, such as distributed datacenter training.
* Encoding and decoding can happen in multiple locations, and communication
between them needs to happen outside of `TensorFlow`, such as compressing
a state of a model which is sent to a mobile device to be later used for
inference.
This interface is designed such that its implementer need not worry about the
potential communication patterns, and the implementation will support all.
Each implementation of this interface is supposed to be a relatively
elementary transformation. In particular, it does not need to realize any
  representation savings by itself. Instead, a particular composition of these
  elementary transformations will realize the desired savings. These
compositions are realized by the `Encoder` class.
Each implementation should also be wrapped by `tf_style_encoding_stage` to
ensure adherence to the TensorFlow style guide. The adherence is enforced by
the `BaseEncodingStageTest` test class. See `test_utils.py` for more details.
For an adaptive version with a broader interface, see
`AdaptiveEncodingStageInterface`.
"""
@abc.abstractproperty
def name(self):
"""Name of the encoding stage.
This is a general name for the implementation of this interface, which is
used mainly by the `Encoder` class to create appropriate TensorFlow name
scopes when composing individual encoding stages.
"""
@abc.abstractproperty
def compressible_tensors_keys(self):
"""Keys of encoded tensors allowed to be further encoded.
These keys correspond to tensors in object returned by the `encode` method,
that are allowed to be further lossily compressed.
This property does not directly impact the functionality, but is used by the
`Encoder` class to validate composition.
Returns:
A list of `string` values.
"""
@abc.abstractproperty
def commutes_with_sum(self):
"""`True/False` based on whether the encoding commutes with sum.
Iff `True`, it means that given multiple inputs `x` with the same `shape`
and `dtype`, and the same `params` argument of the `encode` method, the
implementation is such that every value in the returned `encoded_tensors`
can be first summed, before being passed to the decoding functionality, and
the output should be identical (up to numerical precision) to summing the
fully decoded `Tensor` objects.
Note that this also assumes that each of the `decode` methods would be used
with the same values of `decode_params`.
Returns:
A boolean, `True` iff the encoding commutes with sum.
"""
@abc.abstractproperty
def decode_needs_input_shape(self):
"""Whether original shape of the encoded object is needed for decoding.
Iff `True`, it means that the `shape` of the `x` argument to the `encode`
method needs to be provided to the `decode` method. For instance, this is
needed for bitpacking, where inputs of multiple shapes can result in
    identical bitpacked representations. Note, however, that the shape information
    should not be stored in the return structure of the `encode` method.
This property will be used by `Encoder` to efficiently realize the
composition of implementations of this interface, and to make the necessary
shape information available.
"""
@abc.abstractmethod
def get_params(self):
"""Returns the parameters needed for encoding.
This method returns parameters controlling the behavior of the `encode` and
`decode` methods.
Implementation of this method should clearly document what are the keys of
parameters returned by this method, in order for a potential stateful
subclass being able to adaptively modify only existing keys.
Note that this method is not purely functional in terms of `TensorFlow`. The
params can be derived from an internal state of the compressor. For
instance, if a constructor optionally takes a `Variable` as an input
argument, which is allowed to change during iterative execution, that
`Variable`, or a function of it, would be exposed via this method. However,
only values that can be TensorFlow values should be exposed via params. If a
parameter always needs to be a Python constant, for instance used for Python
control flow, it should not be exposed via params, and accessed via `self`
instead.
Returns:
A tuple `(encode_params, decode_params)`, where
`encode_params`: A dictionary to be passed as argument to the `encode`
method.
`decode_params`: A dictionary to be passed as argument to the `decode`
method.
Each value of the dictionaries can be either a `Tensor` or any python
constant.
"""
@abc.abstractmethod
def encode(self, x, encode_params):
"""Encodes a given `Tensor`.
This method can create TensorFlow variables, which can be updated every time
the encoding is executed. An example is an encoder that internally remembers
the error incurred by previous encoding, and adds it to `x` in the next
iteration, before executing the encoding.
However, this method may be called in an entirely separate graph from all
other methods. That is, the implementer of this class can *only* assume such
variables can be accessed from this method but not from others.
Args:
x: A `Tensor`, input to be encoded.
encode_params: A dictionary, containing the parameters needed for the
encoding. The structure needs to be the return structure of the
`get_params` method.
Returns:
A dictionary of `Tensor` objects representing the encoded input `x`.
"""
@abc.abstractmethod
def decode(self,
encoded_tensors,
decode_params,
num_summands=None,
shape=None):
"""Decodes the encoded representation.
This method is the inverse transformation of the `encode` method. The
`encoded_tensors` argument is expected to be the output structure of
`encode` method.
The `num_summands` argument is needed because the | |
return isinstance(self.inner, IDLInterface) or \
isinstance(self.inner, IDLExternalInterface)
def isCallbackInterface(self):
return self.isInterface() and self.inner.isCallback()
def isNonCallbackInterface(self):
return self.isInterface() and not self.inner.isCallback()
def isEnum(self):
return isinstance(self.inner, IDLEnum)
def resolveType(self, parentScope):
assert isinstance(parentScope, IDLScope)
self.inner.resolve(parentScope)
def isComplete(self):
return True
def tag(self):
if self.isInterface():
return IDLType.Tags.interface
elif self.isEnum():
return IDLType.Tags.enum
elif self.isDictionary():
return IDLType.Tags.dictionary
else:
assert False
def isDistinguishableFrom(self, other):
if other.isUnion():
# Just forward to the union; it'll deal
return other.isDistinguishableFrom(self)
assert self.isInterface() or self.isEnum() or self.isDictionary()
if self.isEnum():
return (other.isInterface() or other.isObject() or
other.isCallback() or other.isDictionary() or
other.isSequence() or other.isArray() or
other.isDate())
if other.isPrimitive() or other.isString() or other.isEnum() or other.isDate():
return True
if self.isDictionary():
return (not other.nullable() and
(other.isNonCallbackInterface() or other.isSequence() or
other.isArray()))
assert self.isInterface()
# XXXbz need to check that the interfaces can't be implemented
# by the same object
if other.isInterface():
if other.isSpiderMonkeyInterface():
# Just let |other| handle things
return other.isDistinguishableFrom(self)
assert self.isGeckoInterface() and other.isGeckoInterface()
if self.inner.isExternal() or other.unroll().inner.isExternal():
return self != other
return (len(self.inner.interfacesBasedOnSelf &
other.unroll().inner.interfacesBasedOnSelf) == 0 and
(self.isNonCallbackInterface() or
other.isNonCallbackInterface()))
if (other.isDictionary() or other.isCallback() or
other.isSequence() or other.isArray()):
return self.isNonCallbackInterface()
# Not much else |other| can be
assert other.isObject()
return False
class IDLBuiltinType(IDLType):
Types = enum(
# The integer types
'byte',
'octet',
'short',
'unsigned_short',
'long',
'unsigned_long',
'long_long',
'unsigned_long_long',
# Additional primitive types
'boolean',
'float',
'double',
# Other types
'any',
'domstring',
'object',
'date',
'void',
# Funny stuff
'ArrayBuffer',
'ArrayBufferView',
'Int8Array',
'Uint8Array',
'Uint8ClampedArray',
'Int16Array',
'Uint16Array',
'Int32Array',
'Uint32Array',
'Float32Array',
'Float64Array'
)
TagLookup = {
Types.byte: IDLType.Tags.int8,
Types.octet: IDLType.Tags.uint8,
Types.short: IDLType.Tags.int16,
Types.unsigned_short: IDLType.Tags.uint16,
Types.long: IDLType.Tags.int32,
Types.unsigned_long: IDLType.Tags.uint32,
Types.long_long: IDLType.Tags.int64,
Types.unsigned_long_long: IDLType.Tags.uint64,
Types.boolean: IDLType.Tags.bool,
Types.float: IDLType.Tags.float,
Types.double: IDLType.Tags.double,
Types.any: IDLType.Tags.any,
Types.domstring: IDLType.Tags.domstring,
Types.object: IDLType.Tags.object,
Types.date: IDLType.Tags.date,
Types.void: IDLType.Tags.void,
Types.ArrayBuffer: IDLType.Tags.interface,
Types.ArrayBufferView: IDLType.Tags.interface,
Types.Int8Array: IDLType.Tags.interface,
Types.Uint8Array: IDLType.Tags.interface,
Types.Uint8ClampedArray: IDLType.Tags.interface,
Types.Int16Array: IDLType.Tags.interface,
Types.Uint16Array: IDLType.Tags.interface,
Types.Int32Array: IDLType.Tags.interface,
Types.Uint32Array: IDLType.Tags.interface,
Types.Float32Array: IDLType.Tags.interface,
Types.Float64Array: IDLType.Tags.interface
}
def __init__(self, location, name, type):
IDLType.__init__(self, location, name)
self.builtin = True
self._typeTag = type
def isPrimitive(self):
return self._typeTag <= IDLBuiltinType.Types.double
def isString(self):
return self._typeTag == IDLBuiltinType.Types.domstring
def isInteger(self):
return self._typeTag <= IDLBuiltinType.Types.unsigned_long_long
def isArrayBuffer(self):
return self._typeTag == IDLBuiltinType.Types.ArrayBuffer
def isArrayBufferView(self):
return self._typeTag == IDLBuiltinType.Types.ArrayBufferView
def isTypedArray(self):
return self._typeTag >= IDLBuiltinType.Types.Int8Array and \
self._typeTag <= IDLBuiltinType.Types.Float64Array
def isInterface(self):
# TypedArray things are interface types per the TypedArray spec,
# but we handle them as builtins because SpiderMonkey implements
# all of it internally.
return self.isArrayBuffer() or \
self.isArrayBufferView() or \
self.isTypedArray()
def isNonCallbackInterface(self):
# All the interfaces we can be are non-callback
return self.isInterface()
def isFloat(self):
return self._typeTag == IDLBuiltinType.Types.float or \
self._typeTag == IDLBuiltinType.Types.double
def tag(self):
return IDLBuiltinType.TagLookup[self._typeTag]
def isDistinguishableFrom(self, other):
if other.isUnion():
# Just forward to the union; it'll deal
return other.isDistinguishableFrom(self)
if self.isPrimitive() or self.isString():
return (other.isInterface() or other.isObject() or
other.isCallback() or other.isDictionary() or
other.isSequence() or other.isArray() or
other.isDate())
if self.isAny():
# Can't tell "any" apart from anything
return False
if self.isObject():
return other.isPrimitive() or other.isString() or other.isEnum()
if self.isDate():
return (other.isPrimitive() or other.isString() or other.isEnum() or
other.isInterface() or other.isCallback() or
other.isDictionary() or other.isSequence() or
other.isArray())
if self.isVoid():
return not other.isVoid()
# Not much else we could be!
assert self.isSpiderMonkeyInterface()
# Like interfaces, but we know we're not a callback
return (other.isPrimitive() or other.isString() or other.isEnum() or
other.isCallback() or other.isDictionary() or
other.isSequence() or other.isArray() or other.isDate() or
(other.isInterface() and (
# ArrayBuffer is distinguishable from everything
# that's not an ArrayBuffer or a callback interface
(self.isArrayBuffer() and not other.isArrayBuffer()) or
# ArrayBufferView is distinguishable from everything
# that's not an ArrayBufferView or typed array.
(self.isArrayBufferView() and not other.isArrayBufferView() and
not other.isTypedArray()) or
# Typed arrays are distinguishable from everything
# except ArrayBufferView and the same type of typed
# array
(self.isTypedArray() and not other.isArrayBufferView() and not
(other.isTypedArray() and other.name == self.name)))))
BuiltinTypes = {
IDLBuiltinType.Types.byte:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Byte",
IDLBuiltinType.Types.byte),
IDLBuiltinType.Types.octet:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Octet",
IDLBuiltinType.Types.octet),
IDLBuiltinType.Types.short:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Short",
IDLBuiltinType.Types.short),
IDLBuiltinType.Types.unsigned_short:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnsignedShort",
IDLBuiltinType.Types.unsigned_short),
IDLBuiltinType.Types.long:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Long",
IDLBuiltinType.Types.long),
IDLBuiltinType.Types.unsigned_long:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnsignedLong",
IDLBuiltinType.Types.unsigned_long),
IDLBuiltinType.Types.long_long:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "LongLong",
IDLBuiltinType.Types.long_long),
IDLBuiltinType.Types.unsigned_long_long:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnsignedLongLong",
IDLBuiltinType.Types.unsigned_long_long),
IDLBuiltinType.Types.boolean:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Boolean",
IDLBuiltinType.Types.boolean),
IDLBuiltinType.Types.float:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Float",
IDLBuiltinType.Types.float),
IDLBuiltinType.Types.double:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Double",
IDLBuiltinType.Types.double),
IDLBuiltinType.Types.any:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Any",
IDLBuiltinType.Types.any),
IDLBuiltinType.Types.domstring:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "String",
IDLBuiltinType.Types.domstring),
IDLBuiltinType.Types.object:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Object",
IDLBuiltinType.Types.object),
IDLBuiltinType.Types.date:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Date",
IDLBuiltinType.Types.date),
IDLBuiltinType.Types.void:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Void",
IDLBuiltinType.Types.void),
IDLBuiltinType.Types.ArrayBuffer:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "ArrayBuffer",
IDLBuiltinType.Types.ArrayBuffer),
IDLBuiltinType.Types.ArrayBufferView:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "ArrayBufferView",
IDLBuiltinType.Types.ArrayBufferView),
IDLBuiltinType.Types.Int8Array:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Int8Array",
IDLBuiltinType.Types.Int8Array),
IDLBuiltinType.Types.Uint8Array:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Uint8Array",
IDLBuiltinType.Types.Uint8Array),
IDLBuiltinType.Types.Uint8ClampedArray:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Uint8ClampedArray",
IDLBuiltinType.Types.Uint8ClampedArray),
IDLBuiltinType.Types.Int16Array:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Int16Array",
IDLBuiltinType.Types.Int16Array),
IDLBuiltinType.Types.Uint16Array:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Uint16Array",
IDLBuiltinType.Types.Uint16Array),
IDLBuiltinType.Types.Int32Array:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Int32Array",
IDLBuiltinType.Types.Int32Array),
IDLBuiltinType.Types.Uint32Array:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Uint32Array",
IDLBuiltinType.Types.Uint32Array),
IDLBuiltinType.Types.Float32Array:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Float32Array",
IDLBuiltinType.Types.Float32Array),
IDLBuiltinType.Types.Float64Array:
IDLBuiltinType(BuiltinLocation("<builtin type>"), "Float64Array",
IDLBuiltinType.Types.Float64Array)
}
integerTypeSizes = {
IDLBuiltinType.Types.byte: (-128, 127),
IDLBuiltinType.Types.octet: (0, 255),
IDLBuiltinType.Types.short: (-32768, 32767),
IDLBuiltinType.Types.unsigned_short: (0, 65535),
IDLBuiltinType.Types.long: (-2147483648, 2147483647),
IDLBuiltinType.Types.unsigned_long: (0, 4294967295),
IDLBuiltinType.Types.long_long: (-9223372036854775808,
9223372036854775807),
IDLBuiltinType.Types.unsigned_long_long: (0, 18446744073709551615)
}
def matchIntegerValueToType(value):
for type, extremes in integerTypeSizes.items():
(min, max) = extremes
if value <= max and value >= min:
return BuiltinTypes[type]
return None
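# Illustrative sketch (not part of the original parser). Because
# integerTypeSizes is a plain dict, the first matching entry depends on dict
# iteration order, so matchIntegerValueToType only guarantees *some* type wide
# enough for the value, or None when nothing fits.
def _exampleMatchIntegerValueToType():
    fits_something = matchIntegerValueToType(300)   # some integer type that can hold 300
    fits_nothing = matchIntegerValueToType(2**64)   # None: exceeds unsigned long long
    return fits_something, fits_nothing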
class IDLValue(IDLObject):
def __init__(self, location, type, value):
IDLObject.__init__(self, location)
self.type = type
assert isinstance(type, IDLType)
self.value = value
def coerceToType(self, type, location):
if type == self.type:
return self # Nothing to do
# If the type allows null, rerun this matching on the inner type
if type.nullable():
innerValue = self.coerceToType(type.inner, location)
return IDLValue(self.location, type, innerValue.value)
# Else, see if we can coerce to 'type'.
if self.type.isInteger() and type.isInteger():
# We're both integer types. See if we fit.
(min, max) = integerTypeSizes[type._typeTag]
if self.value <= max and self.value >= min:
# Promote
return IDLValue(self.location, type, self.value)
else:
raise WebIDLError("Value %s is out of range for type %s." %
(self.value, type), [location])
elif self.type.isString() and type.isEnum():
# Just keep our string, but make sure it's a valid value for this enum
if self.value not in type.inner.values():
raise WebIDLError("'%s' is not a valid default value for enum %s"
% (self.value, type.inner.identifier.name),
[location, type.inner.location])
return self
else:
raise WebIDLError("Cannot coerce type %s to type %s." %
(self.type, type), [location])
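# Coercion sketch (illustrative): an integer IDLValue holding 42 coerces to a
# 'byte' type because 42 fits in (-128, 127), whereas 300 coerced to 'byte'
# raises a WebIDLError; a string value coerced to an enum type is kept as-is
# but must be one of the enum's declared values.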
class IDLNullValue(IDLObject):
def __init__(self, location):
IDLObject.__init__(self, location)
self.type = None
self.value = None
def coerceToType(self, type, location):
if (not isinstance(type, IDLNullableType) and
not (type.isUnion() and type.hasNullableType) and
not type.isDictionary() and
not type.isAny()):
raise WebIDLError("Cannot coerce null value to type %s." % type,
[location])
nullValue = IDLNullValue(self.location)
nullValue.type = type
return nullValue
class IDLInterfaceMember(IDLObjectWithIdentifier):
Tags = enum(
'Const',
'Attr',
'Method'
)
def __init__(self, location, identifier, tag):
IDLObjectWithIdentifier.__init__(self, location, None, identifier)
self.tag = tag
self._extendedAttrDict = {}
def isMethod(self):
return self.tag == IDLInterfaceMember.Tags.Method
def isAttr(self):
return self.tag == IDLInterfaceMember.Tags.Attr
def isConst(self):
return self.tag == IDLInterfaceMember.Tags.Const
def addExtendedAttributes(self, attrs):
for attr in attrs:
self.handleExtendedAttribute(attr)
attrlist = attr.listValue()
self._extendedAttrDict[attr.identifier()] = attrlist if len(attrlist) else True
def handleExtendedAttribute(self, attr):
pass
def getExtendedAttribute(self, name):
return self._extendedAttrDict.get(name, None)
class IDLConst(IDLInterfaceMember):
def __init__(self, location, identifier, type, value):
IDLInterfaceMember.__init__(self, location, identifier,
IDLInterfaceMember.Tags.Const)
assert isinstance(type, IDLType)
if type.isDictionary():
raise WebIDLError("A constant cannot be of a dictionary type",
[self.location])
self.type = type
self.value = value
def __str__(self):
return "'%s' const '%s'" % (self.type, self.identifier)
def finish(self, scope):
if not self.type.isComplete():
type = self.type.complete(scope)
if not type.isPrimitive() and not type.isString():
locations = [self.type.location, type.location]
try:
locations.append(type.inner.location)
except:
pass
raise WebIDLError("Incorrect type for constant", locations)
self.type = type
# The value might not match the type
coercedValue = self.value.coerceToType(self.type, self.location)
assert coercedValue
self.value = coercedValue
def validate(self):
pass
class IDLAttribute(IDLInterfaceMember):
def __init__(self, location, identifier, type, readonly, inherit,
static=False):
IDLInterfaceMember.__init__(self, location, identifier,
IDLInterfaceMember.Tags.Attr)
assert isinstance(type, IDLType)
self.type = type
self.readonly = readonly
self.inherit = inherit
self.static = static
self.lenientThis = False
if readonly and inherit:
raise WebIDLError("An attribute cannot be both 'readonly' and 'inherit'",
[self.location])
def isStatic(self):
return self.static
def __str__(self):
return "'%s' attribute '%s'" % (self.type, self.identifier)
def finish(self, scope):
if not self.type.isComplete():
t = self.type.complete(scope)
assert not isinstance(t, IDLUnresolvedType)
assert not isinstance(t, IDLTypedefType)
assert not isinstance(t.name, IDLUnresolvedIdentifier)
self.type = t
if self.type.isDictionary():
raise WebIDLError("An attribute cannot be of a dictionary type",
[self.location])
if self.type.isSequence():
raise WebIDLError("An attribute cannot be of a sequence type",
[self.location])
if self.type.isUnion():
for f in self.type.flatMemberTypes:
if f.isDictionary():
raise WebIDLError("An attribute cannot be of a union "
"type if | |
0.5], [1, 1], [0.5, 0.5], [0, 0]]))
def test_Polygon_to_shapely_polygon():
exterior = [(0, 0), (1, 0), (1, 1), (0, 1)]
poly = ia.Polygon(exterior)
poly_shapely = poly.to_shapely_polygon()
for (x_exp, y_exp), (x_obs, y_obs) in zip(exterior, poly_shapely.exterior.coords):
assert x_exp - 1e-8 < x_obs < x_exp + 1e-8
assert y_exp - 1e-8 < y_obs < y_exp + 1e-8
def test_Polygon_to_bounding_box():
poly = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
bb = poly.to_bounding_box()
assert 0 - 1e-8 < bb.x1 < 0 + 1e-8
assert 0 - 1e-8 < bb.y1 < 0 + 1e-8
assert 1 - 1e-8 < bb.x2 < 1 + 1e-8
assert 1 - 1e-8 < bb.y2 < 1 + 1e-8
poly = ia.Polygon([(0.5, 0), (1, 1), (0, 1)])
bb = poly.to_bounding_box()
assert 0 - 1e-8 < bb.x1 < 0 + 1e-8
assert 0 - 1e-8 < bb.y1 < 0 + 1e-8
assert 1 - 1e-8 < bb.x2 < 1 + 1e-8
assert 1 - 1e-8 < bb.y2 < 1 + 1e-8
poly = ia.Polygon([(0.5, 0.5), (2, 0.1), (1, 1)])
bb = poly.to_bounding_box()
assert 0.5 - 1e-8 < bb.x1 < 0.5 + 1e-8
assert 0.1 - 1e-8 < bb.y1 < 0.1 + 1e-8
assert 2.0 - 1e-8 < bb.x2 < 2.0 + 1e-8
assert 1.0 - 1e-8 < bb.y2 < 1.0 + 1e-8
def test_Polygon_from_shapely():
exterior = [(0, 0), (1, 0), (1, 1), (0, 1)]
poly_shapely = shapely.geometry.Polygon(exterior)
poly = ia.Polygon.from_shapely(poly_shapely)
# shapely messes up the point ordering, so we try to correct it here
start_idx = 0
for i, (x, y) in enumerate(poly.exterior):
dist = np.sqrt((exterior[0][0] - x) ** 2 + (exterior[0][1] - y) ** 2)
if dist < 1e-4:
start_idx = i
break
poly = poly.change_first_point_by_index(start_idx)
for (x_exp, y_exp), (x_obs, y_obs) in zip(exterior, poly.exterior):
assert x_exp - 1e-8 < x_obs < x_exp + 1e-8
assert y_exp - 1e-8 < y_obs < y_exp + 1e-8
def test_Polygon_copy():
poly = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)], label="test")
poly_cp = poly.copy()
assert poly.exterior.dtype.type == poly_cp.exterior.dtype.type
assert poly.exterior.shape == poly_cp.exterior.shape
assert np.allclose(poly.exterior, poly_cp.exterior)
assert poly.label == poly_cp.label
def test_Polygon_deepcopy():
poly = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)], label="test")
poly_cp = poly.deepcopy()
assert poly.exterior.dtype.type == poly_cp.exterior.dtype.type
assert poly.exterior.shape == poly_cp.exterior.shape
assert np.allclose(poly.exterior, poly_cp.exterior)
assert poly.label == poly_cp.label
poly = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)], label="test")
poly_cp = poly.deepcopy()
poly_cp.exterior[0, 0] = 100.0
poly_cp.label = "test2"
assert poly.exterior.dtype.type == poly_cp.exterior.dtype.type
assert poly.exterior.shape == poly_cp.exterior.shape
assert not np.allclose(poly.exterior, poly_cp.exterior)
assert not poly.label == poly_cp.label
def test_Polygon___repr__():
_test_Polygon_repr_str(lambda poly: poly.__repr__())
def test_Polygon___str__():
_test_Polygon_repr_str(lambda poly: poly.__str__())
def _test_Polygon_repr_str(func):
# ints
poly = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)], label="test")
s = func(poly)
assert s == "Polygon([(x=0.000, y=0.000), (x=1.000, y=0.000), (x=1.000, y=1.000), (x=0.000, y=1.000)] " \
+ "(4 points), label=test)"
# floats
poly = ia.Polygon([(0, 0.5), (1.5, 0), (1, 1), (0, 1)], label="test")
s = func(poly)
assert s == "Polygon([(x=0.000, y=0.500), (x=1.500, y=0.000), (x=1.000, y=1.000), (x=0.000, y=1.000)] " \
+ "(4 points), label=test)"
# label None
poly = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)], label=None)
s = func(poly)
assert s == "Polygon([(x=0.000, y=0.000), (x=1.000, y=0.000), (x=1.000, y=1.000), (x=0.000, y=1.000)] " \
+ "(4 points), label=None)"
# no points
poly = ia.Polygon([], label="test")
s = func(poly)
assert s == "Polygon([] (0 points), label=test)"
def test_Polygon_exterior_almost_equals():
# exactly same exterior
poly_a = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
poly_b = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
assert poly_a.exterior_almost_equals(poly_b)
# one point duplicated
poly_a = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
poly_b = ia.Polygon([(0, 0), (1, 0), (1, 1), (1, 1), (0, 1)])
assert poly_a.exterior_almost_equals(poly_b)
# several points added without changing geometry
poly_a = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
poly_b = ia.Polygon([(0, 0), (0.5, 0), (1, 0), (1, 0.5), (1, 1), (0.5, 1), (0, 1), (0, 0.5)])
assert poly_a.exterior_almost_equals(poly_b)
# different order
poly_a = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
poly_b = ia.Polygon([(0, 1), (1, 1), (1, 0), (0, 0)])
assert poly_a.exterior_almost_equals(poly_b)
# tiny shift below tolerance
poly_a = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
poly_b = ia.Polygon([(0+1e-6, 0), (1+1e-6, 0), (1+1e-6, 1), (0+1e-6, 1)])
assert poly_a.exterior_almost_equals(poly_b, max_distance=1e-3)
# tiny shift above tolerance
poly_a = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
poly_b = ia.Polygon([(0+1e-6, 0), (1+1e-6, 0), (1+1e-6, 1), (0+1e-6, 1)])
assert not poly_a.exterior_almost_equals(poly_b, max_distance=1e-9)
# shifted polygon towards half overlap
poly_a = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
poly_b = ia.Polygon([(0.5, 0), (1.5, 0), (1.5, 1), (0.5, 1)])
assert not poly_a.exterior_almost_equals(poly_b)
# shifted polygon towards no overlap at all
poly_a = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
poly_b = ia.Polygon([(100, 0), (101, 0), (101, 1), (100, 1)])
assert not poly_a.exterior_almost_equals(poly_b)
# both polygons without points
poly_a = ia.Polygon([])
poly_b = ia.Polygon([])
assert poly_a.exterior_almost_equals(poly_b)
# both polygons with one point
poly_a = ia.Polygon([(0, 0)])
poly_b = ia.Polygon([(0, 0)])
assert poly_a.exterior_almost_equals(poly_b)
poly_a = ia.Polygon([(0, 0)])
poly_b = ia.Polygon([(100, 100)])
assert not poly_a.exterior_almost_equals(poly_b)
poly_a = ia.Polygon([(0, 0)])
poly_b = ia.Polygon([(0+1e-6, 0)])
assert poly_a.exterior_almost_equals(poly_b, max_distance=1e-2)
poly_a = ia.Polygon([(0, 0)])
poly_b = ia.Polygon([(0+1, 0)])
assert not poly_a.exterior_almost_equals(poly_b, max_distance=1e-2)
# both polygons with two points
poly_a = ia.Polygon([(0, 0), (1, 0)])
poly_b = ia.Polygon([(0, 0), (1, 0)])
assert poly_a.exterior_almost_equals(poly_b)
poly_a = ia.Polygon([(0, 0), (0, 0)])
poly_b = ia.Polygon([(0, 0), (0, 0)])
assert poly_a.exterior_almost_equals(poly_b)
poly_a = ia.Polygon([(0, 0), (1, 0)])
poly_b = ia.Polygon([(0, 0), (2, 0)])
assert not poly_a.exterior_almost_equals(poly_b)
poly_a = ia.Polygon([(0, 0), (1, 0)])
poly_b = ia.Polygon([(0+1e-6, 0), (1+1e-6, 0)])
assert poly_a.exterior_almost_equals(poly_b, max_distance=1e-2)
# both polygons with three points
poly_a = ia.Polygon([(0, 0), (1, 0), (0.5, 1)])
poly_b = ia.Polygon([(0, 0), (1, 0), (0.5, 1)])
assert poly_a.exterior_almost_equals(poly_b)
poly_a = ia.Polygon([(0, 0), (1, 0), (0.5, 1)])
poly_b = ia.Polygon([(0, 0), (1, -1), (0.5, 1)])
assert not poly_a.exterior_almost_equals(poly_b)
poly_a = ia.Polygon([(0, 0), (1, 0), (0.5, 1)])
poly_b = ia.Polygon([(0, 0), (1+1e-6, 0), (0.5, 1)])
assert poly_a.exterior_almost_equals(poly_b, max_distance=1e-2)
# one polygon with zero points, other with one
poly_a = ia.Polygon([])
poly_b = ia.Polygon([(0, 0)])
assert not poly_a.exterior_almost_equals(poly_b)
poly_a = ia.Polygon([(0, 0)])
poly_b = ia.Polygon([])
assert not poly_a.exterior_almost_equals(poly_b)
# one polygon with one point, other with two
poly_a = ia.Polygon([(-10, -20)])
poly_b = ia.Polygon([(0, 0), (1, 0)])
assert not poly_a.exterior_almost_equals(poly_b)
poly_a = ia.Polygon([(0, 0)])
poly_b = ia.Polygon([(0, 0), (1, 0)])
assert not poly_a.exterior_almost_equals(poly_b)
poly_a = ia.Polygon([(0, 0), (1, 0)])
poly_b = ia.Polygon([(0, 0)])
assert not poly_a.exterior_almost_equals(poly_b)
poly_a = ia.Polygon([(0, 0), (0, 0)])
poly_b = ia.Polygon([(0, 0)])
assert poly_a.exterior_almost_equals(poly_b)
poly_a = ia.Polygon([(0, 0)])
poly_b = ia.Polygon([(0, 0), (0, 0)])
assert poly_a.exterior_almost_equals(poly_b)
poly_a = ia.Polygon([(0, 0), (0+1e-6, 0)])
poly_b = ia.Polygon([(0, 0)])
assert poly_a.exterior_almost_equals(poly_b, max_distance=1e-2)
poly_a = ia.Polygon([(0, 0), (0+1e-4, 0)])
poly_b = ia.Polygon([(0, 0)])
assert not poly_a.exterior_almost_equals(poly_b, max_distance=1e-9)
# one polygon with one point, other with three
poly_a = ia.Polygon([(0, 0)])
poly_b = ia.Polygon([(0, 0), (1, 0), (0.5, 1)])
assert not poly_a.exterior_almost_equals(poly_b)
poly_a = ia.Polygon([(0, 0), (1, 0), (0.5, 1)])
poly_b = ia.Polygon([(0, 0)])
assert not poly_a.exterior_almost_equals(poly_b)
poly_a = ia.Polygon([(0, 0)])
poly_b = ia.Polygon([(0, 0), (0, 0), (0, 0)])
assert poly_a.exterior_almost_equals(poly_b)
poly_a = ia.Polygon([(0, 0)])
poly_b = ia.Polygon([(0, 0), (0, 0), (1, 0)])
assert not poly_a.exterior_almost_equals(poly_b)
poly_a = ia.Polygon([(0, 0)])
poly_b = ia.Polygon([(0, 0), (1, 0), (0, 0)])
assert not poly_a.exterior_almost_equals(poly_b)
poly_a = ia.Polygon([(0, 0)])
poly_b = ia.Polygon([(0, 0), (0+1e-6, 0), (0, 0+1e-6)])
assert poly_a.exterior_almost_equals(poly_b, max_distance=1e-2)
poly_a = ia.Polygon([(0, 0)])
poly_b = ia.Polygon([(0, 0), (0+1e-4, 0), (0, 0+1e-4)])
assert not poly_a.exterior_almost_equals(poly_b, max_distance=1e-9)
# two polygons that are different, but with carefully placed points so that interpolation between polygon
# points is necessary to spot the difference
poly_a = ia.Polygon([(1, 0), (1, 1), (0, 1)])
poly_b = ia.Polygon([(1, 0), (1, 1), (0, 1), (1-1e-6, 1-1e-6)])
assert poly_a.exterior_almost_equals(poly_b, max_distance=1e-4, interpolate=0)
assert not poly_a.exterior_almost_equals(poly_b, max_distance=1e-4, interpolate=1)
def test_Polygon_almost_equals():
poly_a = ia.Polygon([])
poly_b = ia.Polygon([])
assert poly_a.almost_equals(poly_b)
poly_a = ia.Polygon([(0, 0)])
poly_b = ia.Polygon([(0, 0)])
assert poly_a.almost_equals(poly_b)
poly_a = ia.Polygon([(0, 0)])
poly_b = ia.Polygon([(0, 0), (0, 0)])
assert poly_a.almost_equals(poly_b)
poly_a = ia.Polygon([(0, 0)])
poly_b = ia.Polygon([(0, 0), (0,
----------
single_layer : integer or None
If not None, all polygons will be transferred to the
layer indicated by this number.
single_datatype : integer or None
If not None, all polygons will be transferred to the
datatype indicated by this number.
single_texttype : integer or None
If not None, all labels will be transferred to the
texttype indicated by this number.
Returns
-------
out : `Cell`
This cell.
"""
self.labels = self.get_labels()
if single_layer is not None and single_datatype is not None:
for lbl in self.labels:
lbl.layer = single_layer
lbl.texttype = single_texttype
elif single_layer is not None:
for lbl in self.labels:
lbl.layer = single_layer
elif single_datatype is not None:
for lbl in self.labels:
lbl.texttype = single_texttype
self.polygons = self.get_polygonsets()
self.paths = self.get_paths()
if single_layer is not None and single_datatype is not None:
for poly in self.polygons:
poly.layers = [single_layer] * len(poly.polygons)
poly.datatypes = [single_datatype] * len(poly.polygons)
for path in self.paths:
path.layers = [single_layer] * path.n
path.datatypes = [single_datatype] * path.n
elif single_layer is not None:
for poly in self.polygons:
poly.layers = [single_layer] * len(poly.polygons)
for path in self.paths:
path.layers = [single_layer] * path.n
elif single_datatype is not None:
for poly in self.polygons:
poly.datatypes = [single_datatype] * len(poly.polygons)
for path in self.paths:
path.datatypes = [single_datatype] * path.n
self.references = []
return self
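# Illustrative usage of the method above (assumed to be Cell.flatten, as in
# gdspy; 'sub' stands for a hypothetical referenced cell and the keyword
# names follow the docstring):
#
#     main = gdspy.Cell("MAIN")
#     main.add(gdspy.CellReference(sub, origin=(10, 0)))
#     main.flatten(single_layer=1, single_datatype=0, single_texttype=0)
#     assert main.references == []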
def to_svg(self, outfile, scaling, attributes=""):
"""
Write an SVG fragment representation of this object.
Parameters
----------
outfile : open file
Output to write the SVG representation.
scaling : number
Scaling factor for the geometry.
attributes : string
Additional attributes to set for the cell group.
"""
outfile.write('<g id="')
outfile.write(self.name.replace("#", "_"))
outfile.write('" ')
outfile.write(attributes)
outfile.write(">\n")
for polygon in self.polygons:
polygon.to_svg(outfile, scaling)
for path in self.paths:
path.to_svg(outfile, scaling)
for label in self.labels:
label.to_svg(outfile, scaling)
for reference in self.references:
reference.to_svg(outfile, scaling)
outfile.write("</g>\n")
def write_svg(
self,
outfile,
scaling=10,
style=None,
fontstyle=None,
background="#222",
pad="5%",
):
"""
Export this cell to an SVG file.
The dimensions written to the SVG are the dimensions of the
objects created times the `scaling` factor. For example, a
circle with radius 1.5 exported with the default scaling of 10
is drawn with radius 15 in SVG user units.
Parameters
----------
outfile : file, string or Path
The file (or path) where the SVG will be written. It must be
opened for writing operations in text mode.
scaling : number
Scaling factor for the geometry.
style : dict or None
Dictionary indexed by (layer, datatype) tuples. Entries
must be dictionaries with CSS key-value pairs for the
presentation attributes of the geometry in that layer and
datatype.
fontstyle : dict or None
Dictionary indexed by (layer, texttype) tuples. Entries
must be dictionaries with CSS key-value pairs for the
presentation attributes of the labels in that layer and
texttype.
background : string or None
String specifying the background color. If None, no
background is inserted.
pad : number or string
Background margin around the cell bounding box. It can
be specified as a percentage of the width or height,
whichever is the largest.
Examples
--------
>>> cell = gdspy.Cell('MAIN')
>>> cell.add(gdspy.Rectangle((0, 0), (10, 10), layer=1))
>>> # Define fill and stroke for layer 1 and datatype 0
>>> mystyle = {(1, 0): {'fill': '#CC00FF',
'stroke': 'black'}}
>>> cell.write_svg('main.svg', style=mystyle)
"""
bb = self.get_bounding_box()
if bb is None:
return
close = True
if hasattr(outfile, "__fspath__"):
outfile = open(outfile.__fspath__(), "w")
elif isinstance(outfile, (basestring, Path)):
outfile = open(outfile, "w")
else:
close = False
if style is None:
style = {}
if fontstyle is None:
fontstyle = {}
bb *= scaling
x = bb[0, 0]
y = -bb[1, 1]
w = bb[1, 0] - bb[0, 0]
h = bb[1, 1] - bb[0, 1]
if background is not None:
if isinstance(pad, basestring):
if pad[-1] == "%":
pad = max(w, h) * float(pad[:-1]) / 100
else:
pad = float(pad)
x -= pad
y -= pad
w += 2 * pad
h += 2 * pad
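# Worked example (illustrative): with pad="5%" and a scaled bounding box of
# 200 x 80 units, pad becomes max(200, 80) * 5 / 100 = 10, so the viewBox
# origin shifts by -10 in x and y and both its width and height grow by 20.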
outfile.write(
"""<?xml version="1.0" encoding="UTF-8"?>
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"
width="{}" height="{}" viewBox="{} {} {} {}">
<defs>
<style type="text/css">
""".format(
w, h, x, y, w, h
)
)
ldkeys, ltkeys = self.get_svg_classes()
for k in ldkeys:
l, d = k
if k in style:
style_dict = style[k]
else:
c = "rgb({}, {}, {})".format(
*[
int(255 * c + 0.5)
for c in colorsys.hsv_to_rgb(
(l % 3) / 3.0 + (l % 6 // 3) / 6.0 + (l // 6) / 11.0,
1 - ((l + d) % 8) / 12.0,
1 - (d % 3) / 4.0,
)
]
)
style_dict = {"stroke": c, "fill": c, "fill-opacity": "0.5"}
outfile.write(".l{}d{} {{".format(l, d))
outfile.write(" ".join("{}: {};".format(*x) for x in style_dict.items()))
outfile.write("}\n")
for k in ltkeys:
l, t = k
if k in fontstyle:
style_dict = fontstyle[k]
else:
c = "rgb({}, {}, {})".format(
*[
int(255 * c + 0.5)
for c in colorsys.hsv_to_rgb(
(l % 3) / 3.0 + (l % 6 // 3) / 6.0 + (l // 6) / 11.0,
1 - ((l + t) % 8) / 12.0,
1 - (t % 3) / 4.0,
)
]
)
style_dict = {"stroke": "none", "fill": c}
outfile.write(".l{}t{} {{".format(l, t))
outfile.write(" ".join("{}: {};".format(*x) for x in style_dict.items()))
outfile.write("}\n")
outfile.write("</style>\n")
for cell in self.get_dependencies(True):
cell.to_svg(outfile, scaling)
outfile.write("</defs>")
if background is not None:
outfile.write(
'<rect x="{}" y="{}" width="{}" height="{}" fill="{}" stroke="none"/>\n'.format(
x, y, w, h, background
)
)
self.to_svg(outfile, scaling, 'transform="scale(1 -1)"')
outfile.write("</svg>")
if close:
outfile.close()
class CellReference(object):
"""
Simple reference to an existing cell.
Parameters
----------
ref_cell : `Cell` or string
The referenced cell or its name.
origin : array-like[2]
Position where the reference is inserted.
rotation : number
Angle of rotation of the reference (in *degrees*).
magnification : number
Magnification factor for the reference.
x_reflection : bool
If True the reference is reflected parallel to the x
direction before being rotated.
ignore_missing : bool
If False a warning is issued when the referenced cell is not
found.
Attributes
----------
ref_cell : `Cell` or string
The referenced cell or its name.
origin : array-like[2]
Position where the reference is inserted.
rotation : number
Angle of rotation of the reference (in *degrees*).
magnification : number
Magnification factor for the reference.
x_reflection : bool
If True the reference is reflected parallel to the x
direction before being rotated.
ignore_missing : bool
If False a warning is issued when the referenced cell is not
found.
properties : {integer: string} dictionary
Properties for these elements.
"""
__slots__ = (
"ref_cell",
"origin",
"rotation",
"magnification",
"x_reflection",
"properties",
)
def __init__(
self,
ref_cell,
origin=(0, 0),
rotation=None,
magnification=None,
x_reflection=False,
ignore_missing=False,
):
self.origin = origin
self.ref_cell = ref_cell
self.rotation = rotation
self.magnification = magnification
self.x_reflection = x_reflection
self.properties = {}
if not isinstance(self.ref_cell, Cell) and not ignore_missing:
warnings.warn(
"[GDSPY] Cell {0} not found; operations on this "
"CellReference may not work.".format(self.ref_cell),
stacklevel=2,
)
def __str__(self):
if isinstance(self.ref_cell, Cell):
name = self.ref_cell.name
else:
name = self.ref_cell
return 'CellReference ("{0}", at ({1[0]}, {1[1]}), rotation {2}, magnification {3}, reflection {4})'.format(
name, self.origin, self.rotation, self.magnification, self.x_reflection
)
def __repr__(self):
if isinstance(self.ref_cell, Cell):
name = self.ref_cell.name
else:
name = self.ref_cell
return 'CellReference("{0}", ({1[0]}, {1[1]}), {2}, {3}, {4})'.format(
name, self.origin, self.rotation, self.magnification, self.x_reflection
)
def to_gds(self, outfile, multiplier):
"""
Convert this object to a GDSII element.
Parameters
----------
outfile : open file
Output to write the GDSII.
multiplier : number
A number that multiplies all dimensions written in the GDSII
element.
"""
if isinstance(self.ref_cell, Cell):
name = self.ref_cell.name
else:
name = self.ref_cell
if len(name) % 2 != 0:
name = name + "\0"
outfile.write(struct.pack(">4H", 4, 0x0A00, 4 + len(name), 0x1206))
outfile.write(name.encode("ascii"))
if (
(self.rotation is not None)
or (self.magnification is not None)
or self.x_reflection
):
word = 0
values = b""
if self.x_reflection:
word += 0x8000
if not (self.magnification is None):
# This flag indicates that | |
= QtGui.QFont()
font.setFamily("Arial")
font.setWeight(75)
font.setBold(True)
self.labelRef_ManEditImage.setFont(font)
self.labelRef_ManEditImage.setObjectName("labelRef_ManEditImage")
self.gridLayout_3.addWidget(self.labelRef_ManEditImage, 0, 5, 1, 1)
spacerItem10 = QtGui.QSpacerItem(138, 20, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Minimum)
self.gridLayout_3.addItem(spacerItem10, 0, 6, 1, 1)
self.lineRef_line1_2 = QtGui.QFrame(self.layoutWidget4)
self.lineRef_line1_2.setFrameShape(QtGui.QFrame.VLine)
self.lineRef_line1_2.setFrameShadow(QtGui.QFrame.Sunken)
self.lineRef_line1_2.setObjectName("lineRef_line1_2")
self.gridLayout_3.addWidget(self.lineRef_line1_2, 0, 7, 3, 1)
self.pushButtonRef_ClearRefstars = QtGui.QPushButton(self.layoutWidget4)
self.pushButtonRef_ClearRefstars.setObjectName("pushButtonRef_ClearRefstars")
self.gridLayout_3.addWidget(self.pushButtonRef_ClearRefstars, 1, 8, 2, 1)
self.pushButtonRef_AddRefstarsfromCat = QtGui.QPushButton(self.layoutWidget4)
self.pushButtonRef_AddRefstarsfromCat.setObjectName("pushButtonRef_AddRefstarsfromCat")
self.gridLayout_3.addWidget(self.pushButtonRef_AddRefstarsfromCat, 2, 0, 1, 1)
self.pushButtonRef_AddRefstar = QtGui.QPushButton(self.layoutWidget4)
self.pushButtonRef_AddRefstar.setObjectName("pushButtonRef_AddRefstar")
self.gridLayout_3.addWidget(self.pushButtonRef_AddRefstar, 2, 2, 1, 1)
self.pushButtonRef_DeleteRefstar = QtGui.QPushButton(self.layoutWidget4)
self.pushButtonRef_DeleteRefstar.setObjectName("pushButtonRef_DeleteRefstar")
self.gridLayout_3.addWidget(self.pushButtonRef_DeleteRefstar, 2, 3, 1, 1)
self.pushButtonRef_AddRefstarImage = QtGui.QPushButton(self.layoutWidget4)
self.pushButtonRef_AddRefstarImage.setObjectName("pushButtonRef_AddRefstarImage")
self.gridLayout_3.addWidget(self.pushButtonRef_AddRefstarImage, 2, 5, 1, 1)
self.pushButtonRef_DeleteRefstar_2 = QtGui.QPushButton(self.layoutWidget4)
self.pushButtonRef_DeleteRefstar_2.setObjectName("pushButtonRef_DeleteRefstar_2")
self.gridLayout_3.addWidget(self.pushButtonRef_DeleteRefstar_2, 2, 6, 1, 1)
self.gridLayout_11.addLayout(self.gridLayout_3, 0, 0, 1, 1)
self.tableWidgetRefstars = QtGui.QTableWidget(self.layoutWidget4)
self.tableWidgetRefstars.setLineWidth(2)
self.tableWidgetRefstars.setRowCount(0)
self.tableWidgetRefstars.setObjectName("tableWidgetRefstars")
self.tableWidgetRefstars.setColumnCount(10)
self.tableWidgetRefstars.setRowCount(0)
item = QtGui.QTableWidgetItem()
self.tableWidgetRefstars.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetRefstars.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetRefstars.setHorizontalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetRefstars.setHorizontalHeaderItem(3, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetRefstars.setHorizontalHeaderItem(4, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetRefstars.setHorizontalHeaderItem(5, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetRefstars.setHorizontalHeaderItem(6, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetRefstars.setHorizontalHeaderItem(7, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetRefstars.setHorizontalHeaderItem(8, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetRefstars.setHorizontalHeaderItem(9, item)
self.gridLayout_11.addWidget(self.tableWidgetRefstars, 2, 0, 1, 1)
self.line_15 = QtGui.QFrame(self.layoutWidget4)
self.line_15.setFrameShape(QtGui.QFrame.HLine)
self.line_15.setFrameShadow(QtGui.QFrame.Sunken)
self.line_15.setObjectName("line_15")
self.gridLayout_11.addWidget(self.line_15, 1, 0, 1, 1)
self.tabWidget.addTab(self.RefstarsTab, "")
self.FinalizeTab = QtGui.QWidget()
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.FinalizeTab.sizePolicy().hasHeightForWidth())
self.FinalizeTab.setSizePolicy(sizePolicy)
self.FinalizeTab.setObjectName("FinalizeTab")
self.textEditFin_ErrorMessages = QtGui.QTextEdit(self.FinalizeTab)
self.textEditFin_ErrorMessages.setEnabled(True)
self.textEditFin_ErrorMessages.setGeometry(QtCore.QRect(510, 10, 531, 261))
self.textEditFin_ErrorMessages.setAutoFillBackground(True)
self.textEditFin_ErrorMessages.setFrameShadow(QtGui.QFrame.Plain)
self.textEditFin_ErrorMessages.setTextInteractionFlags(QtCore.Qt.TextSelectableByKeyboard|QtCore.Qt.TextSelectableByMouse)
self.textEditFin_ErrorMessages.setObjectName("textEditFin_ErrorMessages")
self.layoutWidget5 = QtGui.QWidget(self.FinalizeTab)
self.layoutWidget5.setGeometry(QtCore.QRect(9, 11, 492, 253))
self.layoutWidget5.setObjectName("layoutWidget5")
self.gridLayout_19 = QtGui.QGridLayout(self.layoutWidget5)
self.gridLayout_19.setObjectName("gridLayout_19")
self.labelFin_Slitmask = QtGui.QLabel(self.layoutWidget5)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(12)
font.setWeight(75)
font.setBold(True)
self.labelFin_Slitmask.setFont(font)
self.labelFin_Slitmask.setObjectName("labelFin_Slitmask")
self.gridLayout_19.addWidget(self.labelFin_Slitmask, 0, 0, 1, 1)
self.pushButtonFin_Validate = QtGui.QPushButton(self.layoutWidget5)
self.pushButtonFin_Validate.setObjectName("pushButtonFin_Validate")
self.gridLayout_19.addWidget(self.pushButtonFin_Validate, 1, 0, 1, 1)
self.toolButtonFin_WriteRSMT = QtGui.QToolButton(self.layoutWidget5)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.toolButtonFin_WriteRSMT.sizePolicy().hasHeightForWidth())
self.toolButtonFin_WriteRSMT.setSizePolicy(sizePolicy)
self.toolButtonFin_WriteRSMT.setObjectName("toolButtonFin_WriteRSMT")
self.gridLayout_19.addWidget(self.toolButtonFin_WriteRSMT, 2, 0, 1, 1)
self.labelFin_WriteRSMTinfo = QtGui.QLabel(self.layoutWidget5)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(8)
font.setItalic(True)
self.labelFin_WriteRSMTinfo.setFont(font)
self.labelFin_WriteRSMTinfo.setObjectName("labelFin_WriteRSMTinfo")
self.gridLayout_19.addWidget(self.labelFin_WriteRSMTinfo, 2, 1, 1, 1)
self.lineFin_3 = QtGui.QFrame(self.layoutWidget5)
self.lineFin_3.setFrameShape(QtGui.QFrame.HLine)
self.lineFin_3.setFrameShadow(QtGui.QFrame.Sunken)
self.lineFin_3.setObjectName("lineFin_3")
self.gridLayout_19.addWidget(self.lineFin_3, 3, 0, 1, 3)
self.labelFin_FChart_3 = QtGui.QLabel(self.layoutWidget5)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(12)
font.setWeight(75)
font.setBold(True)
self.labelFin_FChart_3.setFont(font)
self.labelFin_FChart_3.setObjectName("labelFin_FChart_3")
self.gridLayout_19.addWidget(self.labelFin_FChart_3, 4, 0, 1, 1)
self.pushButtonFin_WriteXML = QtGui.QPushButton(self.layoutWidget5)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.pushButtonFin_WriteXML.sizePolicy().hasHeightForWidth())
self.pushButtonFin_WriteXML.setSizePolicy(sizePolicy)
self.pushButtonFin_WriteXML.setObjectName("pushButtonFin_WriteXML")
self.gridLayout_19.addWidget(self.pushButtonFin_WriteXML, 5, 0, 1, 1)
self.lineFin = QtGui.QFrame(self.layoutWidget5)
self.lineFin.setFrameShape(QtGui.QFrame.HLine)
self.lineFin.setFrameShadow(QtGui.QFrame.Sunken)
self.lineFin.setObjectName("lineFin")
self.gridLayout_19.addWidget(self.lineFin, 6, 0, 1, 3)
self.labelFin_FChart = QtGui.QLabel(self.layoutWidget5)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(12)
font.setWeight(75)
font.setBold(True)
self.labelFin_FChart.setFont(font)
self.labelFin_FChart.setObjectName("labelFin_FChart")
self.gridLayout_19.addWidget(self.labelFin_FChart, 7, 0, 1, 1)
self.pushButtonFin_CreateFChart_Current = QtGui.QPushButton(self.layoutWidget5)
self.pushButtonFin_CreateFChart_Current.setObjectName("pushButtonFin_CreateFChart_Current")
self.gridLayout_19.addWidget(self.pushButtonFin_CreateFChart_Current, 8, 0, 1, 2)
self.pushButtonFin_CreateFChart_DSS = QtGui.QPushButton(self.layoutWidget5)
self.pushButtonFin_CreateFChart_DSS.setObjectName("pushButtonFin_CreateFChart_DSS")
self.gridLayout_19.addWidget(self.pushButtonFin_CreateFChart_DSS, 9, 0, 1, 2)
self.labelFin_CreateFChartinfo = QtGui.QLabel(self.layoutWidget5)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(8)
font.setItalic(True)
self.labelFin_CreateFChartinfo.setFont(font)
self.labelFin_CreateFChartinfo.setObjectName("labelFin_CreateFChartinfo")
self.gridLayout_19.addWidget(self.labelFin_CreateFChartinfo, 9, 2, 1, 1)
spacerItem11 = QtGui.QSpacerItem(200, 20, QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Minimum)
self.gridLayout_19.addItem(spacerItem11, 4, 2, 1, 1)
self.tabWidget.addTab(self.FinalizeTab, "")
self.line_10 = QtGui.QFrame(self.centralwidget)
self.line_10.setGeometry(QtCore.QRect(9, 198, 1051, 16))
self.line_10.setFrameShape(QtGui.QFrame.HLine)
self.line_10.setFrameShadow(QtGui.QFrame.Sunken)
self.line_10.setObjectName("line_10")
self.line_4 = QtGui.QFrame(self.centralwidget)
self.line_4.setGeometry(QtCore.QRect(420, 10, 16, 183))
self.line_4.setFrameShape(QtGui.QFrame.VLine)
self.line_4.setFrameShadow(QtGui.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.layoutWidget6 = QtGui.QWidget(self.centralwidget)
self.layoutWidget6.setGeometry(QtCore.QRect(510, 20, 511, 175))
self.layoutWidget6.setObjectName("layoutWidget6")
self.verticalLayout_2 = QtGui.QVBoxLayout(self.layoutWidget6)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.verticalLayoutMain_MaskCoords = QtGui.QVBoxLayout()
self.verticalLayoutMain_MaskCoords.setObjectName("verticalLayoutMain_MaskCoords")
self.labelMain_MaskHeading = QtGui.QLabel(self.layoutWidget6)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setWeight(75)
font.setUnderline(True)
font.setBold(True)
self.labelMain_MaskHeading.setFont(font)
self.labelMain_MaskHeading.setObjectName("labelMain_MaskHeading")
self.verticalLayoutMain_MaskCoords.addWidget(self.labelMain_MaskHeading)
self.gridLayoutMain_MaskCoords = QtGui.QGridLayout()
self.gridLayoutMain_MaskCoords.setObjectName("gridLayoutMain_MaskCoords")
self.labelMain_CenRA = QtGui.QLabel(self.layoutWidget6)
font = QtGui.QFont()
font.setFamily("Arial")
font.setWeight(75)
font.setBold(True)
self.labelMain_CenRA.setFont(font)
self.labelMain_CenRA.setObjectName("labelMain_CenRA")
self.gridLayoutMain_MaskCoords.addWidget(self.labelMain_CenRA, 0, 0, 1, 1)
self.labelMain_CenDEC = QtGui.QLabel(self.layoutWidget6)
font = QtGui.QFont()
font.setFamily("Arial")
font.setWeight(75)
font.setBold(True)
self.labelMain_CenDEC.setFont(font)
self.labelMain_CenDEC.setObjectName("labelMain_CenDEC")
self.gridLayoutMain_MaskCoords.addWidget(self.labelMain_CenDEC, 0, 1, 1, 1)
self.labelMain_Epoch = QtGui.QLabel(self.layoutWidget6)
font = QtGui.QFont()
font.setFamily("Arial")
font.setWeight(75)
font.setBold(True)
self.labelMain_Epoch.setFont(font)
self.labelMain_Epoch.setObjectName("labelMain_Epoch")
self.gridLayoutMain_MaskCoords.addWidget(self.labelMain_Epoch, 0, 2, 1, 1)
self.labelMain_PA = QtGui.QLabel(self.layoutWidget6)
font = QtGui.QFont()
font.setFamily("Arial")
font.setWeight(75)
font.setBold(True)
self.labelMain_PA.setFont(font)
self.labelMain_PA.setObjectName("labelMain_PA")
self.gridLayoutMain_MaskCoords.addWidget(self.labelMain_PA, 0, 3, 1, 1)
self.lineEditMain_CenRA = QtGui.QLineEdit(self.layoutWidget6)
self.lineEditMain_CenRA.setObjectName("lineEditMain_CenRA")
self.gridLayoutMain_MaskCoords.addWidget(self.lineEditMain_CenRA, 1, 0, 1, 1)
self.lineEditMain_CenDEC = QtGui.QLineEdit(self.layoutWidget6)
self.lineEditMain_CenDEC.setText("")
self.lineEditMain_CenDEC.setObjectName("lineEditMain_CenDEC")
self.gridLayoutMain_MaskCoords.addWidget(self.lineEditMain_CenDEC, 1, 1, 1, 1)
self.lineEditMain_Equinox = QtGui.QLineEdit(self.layoutWidget6)
self.lineEditMain_Equinox.setObjectName("lineEditMain_Equinox")
self.gridLayoutMain_MaskCoords.addWidget(self.lineEditMain_Equinox, 1, 2, 1, 1)
self.lineEditMain_PA = QtGui.QLineEdit(self.layoutWidget6)
self.lineEditMain_PA.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.lineEditMain_PA.setObjectName("lineEditMain_PA")
self.gridLayoutMain_MaskCoords.addWidget(self.lineEditMain_PA, 1, 3, 1, 1)
self.verticalLayoutMain_MaskCoords.addLayout(self.gridLayoutMain_MaskCoords)
self.verticalLayout_2.addLayout(self.verticalLayoutMain_MaskCoords)
self.line = QtGui.QFrame(self.layoutWidget6)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout_2.addWidget(self.line)
self.verticalLayoutMain_RSSSetup = QtGui.QVBoxLayout()
self.verticalLayoutMain_RSSSetup.setObjectName("verticalLayoutMain_RSSSetup")
self.labelMain_RSSHeading = QtGui.QLabel(self.layoutWidget6)
self.labelMain_RSSHeading.setEnabled(False)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setWeight(75)
font.setUnderline(True)
font.setBold(True)
self.labelMain_RSSHeading.setFont(font)
self.labelMain_RSSHeading.setObjectName("labelMain_RSSHeading")
self.verticalLayoutMain_RSSSetup.addWidget(self.labelMain_RSSHeading)
self.gridLayoutMain_RSSSetup = QtGui.QGridLayout()
self.gridLayoutMain_RSSSetup.setObjectName("gridLayoutMain_RSSSetup")
self.labelMain_Filter = QtGui.QLabel(self.layoutWidget6)
self.labelMain_Filter.setEnabled(False)
font = QtGui.QFont()
font.setFamily("Arial")
font.setWeight(75)
font.setBold(True)
self.labelMain_Filter.setFont(font)
self.labelMain_Filter.setObjectName("labelMain_Filter")
self.gridLayoutMain_RSSSetup.addWidget(self.labelMain_Filter, 0, 0, 1, 1)
self.labelMain_Grating = QtGui.QLabel(self.layoutWidget6)
self.labelMain_Grating.setEnabled(False)
font = QtGui.QFont()
font.setFamily("Arial")
font.setWeight(75)
font.setBold(True)
self.labelMain_Grating.setFont(font)
self.labelMain_Grating.setObjectName("labelMain_Grating")
self.gridLayoutMain_RSSSetup.addWidget(self.labelMain_Grating, 0, 1, 1, 1)
self.labelMain_CamAng = QtGui.QLabel(self.layoutWidget6)
self.labelMain_CamAng.setEnabled(False)
font = QtGui.QFont()
font.setFamily("Arial")
font.setWeight(75)
font.setBold(True)
self.labelMain_CamAng.setFont(font)
self.labelMain_CamAng.setObjectName("labelMain_CamAng")
self.gridLayoutMain_RSSSetup.addWidget(self.labelMain_CamAng, 0, 2, 1, 1)
self.labelMain_GrAng = QtGui.QLabel(self.layoutWidget6)
self.labelMain_GrAng.setEnabled(False)
font = QtGui.QFont()
font.setFamily("Arial")
font.setWeight(75)
font.setBold(True)
self.labelMain_GrAng.setFont(font)
self.labelMain_GrAng.setObjectName("labelMain_GrAng")
self.gridLayoutMain_RSSSetup.addWidget(self.labelMain_GrAng, 0, 3, 1, 1)
self.lineEditMain_Filter = QtGui.QLineEdit(self.layoutWidget6)
self.lineEditMain_Filter.setEnabled(False)
self.lineEditMain_Filter.setReadOnly(True)
self.lineEditMain_Filter.setObjectName("lineEditMain_Filter")
self.gridLayoutMain_RSSSetup.addWidget(self.lineEditMain_Filter, 1, 0, 1, 1)
self.lineEditMain_Grating = QtGui.QLineEdit(self.layoutWidget6)
self.lineEditMain_Grating.setEnabled(False)
self.lineEditMain_Grating.setText("")
self.lineEditMain_Grating.setReadOnly(True)
self.lineEditMain_Grating.setObjectName("lineEditMain_Grating")
self.gridLayoutMain_RSSSetup.addWidget(self.lineEditMain_Grating, 1, 1, 1, 1)
self.lineEditMain_CamAng = QtGui.QLineEdit(self.layoutWidget6)
self.lineEditMain_CamAng.setEnabled(False)
self.lineEditMain_CamAng.setReadOnly(True)
self.lineEditMain_CamAng.setObjectName("lineEditMain_CamAng")
self.gridLayoutMain_RSSSetup.addWidget(self.lineEditMain_CamAng, 1, 2, 1, 1)
self.lineEditMain_GrAng = QtGui.QLineEdit(self.layoutWidget6)
self.lineEditMain_GrAng.setEnabled(False)
self.lineEditMain_GrAng.setReadOnly(True)
self.lineEditMain_GrAng.setObjectName("lineEditMain_GrAng")
self.gridLayoutMain_RSSSetup.addWidget(self.lineEditMain_GrAng, 1, 3, 1, 1)
self.verticalLayoutMain_RSSSetup.addLayout(self.gridLayoutMain_RSSSetup)
self.verticalLayout_2.addLayout(self.verticalLayoutMain_RSSSetup)
self.layoutWidget7 = QtGui.QWidget(self.centralwidget)
self.layoutWidget7.setGeometry(QtCore.QRect(21, 21, 301, 126))
self.layoutWidget7.setObjectName("layoutWidget7")
self.verticalLayout_5 = QtGui.QVBoxLayout(self.layoutWidget7)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.gridLayoutMain_Info = QtGui.QGridLayout()
self.gridLayoutMain_Info.setObjectName("gridLayoutMain_Info")
self.labelMain_TargetName = QtGui.QLabel(self.layoutWidget7)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setWeight(75)
font.setBold(True)
self.labelMain_TargetName.setFont(font)
self.labelMain_TargetName.setObjectName("labelMain_TargetName")
self.gridLayoutMain_Info.addWidget(self.labelMain_TargetName, 0, 0, 1, 1)
self.labelMain_MaskName = QtGui.QLabel(self.layoutWidget7)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setWeight(75)
font.setBold(True)
self.labelMain_MaskName.setFont(font)
self.labelMain_MaskName.setObjectName("labelMain_MaskName")
self.gridLayoutMain_Info.addWidget(self.labelMain_MaskName, 2, 0, 1, 1)
self.lineEditMain_MaskName = QtGui.QLineEdit(self.layoutWidget7)
self.lineEditMain_MaskName.setAutoFillBackground(False)
self.lineEditMain_MaskName.setObjectName("lineEditMain_MaskName")
self.gridLayoutMain_Info.addWidget(self.lineEditMain_MaskName, 2, 1, 1, 1)
self.lineEditMain_TargetName = QtGui.QLineEdit(self.layoutWidget7)
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
self.lineEditMain_TargetName.setPalette(palette)
self.lineEditMain_TargetName.setObjectName("lineEditMain_TargetName")
self.gridLayoutMain_Info.addWidget(self.lineEditMain_TargetName, 0, 1, 1, 1)
self.verticalLayout_5.addLayout(self.gridLayoutMain_Info)
self.labelMain_Mode = QtGui.QLabel(self.layoutWidget7)
self.labelMain_Mode.setEnabled(False)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(12)
font.setWeight(75)
font.setBold(True)
self.labelMain_Mode.setFont(font)
self.labelMain_Mode.setObjectName("labelMain_Mode")
self.verticalLayout_5.addWidget(self.labelMain_Mode)
self.gridLayout_8 = QtGui.QGridLayout()
self.gridLayout_8.setObjectName("gridLayout_8")
spacerItem12 = QtGui.QSpacerItem(90, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_8.addItem(spacerItem12, 0, 1, 1, 1)
self.gridLayout_7 = QtGui.QGridLayout()
self.gridLayout_7.setObjectName("gridLayout_7")
self.labelMain_Centroiding = QtGui.QLabel(self.layoutWidget7)
self.labelMain_Centroiding.setEnabled(False)
self.labelMain_Centroiding.setObjectName("labelMain_Centroiding")
self.gridLayout_7.addWidget(self.labelMain_Centroiding, 0, 1, 1, 1)
self.labelMain_CentroidingOnOff = QtGui.QLabel(self.layoutWidget7)
self.labelMain_CentroidingOnOff.setEnabled(False)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setWeight(75)
font.setBold(True)
self.labelMain_CentroidingOnOff.setFont(font)
self.labelMain_CentroidingOnOff.setObjectName("labelMain_CentroidingOnOff")
self.gridLayout_7.addWidget(self.labelMain_CentroidingOnOff, 0, 2, 1, 1)
self.lineEditMain_Mode = QtGui.QLineEdit(self.layoutWidget7)
self.lineEditMain_Mode.setEnabled(False)
self.lineEditMain_Mode.setReadOnly(True)
self.lineEditMain_Mode.setObjectName("lineEditMain_Mode")
self.gridLayout_7.addWidget(self.lineEditMain_Mode, 0, 0, 1, 1)
self.gridLayout_8.addLayout(self.gridLayout_7, 0, 0, 1, 1)
self.verticalLayout_5.addLayout(self.gridLayout_8)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1121, 23))
self.menubar.setObjectName("menubar")
self.menuFile = QtGui.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuHelp = QtGui.QMenu(self.menubar)
self.menuHelp.setObjectName("menuHelp")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionQuit = QtGui.QAction(MainWindow)
self.actionQuit.setObjectName("actionQuit")
self.actionLoad_Catalogue = QtGui.QAction(MainWindow)
self.actionLoad_Catalogue.setObjectName("actionLoad_Catalogue")
self.actionLoad_Image = QtGui.QAction(MainWindow)
self.actionLoad_Image.setObjectName("actionLoad_Image")
self.actionOpen = QtGui.QAction(MainWindow)
self.actionOpen.setObjectName("actionOpen")
self.actionOnline_user_manual = QtGui.QAction(MainWindow)
self.actionOnline_user_manual.setObjectName("actionOnline_user_manual")
self.actionAbout = QtGui.QAction(MainWindow)
self.actionAbout.setObjectName("actionAbout")
self.menuFile.addAction(self.actionLoad_Image)
self.menuFile.addAction(self.actionLoad_Catalogue)
self.menuFile.addAction(self.actionOpen)
self.menuFile.addAction(self.actionQuit)
self.menuHelp.addAction(self.actionOnline_user_manual)
self.menuHelp.addAction(self.actionAbout)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.labelInfo_Proposer.setBuddy(self.lineEditInfo_Proposer)
self.labelInfo_Creator.setBuddy(self.lineEditInfo_Creator)
self.labelInfo_Filter.setBuddy(self.comboBoxInfo_Filter)
self.labelInfo_Grating.setBuddy(self.comboBoxInfo_Grating)
self.labelInfo_CamAng.setBuddy(self.horizontalSliderInfo_CamAng)
self.labelInfo_GrAng.setBuddy(self.horizontalSliderInfo_CamAng)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QObject.connect(self.actionQuit, QtCore.SIGNAL("activated()"), MainWindow.close)
QtCore.QObject.connect(self.checkBoxOpt_IncRefstars, QtCore.SIGNAL("clicked(bool)"), self.labelOpt_Refstarstext.setEnabled)
QtCore.QObject.connect(self.checkBoxOpt_IncRefstars, QtCore.SIGNAL("clicked(bool)"), self.labelOpt_Refstarstext_1.setEnabled)
QtCore.QObject.connect(self.checkBoxOpt_IncRefstars, QtCore.SIGNAL("clicked(bool)"), self.lineEditOpt_NumRefstars.setEnabled)
QtCore.QObject.connect(self.checkBoxOpt_IncRefstars, QtCore.SIGNAL("clicked(bool)"), self.labelOpt_Refstarstext_2.setEnabled)
QtCore.QObject.connect(self.checkBoxOpt_IncRefstars, QtCore.SIGNAL("clicked(bool)"), self.lineEditOpt_AllRefstars.setEnabled)
QtCore.QObject.connect(self.checkBoxOpt_IncRefstars, QtCore.SIGNAL("clicked(bool)"), self.labelOpt_Refstarstest_3.setEnabled)
QtCore.QObject.connect(self.radioButtonInfo_Catalogue, QtCore.SIGNAL("clicked(bool)"), self.toolButtonCat_Load.setEnabled)
QtCore.QObject.connect(self.radioButtonInfo_Manual, QtCore.SIGNAL("clicked(bool)"), self.toolButtonCat_Load.setDisabled)
QtCore.QObject.connect(self.comboBoxInfo_Filter, QtCore.SIGNAL("activated(QString)"), self.lineEditMain_Filter.setText)
QtCore.QObject.connect(self.comboBoxInfo_Grating, QtCore.SIGNAL("activated(QString)"), self.lineEditMain_Grating.setText)
QtCore.QObject.connect(self.lineEditInfo_CamAng, QtCore.SIGNAL("textChanged(QString)"), self.lineEditMain_CamAng.setText)
QtCore.QObject.connect(self.lineEditInfo_GrAng, QtCore.SIGNAL("textChanged(QString)"), self.lineEditMain_GrAng.setText)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "pySlitMask", None, QtGui.QApplication.UnicodeUTF8))
self.textEditInstructions.setHtml(QtGui.QApplication.translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans\'; font-size:12pt; font-weight:600; text-decoration: underline; color:#0055ff;\">pySlitMask:</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Sans\'; font-size:12pt; font-weight:600; text-decoration: underline; color:#0055ff;\"></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans\'; font-size:10pt;\">This is a quick start guide to design a MOS mask for the RSS on SALT. This tool was designed to be used with object catalogues. A full manual mode is not supported yet. It is therefore strongly recommended that a catalogue of target objects be created before using this tool. The tool makes use of a priority based optimization, this should be kept in mind when creating the input catalogue.</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Sans\'; font-size:10pt;\"></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans\'; font-size:10pt; font-weight:600; text-decoration: underline;\">Preparation:</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans\'; font-size:10pt;\">1. Check the allowed position angle for the field:</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans\'; font-size:10pt;\"> </span><span style=\" font-family:\'Sans\'; font-size:10pt; font-style:italic;\">http://www.salt.ac.za/fileadmin/files/observing/documents/SALT_PA_Visibility.pdf</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans\'; font-size:10pt;\">2. Prepare an image of the field with accurate astrometry. </span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans\'; font-size:10pt;\">3. Prepare an input catalogue of target objects</span></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans\'; font-size:10pt;\"> The input catalogue should be an ascii file with the following formatting:</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Sans\'; font-size:10pt;\"></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans\'; font-size:10pt; font-weight:600; font-style:italic;\">id RA DEC epoch magnitude band priority [width]* [length]* [tilt]*</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Sans\'; font-size:10pt; font-weight:600; font-style:italic;\"></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans\'; font-size:10pt; font-weight:600;\">*</span><span style=\" font-family:\'Sans\'; font-size:10pt;\"> these are optional and can be | |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import shutil
import sys
import llnl.util.tty as tty
class Hdf5(CMakePackage):
"""HDF5 is a data model, library, and file format for storing and managing
data. It supports an unlimited variety of datatypes, and is designed for
flexible and efficient I/O and for high volume and complex data.
"""
homepage = "https://portal.hdfgroup.org"
url = "https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-1.10/hdf5-1.10.7/src/hdf5-1.10.7.tar.gz"
list_url = "https://support.hdfgroup.org/ftp/HDF5/releases"
list_depth = 3
git = "https://github.com/HDFGroup/hdf5.git"
maintainers = ['lrknox', 'brtnfld', 'byrnHDF', 'ChristopherHogan', 'epourmal',
'gheber', 'hyoklee', 'lkurz', 'soumagne']
test_requires_compiler = True
# The 'develop' version is renamed so that we could uninstall (or patch) it
# without affecting other develop versions.
version('develop-1.13', branch='develop')
version('develop-1.12', branch='hdf5_1_12')
version('develop-1.10', branch='hdf5_1_10')
version('develop-1.8', branch='hdf5_1_8')
version('1.12.1', sha256='79c66ff67e666665369396e9c90b32e238e501f345afd2234186bfb8331081ca')
version('1.12.0', sha256='a62dcb276658cb78e6795dd29bf926ed7a9bc4edf6e77025cd2c689a8f97c17a')
# HDF5 1.12 broke API compatibility, so we currently prefer the latest
# 1.10 release. Packages that want later versions of HDF5 should specify,
# e.g., depends_on("hdf5@1.12:") to get 1.12 or higher.
version('1.10.7', sha256='7a1a0a54371275ce2dfc5cd093775bb025c365846512961e7e5ceaecb437ef15', preferred=True)
version('1.10.6', sha256='5f9a3ee85db4ea1d3b1fa9159352aebc2af72732fc2f58c96a3f0768dba0e9aa')
version('1.10.5', sha256='6d4ce8bf902a97b050f6f491f4268634e252a63dadd6656a1a9be5b7b7726fa8')
version('1.10.4', sha256='8f60dc4dd6ab5fcd23c750d1dc5bca3d0453bdce5c8cdaf0a4a61a9d1122adb2')
version('1.10.3', sha256='b600d7c914cfa80ae127cd1a1539981213fee9994ac22ebec9e3845e951d9b39')
version('1.10.2', sha256='bfec1be8c366965a99812cf02ddc97e4b708c1754fccba5414d4adccdc073866')
version('1.10.1', sha256='048a9d149fb99aaa1680a712963f5a78e9c43b588d0e79d55e06760ec377c172')
version('1.10.0-patch1', sha256='6e78cfe32a10e6e0629393cdfddf6cfa536571efdaf85f08e35326e1b4e9eff0')
version('1.10.0', sha256='81f6201aba5c30dced5dcd62f5d5477a2790fd5850e02ac514ca8bf3e2bb375a')
version('1.8.22', sha256='8406d96d9355ef8961d2739fb8fd5474ad4cdf52f3cfac657733defd9709bfaa')
version('1.8.21', sha256='87d8c82eba5cf766d97cd06c054f4639c1049c4adeaa3a79f77f8bd374f80f37')
version('1.8.19', sha256='a4335849f19fae88c264fd0df046bc321a78c536b2548fc508627a790564dc38')
version('1.8.18', sha256='cdb195ad8d9e6782acf24b2488061289f615628c2ccda8457b0a0c3fb7a8a063')
version('1.8.17', sha256='d9cda297ee76ade9881c4208987939250d397bae6252d0ccb66fa7d24d67e263')
version('1.8.16', sha256='ed17178abd9928a7237f30370189ba767b9e39e0db45917c2ac4665eb9cb4771')
version('1.8.15', sha256='4e963216b7d32469596bc1321a8c3f6e0c278dcbbdb7be6414c63c081b34c275')
version('1.8.14', sha256='1dbefeeef7f591897c632b2b090db96bb8d35ad035beaa36bc39cb2bc67e0639')
version('1.8.13', sha256='82f6b38eec103b4fccfbf14892786e0c27a8135d3252d8601cf5bf20066d38c1')
version('1.8.12', sha256='b5cccea850096962b5fd9e96f22c4f47d2379224bb41130d9bc038bb6c37dfcb')
version('1.8.10', sha256='4813b79c5fb8701a625b9924b8203bc7154a77f9b826ad4e034144b4056a160a')
variant('shared', default=True,
description='Builds a shared version of the library')
variant('hl', default=False, description='Enable the high-level library')
variant('cxx', default=False, description='Enable C++ support')
variant('fortran', default=False, description='Enable Fortran support')
variant('java', default=False, description='Enable Java support')
variant('threadsafe', default=False,
description='Enable thread-safe capabilities')
variant('tools', default=True, description='Enable building tools')
variant('mpi', default=True, description='Enable MPI support')
variant('szip', default=False, description='Enable szip support')
# Build HDF5 with API compatibility.
variant('api', default='default',
description='Choose api compatibility for earlier version',
values=('default', 'v114', 'v112', 'v110', 'v18', 'v16'),
multi=False)
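# For instance (illustrative spec, not from this file), a 1.10 build that
# still exposes the 1.8-compatible API could be requested with:
#   spack install hdf5@1.10.7 api=v18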
depends_on('cmake@3.12:', type='build')
depends_on('mpi', when='+mpi')
depends_on('java', type=('build', 'run'), when='+java')
# numactl does not currently build on darwin
if sys.platform != 'darwin':
depends_on('numactl', when='+mpi+fortran')
depends_on('szip', when='+szip')
depends_on('zlib@1.1.2:')
# The compiler wrappers (h5cc, h5fc, etc.) run 'pkg-config'.
depends_on('pkgconfig', type='run')
conflicts('api=v114', when='@1.6:1.12.99',
msg='v114 is not compatible with this release')
conflicts('api=v112', when='@1.6:1.10.99',
msg='v112 is not compatible with this release')
conflicts('api=v110', when='@1.6:1.8.99',
msg='v110 is not compatible with this release')
conflicts('api=v18', when='@1.6:1.6.99',
msg='v18 is not compatible with this release')
# The Java wrappers and associated libhdf5_java library
# were first available in 1.10
conflicts('+java', when='@:1.9')
# The Java wrappers cannot be built without shared libs.
conflicts('+java', when='~shared')
# There are several officially unsupported combinations of the features:
# 1. Thread safety is not guaranteed via high-level C-API but in some cases
# it works.
# conflicts('+threadsafe+hl')
# 2. Thread safety is not guaranteed via Fortran (CXX) API, but it's
# possible for a dependency tree to contain a package that uses Fortran
# (CXX) API in a single thread and another one that uses low-level C-API
# in multiple threads. To allow for such scenarios, we don't specify the
# following conflicts.
# conflicts('+threadsafe+cxx')
# conflicts('+threadsafe+fortran')
# 3. Parallel features are not supported via CXX API, but for the reasons
# described in #2 we allow for such combination.
# conflicts('+mpi+cxx')
# There are known build failures with intel@18.0.1. This issue is
# discussed and patch is provided at
# https://software.intel.com/en-us/forums/intel-fortran-compiler-for-linux-and-mac-os-x/topic/747951.
patch('h5f90global-mult-obj-same-equivalence-same-common-block.patch',
when='@1.10.1%intel@18')
# Turn line comments into block comments to conform with pre-C99 language
# standards. Versions of hdf5 after 1.8.10 don't require this patch,
# either because they conform to pre-C99 or neglect to ask for pre-C99
# language standards from their compiler. The hdf5 build system adds
# the -ansi cflag (run 'man gcc' for info on -ansi) for some versions
# of some compilers (see hdf5-1.8.10/config/gnu-flags). The hdf5 build
# system does not provide an option to disable -ansi, but since the
# pre-C99 code is restricted to just five lines of line comments in
# three src files, this patch accomplishes the simple task of patching the
# three src files and leaves the hdf5 build system alone.
patch('pre-c99-comments.patch', when='@1.8.10')
# There are build errors with GCC 8, see
# https://forum.hdfgroup.org/t/1-10-2-h5detect-compile-error-gcc-8-1-0-on-centos-7-2-solved/4441
patch('https://salsa.debian.org/debian-gis-team/hdf5/raw/bf94804af5f80f662cad80a5527535b3c6537df6/debian/patches/gcc-8.patch',
sha256='57cee5ff1992b4098eda079815c36fc2da9b10e00a9056df054f2384c4fc7523',
when='@1.10.2%gcc@8:')
# Disable MPI C++ interface when C++ is disabled, otherwise downstream
# libraries fail to link; see https://github.com/spack/spack/issues/12586
patch('h5public-skip-mpicxx.patch', when='@1.8.10:1.8.21,1.10.0:1.10.5+mpi~cxx',
sha256='b61e2f058964ad85be6ee5ecea10080bf79e73f83ff88d1fa4b602d00209da9c')
# Fixes BOZ literal constant error when compiled with GCC 10.
# The issue is described here: https://github.com/spack/spack/issues/18625
patch('hdf5_1.8_gcc10.patch', when='@:1.8.21',
sha256='0e20187cda3980a4fdff410da92358b63de7ebef2df1d7a425371af78e50f666')
# The argument 'buf_size' of the C function 'h5fget_file_image_c' is
# declared as intent(in) though it is modified by the invocation. As a
# result, aggressive compilers such as Fujitsu's may apply an incorrect
# optimization that causes an error.
def patch(self):
filter_file(
'INTEGER(SIZE_T), INTENT(IN) :: buf_size',
'INTEGER(SIZE_T), INTENT(OUT) :: buf_size',
'fortran/src/H5Fff.F90',
string=True, ignore_absent=True)
filter_file(
'INTEGER(SIZE_T), INTENT(IN) :: buf_size',
'INTEGER(SIZE_T), INTENT(OUT) :: buf_size',
'fortran/src/H5Fff_F03.f90',
string=True, ignore_absent=True)
# The parallel compiler wrappers (i.e. h5pcc, h5pfc, etc.) reference MPI
# compiler wrappers and do not need to be changed.
filter_compiler_wrappers('h5cc', 'h5hlcc',
'h5fc', 'h5hlfc',
'h5c++', 'h5hlc++',
relative_root='bin')
def url_for_version(self, version):
url = "https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-{0}/hdf5-{1}/src/hdf5-{1}.tar.gz"
return url.format(version.up_to(2), version)
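# Illustrative result of the format string above: for version 1.8.16,
# version.up_to(2) is '1.8', so the generated URL would be
# https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-1.8/hdf5-1.8.16/src/hdf5-1.8.16.tar.gz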
def flag_handler(self, name, flags):
cmake_flags = []
if name == "cflags":
if self.spec.satisfies('%gcc') \
or self.spec.satisfies('%clang'):
# Quiet warnings/errors about implicit declaration of functions
# in C99:
cmake_flags.append("-Wno-implicit-function-declaration")
# Note that this flag will cause an error if building %nvhpc.
if self.spec.satisfies('@:1.8.12~shared'):
# More recent versions set CMAKE_POSITION_INDEPENDENT_CODE to
# True and build with PIC flags.
cmake_flags.append(self.compiler.cc_pic_flag)
elif name == 'cxxflags':
if self.spec.satisfies('@:1.8.12+cxx~shared'):
cmake_flags.append(self.compiler.cxx_pic_flag)
elif name == "fflags":
if self.spec.satisfies('%cce+fortran'):
# Cray compiler generates module files with uppercase names by
# default, which is not handled by the CMake scripts. The
# following flag forces the compiler to produce module files
# with lowercase names.
cmake_flags.append('-ef')
if self.spec.satisfies('@:1.8.12+fortran~shared'):
cmake_flags.append(self.compiler.fc_pic_flag)
elif name == "ldlibs":
if '+fortran %fj' in self.spec:
cmake_flags.extend(['-lfj90i', '-lfj90f',
'-lfjsrcinfo', '-lelf'])
return flags, None, (cmake_flags or None)
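# Note: the triple returned above follows Spack's flag_handler contract of
# (injected flags, environment flags, build-system flags): the incoming
# flags stay injected via the compiler wrappers, nothing is exported through
# the environment, and any cmake_flags are handed to CMake directly.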
@property
def libs(self):
"""HDF5 can be queried for the following parameters:
- "hl": high-level interface
- "cxx": C++ APIs
- "fortran": Fortran APIs
- "java": Java APIs
:return: list of matching libraries
"""
query_parameters = self.spec.last_query.extra_parameters
shared = '+shared' in self.spec
# This map contains a translation from query_parameters
# to the libraries needed
query2libraries = {
tuple(): ['libhdf5'],
('cxx', 'fortran', 'hl', 'java'): [
# When installed with Autotools, the basename of the real
# library file implementing the High-level Fortran interface is
# 'libhdf5hl_fortran'. Starting with versions 1.8.22, 1.10.5 and
# 1.12.0, the Autotools installation also produces a symbolic
# link 'libhdf5_hl_fortran.<so/a>' to
# 'libhdf5hl_fortran.<so/a>'. Note that in the case of the
# dynamic library, the latter is a symlink to the real sonamed
# file 'libhdf5_fortran.so.<abi-version>'. This means that all
# dynamically linked executables/libraries of the dependent
# packages need 'libhdf5_fortran.so.<abi-version>' with the same
# DT_SONAME entry. However, the CMake installation (at least
# starting version 1.8.10) does not produce it. Instead, the
# basename of the library file is 'libhdf5_hl_fortran', which
# means that switching to CMake requires rebuilding all
# dependent packages that use the High-level Fortran interface.
# Therefore, we do not try to preserve backward compatibility
# with Autotools installations by creating symlinks. The only
# packages that could benefit from it would be those that
# hardcode the library name in their building systems. Such
# packages should simply be patched.
'libhdf5_hl_fortran',
'libhdf5_hl_f90cstub',
'libhdf5_hl_cpp',
'libhdf5_hl',
'libhdf5_fortran',
'libhdf5_f90cstub',
'libhdf5_java',
'libhdf5',
],
('cxx', 'hl'): [
'libhdf5_hl_cpp',
'libhdf5_hl',
'libhdf5',
],
('fortran', 'hl'): [
'libhdf5_hl_fortran',
'libhdf5_hl_f90cstub',
'libhdf5_hl',
'libhdf5_fortran',
'libhdf5_f90cstub',
'libhdf5',
],
('hl',): [
'libhdf5_hl',
'libhdf5',
],
('cxx', 'fortran'): [
'libhdf5_fortran',
'libhdf5_f90cstub',
'libhdf5_cpp',
'libhdf5',
],
('cxx',): [
'libhdf5_cpp',
'libhdf5',
],
('fortran',): [
'libhdf5_fortran',
'libhdf5_f90cstub',
'libhdf5',
],
('java',): [
'libhdf5_java',
'libhdf5',
]
}
# Turn the query into the appropriate key
key = tuple(sorted(query_parameters))
libraries = query2libraries[key]
return find_libraries(
libraries, root=self.prefix, shared=shared, recursive=True
)
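# Minimal usage sketch of the query mechanism above (hypothetical dependent
# package, assuming Spack's dependency-query syntax): a dependent could ask
# for spec['hdf5:hl,fortran'].libs, which resolves the key
# ('fortran', 'hl') and links against libhdf5_hl_fortran,
# libhdf5_hl_f90cstub, libhdf5_hl, libhdf5_fortran, libhdf5_f90cstub
# and libhdf5.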
@when('@:1.8.21,1.10.0:1.10.5+szip')
def setup_build_environment(self, env):
env.set('SZIP_INSTALL', self.spec['szip'].prefix)
@run_before('cmake')
def fortran_check(self):
if '+fortran' in self.spec and not self.compiler.fc:
msg = 'cannot build a Fortran variant without a Fortran compiler'
raise RuntimeError(msg)
def cmake_args(self):
spec = self.spec
if spec.satisfies('@:1.8.15+shared'):
tty.warn('hdf5@:1.8.15+shared does not produce static libraries')
args = [
# Always
"""
Working with PSID in python
@author : <NAME> <<EMAIL>>
@date : 2015-02-04 09:02:56
use the read_csv option `usecols` to only keep what we need
"""
import re
import os
import gc
import os.path
import zipfile
import requests
import lxml.html
import numpy as np
import pandas as pd
# ----------- #
# Downloading #
# ----------- #
# Define lookup that maps years into request numbers.
file_year = map(str, list(range(1968, 1998)) + list(range(1999, 2012, 2)))
request_numbers = map(str, ([1056] + list(range(1058, 1083)) +
list(range(1047, 1052)) +
[1040, 1052, 1132, 1139, 1152, 1156]))
file_lookup = dict(zip(file_year, request_numbers))
file_lookup["ind"] = "1053"
def start_psid_session(user=None, password=None):
"""
Use user-supplied login details to log in to the UMich site for PSID
download
"""
login_url = "http://simba.isr.umich.edu/u/Login.aspx"
# start html session so we can log in
session = requests.session()
start = session.get(login_url)
html = start.text
root = lxml.html.fromstring(html)
# Stuff so we can log in
EVAL = root.xpath('//input[@name="__EVENTVALIDATION"]')[0].attrib['value']
VIEWSTATE = root.xpath('//input[@name="__VIEWSTATE"]')[0].attrib['value']
acc_pwd = {'ctl00$ContentPlaceHolder1$Login1$UserName': user,
'ctl00$ContentPlaceHolder1$Login1$Password': password,
'ctl00$ContentPlaceHolder1$Login1$LoginButton': 'Log In',
'__EVENTTARGET': '',
'__EVENTARGUMENT': '',
'__VIEWSTATE': VIEWSTATE,
'__EVENTVALIDATION': EVAL}
# Send login message to PSID site
session.post(login_url, data=acc_pwd)
# Check for login
z = session.get('http://simba.isr.umich.edu/data/data.aspx')
tf2 = 'Logout' in str(z.content)
print('Successful login: %s' % (tf2))
return session
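# Minimal usage sketch (hypothetical credentials; requires a registered
# account on simba.isr.umich.edu):
#   session = start_psid_session(user="jdoe", password="hunter2")
#   download_psid("1056", "FAM1968.zip", session)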
# Function to download PSID zip file
def download_psid(number, local_filename, session):
"""
Download a zip file from the PSID and save it to local_filename
"""
request_start = 'http://simba.isr.umich.edu/Zips/GetFile.aspx?file='
# Get the file using requests
r = session.get(request_start + number, stream=True)
with open(local_filename, 'wb') as f:
# Write it out in chunks in case it's big
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
return local_filename
# Extracting PSID using psid_unzip.
def psid_unzip(filename, extractall=False):
zfile = zipfile.ZipFile(filename)
def keep_file(n):
if extractall:
return True
else:
return ".sas" in name or ".txt" in name or ".pdf" in name
for name in zfile.namelist():
# Only take out the files we want
if keep_file(name):
(dirname, filename) = os.path.split(name)
if ".pdf" in name: # Different directory for Codebooks
dirname = dirname + "Codebooks"
if ".txt" in name:
nascii = name # Keep track of ascii name
if ".sas" in name:
nsas = name # Keep track of sas name
print("Decompressing %s on %s" % (filename, dirname))
if dirname != '':
if not os.path.exists(dirname):
os.makedirs(dirname)
zfile.extract(name, dirname) # Extract file
return (nsas, nascii)
def sascii2csv(sas_name, ascii_name, csv_name, remove_orig=True):
"""
Read in ascii data from SAS commands and write out csv
"""
# Open sas file
x = open(sas_name, "r")
dat = x.read()
dat_split = dat.split('\n')
# RE for variable designation
re_var = r"^\s*(?P<variable>\S+)\s+"
# RE for variable label
re_label = r'[(LABEL)(label)]\s*=\s*"(?P<label>[^"]+)"'
# RE for variable format
re_format = r"[(FORMAT)(format)]\s*=\s*(?P<format>\S+)\s"
# RE for variable position
re_length = r"\s*(?P<length1>\d*)\s*-\s*(?P<length2>\d*)\s*"
meta = []
for dstr in dat_split:
res_var = re.search(re_var, dstr) # Find variable name in line
res_label = re.search(re_label, dstr) # Find variable label
res_format = re.search(re_format, dstr) # Find variable format
if not (res_var is None or res_label is None or res_format is None):
# Now that we have a verified variable name...
# Find position RE
counts = re.search(res_var.group("variable")+re_length, dat)
l1 = int(counts.group("length1")) # Grab out first position
l2 = int(counts.group("length2")) # Grab out second position
# Add to meta data
meta += [{"variable": res_var.group("variable"),
"label": res_label.group("label"),
"format": res_format.group("format"),
"l1": l1,
"l2": l2,
"l3": l2 - l1 + 1}]
# Get relevant descriptions
names = [z["label"] for z in meta]
lengths = [z["l3"] for z in meta]
del meta
# Use numpy to read fixed width file and write as .csv
data = np.genfromtxt(ascii_name, names=names, delimiter=lengths)
np.savetxt(csv_name, data, delimiter=',',
header=','.join(data.dtype.names))
del data
if remove_orig:
os.remove(sas_name)
os.remove(ascii_name)
def download_unzip_csv_psid(f_name, request_num, session, to_csv=True,
remove_orig=True, verbose=True):
"""
Download a family data set
"""
# Download zip file
if verbose:
print("Downloading %s" % f_name)
x = download_psid(str(request_num), f_name, session)
# Unzip
if verbose:
print("Unzipping %s" % f_name)
sas_name, ascii_name = psid_unzip(f_name)
if to_csv:
if verbose:
print("Converting %s to csv" % ascii_name)
# generate csv_name and convert to csv
csv_name = f_name.strip(".zip") + ".csv"
sascii2csv(sas_name, ascii_name, csv_name, remove_orig=remove_orig)
if remove_orig:
os.remove(f_name)
gc.collect()
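# Usage sketch for a single year (assumes an authenticated session from
# start_psid_session):
#   download_unzip_csv_psid("FAM1975.zip", file_lookup["1975"], session)
# which downloads the 1975 family file, unzips it and writes FAM1975.csv.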
def download_all_family_data(session, to_csv=True, **kwargs):
"""
Download all family data sets
"""
# Copy the lookup and drop the cross-year individual file entry first
family_lookup = {k: v for k, v in file_lookup.items() if k != "ind"}
for (fy, rn) in family_lookup.items():
fn = "FAM" + fy + ".zip"
download_unzip_csv_psid(fn, rn, session, to_csv=to_csv, **kwargs)
return
def download_ind_cross_year(session, to_csv=True, **kwargs):
"""
Download the cross year individual file
"""
download_unzip_csv_psid("IND2011ER.zip", str(1053), session,
to_csv=to_csv, **kwargs)
return
def download_parentfile(session, to_csv=True, **kwargs):
"""
Download the parent identification file
"""
download_unzip_csv_psid("PID2011ER.zip", str(1123), session,
to_csv=to_csv, **kwargs)
return
def download_all_data(session, to_csv=True, **kwargs):
"""
Call the download ind and download all family functions
"""
download_ind_cross_year(session, to_csv=to_csv, **kwargs)
download_all_family_data(session, to_csv=to_csv, **kwargs)
return
# -------- #
# Cleaning #
# -------- #
def clean_indfile_names(df):
"""
Most of the columns in the PSID individual file have many
underscores between the variable name and the year. The next few
lines remove those underscores and re-assign the column names.
This is necessary for us to save that data to hdf in table format
"""
cols = pd.Series(df.columns, dtype=str)
c2 = cols.str.extract(r"(.+?)__+(\d\d)")
cols2 = c2[0] + c2[1]
cols2 = cols2.fillna(cols)
df.columns = cols2
return df
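# Illustrative (hypothetical column name): a column such as
# 'RELATION_TO_HEAD______94' would be collapsed to 'RELATION_TO_HEAD94',
# while columns without the underscore/year pattern are left unchanged.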
def csv2hdf(csv_fn, hdf_fn, hdf_gn=None, hdf_mode="a",
extra_func=None):
"""
Move the file csv_fn to an HDF file.
Parameters
----------
csv_fn : string
The file name for the csv
hdf_fn: string
The name of the hdf file to write to
hdf_gn: string, optional
A string specifying the `path` to the group to contain the
dataset. If none is given, the data set is saved to `/fn`, where
fn is the root of csv_fn
hdf_mode: string, optional(default="a")
The open mode for the hdf file. Default is append
extra_func: function, optional(default=None)
An extra function the user can supply to clean or otherwise
alter the data set after reading in from csv, but before saving
to hdf
Returns
-------
None
Notes
-----
This function tries to write the data set in table form, but if it
cannot it will fallback to writing in fixed form.
For a discussion on the differences see the pandas manual
"""
df = pd.read_csv(csv_fn)
if extra_func is not None:
df = extra_func(df)
if hdf_gn is None:
# split to path/file then chop last 4 characters off (`.csv`)
hdf_gn = os.path.split(csv_fn)[1][:-4]
try:
df.to_hdf(hdf_fn, hdf_gn, mode=hdf_mode, format="table",
complib="blosc")
print("Added %s to %s" % (hdf_gn, hdf_fn))
except Exception:
print("WARN: Couldn't store %s as table. Using fixed" % hdf_gn)
df.to_hdf(hdf_fn, hdf_gn, mode=hdf_mode, format="fixed",
complib="blosc")
return
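# Usage sketch (file names are illustrative):
#   csv2hdf("FAM1975.csv", "PSID.hdf")  # stored under group "/FAM1975"
#   csv2hdf("IND2011ER.csv", "PSID.hdf", extra_func=clean_indfile_names)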
def _convert_to_4_digit_year(yr):
print("recieved yr: %s" % yr)
if len(yr) == 4:
return yr
if len(yr) == 1:
return "200" + yr
if len(yr) == 3:
raise ValueError("Can't parse three digit year")
iy = int(yr)
if 0 <= iy <= 9: # 200x
return "20" + yr
elif 10 <= iy <= int(str(datetime.datetime.now().year)[2:]):
return "20" + yr
else: # assuming in 1900's
return "19" + yr
if __name__ == '__main__':
import glob
import argparse
import datetime
from textwrap import dedent
d_help = dedent("""\
Download the specified data file. If argument begins with a, all files
will be downloaded. If it begins with i, only the cross-year individual
file will be downloaded. If it is of the form fYY or fYYYY then only the
family file for the given year will be downloaded
""")
# create parser and add arguments
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--download",
help=d_help)
parser.add_argument("--hdf",
help="Convert csv files to hdf named PSID.hdf",
action="store_true")
parser.add_argument("-u", "--username",
help="Specify username for PSID website")
parser.add_argument("-p", "--password",
help="Specify password for PSID website")
args = parser.parse_args()
# Handle download arg
if args.download:
# make sure we have a user_name and password
if args.username is None or args.password is None:
msg = dedent("""\
Must supply username and password. Example syntax:
`python psid.py -u USERNAME -p PASSWORD -d f75 --hdf`
If you don't yet have an account, go to http://simba.isr.umich.edu
and create one
""")
raise ValueError(msg)
a = args.download
session = start_psid_session(user=args.username,
password=args.password)
if a.startswith("a"): # download all
download_all_data(session)
elif a.startswith("i"): # download individual file
download_ind_cross_year(session, to_csv=True)
elif a.startswith("p"): # download parent id file
download_parentfile(session, to_csv=True)
else:
# download single family file
m = re.match(r"f?(\d+)", a.lower())
if m is not None:
yr = m.groups()[0]
yr = _convert_to_4_digit_year(yr)
rn = file_lookup[yr]
fn = "FAM" + | |
def LogError(self, *args): return _pysmile_impl.DSL_errorStringHandler_LogError(self, *args)
def GetError(self, *args): return _pysmile_impl.DSL_errorStringHandler_GetError(self, *args)
def GetLastError(self): return _pysmile_impl.DSL_errorStringHandler_GetLastError(self)
def GetErrorMessage(self, *args): return _pysmile_impl.DSL_errorStringHandler_GetErrorMessage(self, *args)
def GetLastErrorMessage(self): return _pysmile_impl.DSL_errorStringHandler_GetLastErrorMessage(self)
def GetNumberOfErrors(self): return _pysmile_impl.DSL_errorStringHandler_GetNumberOfErrors(self)
def Flush(self): return _pysmile_impl.DSL_errorStringHandler_Flush(self)
def RedirectToFile(self, *args): return _pysmile_impl.DSL_errorStringHandler_RedirectToFile(self, *args)
def Redirect(self, *args): return _pysmile_impl.DSL_errorStringHandler_Redirect(self, *args)
DSL_errorStringHandler_swigregister = _pysmile_impl.DSL_errorStringHandler_swigregister
DSL_errorStringHandler_swigregister(DSL_errorStringHandler)
class DSL_progress(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, DSL_progress, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, DSL_progress, name)
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
def Tick(self, *args): return _pysmile_impl.DSL_progress_Tick(self, *args)
__swig_destroy__ = _pysmile_impl.delete_DSL_progress
__del__ = lambda self : None;
DSL_progress_swigregister = _pysmile_impl.DSL_progress_swigregister
DSL_progress_swigregister(DSL_progress)
cvar = _pysmile_impl.cvar
DSL_CPT_MIN_STATES = _pysmile_impl.DSL_CPT_MIN_STATES
DSL_CPT_EPSILON = _pysmile_impl.DSL_CPT_EPSILON
class DSL_cpt(DSL_nodeDefinition):
__swig_setmethods__ = {}
for _s in [DSL_nodeDefinition]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, DSL_cpt, name, value)
__swig_getmethods__ = {}
for _s in [DSL_nodeDefinition]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, DSL_cpt, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _pysmile_impl.new_DSL_cpt(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pysmile_impl.delete_DSL_cpt
__del__ = lambda self : None;
def GetType(self): return _pysmile_impl.DSL_cpt_GetType(self)
def GetTypeName(self): return _pysmile_impl.DSL_cpt_GetTypeName(self)
def GetStatesNames(self): return _pysmile_impl.DSL_cpt_GetStatesNames(self)
def GetProbabilities(self): return _pysmile_impl.DSL_cpt_GetProbabilities(self)
def GetTemporalProbabilities(self, *args): return _pysmile_impl.DSL_cpt_GetTemporalProbabilities(self, *args)
def SetTemporalProbabilities(self, *args): return _pysmile_impl.DSL_cpt_SetTemporalProbabilities(self, *args)
def GetSize(self): return _pysmile_impl.DSL_cpt_GetSize(self)
def GetNumberOfStates(self): return _pysmile_impl.DSL_cpt_GetNumberOfStates(self)
def SetProbabilities(self, *args): return _pysmile_impl.DSL_cpt_SetProbabilities(self, *args)
def RenameState(self, *args): return _pysmile_impl.DSL_cpt_RenameState(self, *args)
def RenameStates(self, *args): return _pysmile_impl.DSL_cpt_RenameStates(self, *args)
def NextCoordinates(self, *args): return _pysmile_impl.DSL_cpt_NextCoordinates(self, *args)
def PrevCoordinates(self, *args): return _pysmile_impl.DSL_cpt_PrevCoordinates(self, *args)
def AddState(self, *args): return _pysmile_impl.DSL_cpt_AddState(self, *args)
def InsertState(self, *args): return _pysmile_impl.DSL_cpt_InsertState(self, *args)
def RemoveState(self, *args): return _pysmile_impl.DSL_cpt_RemoveState(self, *args)
def SetNumberOfStates(self, *args): return _pysmile_impl.DSL_cpt_SetNumberOfStates(self, *args)
def ChangeOrderOfStates(self, *args): return _pysmile_impl.DSL_cpt_ChangeOrderOfStates(self, *args)
def AddParent(self, *args): return _pysmile_impl.DSL_cpt_AddParent(self, *args)
def AbsorbEvidenceFromParent(self, *args): return _pysmile_impl.DSL_cpt_AbsorbEvidenceFromParent(self, *args)
def MarginalizeParent(self, *args): return _pysmile_impl.DSL_cpt_MarginalizeParent(self, *args)
def RemoveParent(self, *args): return _pysmile_impl.DSL_cpt_RemoveParent(self, *args)
def DaddyGetsBigger(self, *args): return _pysmile_impl.DSL_cpt_DaddyGetsBigger(self, *args)
def DaddyGetsSmaller(self, *args): return _pysmile_impl.DSL_cpt_DaddyGetsSmaller(self, *args)
def DaddyChangedOrderOfOutcomes(self, *args): return _pysmile_impl.DSL_cpt_DaddyChangedOrderOfOutcomes(self, *args)
def OrderOfParentsGetsChanged(self, *args): return _pysmile_impl.DSL_cpt_OrderOfParentsGetsChanged(self, *args)
def SetProbability(self, *args): return _pysmile_impl.DSL_cpt_SetProbability(self, *args)
def GetProbability(self, *args): return _pysmile_impl.DSL_cpt_GetProbability(self, *args)
def CleanUp(self, deep = 0): return _pysmile_impl.DSL_cpt_CleanUp(self, deep)
def CheckReadiness(self, deep = 0): return _pysmile_impl.DSL_cpt_CheckReadiness(self, deep)
def CheckConsistency(self, deep = 0): return _pysmile_impl.DSL_cpt_CheckConsistency(self, deep)
def CreateUniqueStateName(self, *args): return _pysmile_impl.DSL_cpt_CreateUniqueStateName(self, *args)
def ReCreateFromNetworkStructure(self): return _pysmile_impl.DSL_cpt_ReCreateFromNetworkStructure(self)
def Clone(self, *args): return _pysmile_impl.DSL_cpt_Clone(self, *args)
def CheckParentsStructure(self): return _pysmile_impl.DSL_cpt_CheckParentsStructure(self)
def AddOutcome(self, *args): return _pysmile_impl.DSL_cpt_AddOutcome(self, *args)
def InsertOutcome(self, *args): return _pysmile_impl.DSL_cpt_InsertOutcome(self, *args)
def RemoveOutcome(self, *args): return _pysmile_impl.DSL_cpt_RemoveOutcome(self, *args)
def GetNumberOfOutcomes(self): return _pysmile_impl.DSL_cpt_GetNumberOfOutcomes(self)
def RenameOutcome(self, *args): return _pysmile_impl.DSL_cpt_RenameOutcome(self, *args)
def RenameOutcomes(self, *args): return _pysmile_impl.DSL_cpt_RenameOutcomes(self, *args)
def GetOutcomesNames(self): return _pysmile_impl.DSL_cpt_GetOutcomesNames(self)
def SetNumberOfOutcomes(self, *args): return _pysmile_impl.DSL_cpt_SetNumberOfOutcomes(self, *args)
def ChangeOrderOfOutcomes(self, *args): return _pysmile_impl.DSL_cpt_ChangeOrderOfOutcomes(self, *args)
def GetDefinition(self, *args): return _pysmile_impl.DSL_cpt_GetDefinition(self, *args)
def SetDefinition(self, *args): return _pysmile_impl.DSL_cpt_SetDefinition(self, *args)
def MakeUniform(self): return _pysmile_impl.DSL_cpt_MakeUniform(self)
DSL_cpt_swigregister = _pysmile_impl.DSL_cpt_swigregister
DSL_cpt_swigregister(DSL_cpt)
class DSL_truthTable(DSL_cpt):
__swig_setmethods__ = {}
for _s in [DSL_cpt]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, DSL_truthTable, name, value)
__swig_getmethods__ = {}
for _s in [DSL_cpt]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, DSL_truthTable, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _pysmile_impl.new_DSL_truthTable(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pysmile_impl.delete_DSL_truthTable
__del__ = lambda self : None;
def DaddyGetsBigger(self, *args): return _pysmile_impl.DSL_truthTable_DaddyGetsBigger(self, *args)
def GetType(self): return _pysmile_impl.DSL_truthTable_GetType(self)
def GetTypeName(self): return _pysmile_impl.DSL_truthTable_GetTypeName(self)
def SetProbability(self, *args): return _pysmile_impl.DSL_truthTable_SetProbability(self, *args)
def SetResultingState(self, *args): return _pysmile_impl.DSL_truthTable_SetResultingState(self, *args)
def SetResultingStates(self, *args): return _pysmile_impl.DSL_truthTable_SetResultingStates(self, *args)
def GetResultingState(self, *args): return _pysmile_impl.DSL_truthTable_GetResultingState(self, *args)
def GetResultingStates(self, *args): return _pysmile_impl.DSL_truthTable_GetResultingStates(self, *args)
def GetTemporalResultingStates(self, *args): return _pysmile_impl.DSL_truthTable_GetTemporalResultingStates(self, *args)
def SetTemporalResultingStates(self, *args): return _pysmile_impl.DSL_truthTable_SetTemporalResultingStates(self, *args)
def CheckConsistency(self, deep = 0): return _pysmile_impl.DSL_truthTable_CheckConsistency(self, deep)
def Clone(self, *args): return _pysmile_impl.DSL_truthTable_Clone(self, *args)
def SetDefinition(self, *args): return _pysmile_impl.DSL_truthTable_SetDefinition(self, *args)
def MakeUniform(self): return _pysmile_impl.DSL_truthTable_MakeUniform(self)
DSL_truthTable_swigregister = _pysmile_impl.DSL_truthTable_swigregister
DSL_truthTable_swigregister(DSL_truthTable)
DSL_NOISYMAX_SQUARE_DISTANCE = _pysmile_impl.DSL_NOISYMAX_SQUARE_DISTANCE
DSL_NOISYMAX_KL_DISTANCE = _pysmile_impl.DSL_NOISYMAX_KL_DISTANCE
class DSL_noisyMAX(DSL_ciDefinition):
__swig_setmethods__ = {}
for _s in [DSL_ciDefinition]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, DSL_noisyMAX, name, value)
__swig_getmethods__ = {}
for _s in [DSL_ciDefinition]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, DSL_noisyMAX, name)
__repr__ = _swig_repr
def GetParentOutcomeStrengths(self, *args): return _pysmile_impl.DSL_noisyMAX_GetParentOutcomeStrengths(self, *args)
def GetNumOfParentOutcomes(self, *args): return _pysmile_impl.DSL_noisyMAX_GetNumOfParentOutcomes(self, *args)
def GetStrengthOfOutcome(self, *args): return _pysmile_impl.DSL_noisyMAX_GetStrengthOfOutcome(self, *args)
def GetOutcomeOfStrength(self, *args): return _pysmile_impl.DSL_noisyMAX_GetOutcomeOfStrength(self, *args)
def GetNumberOfParents(self): return _pysmile_impl.DSL_noisyMAX_GetNumberOfParents(self)
def SetParentOutcomeStrengths(self, *args): return _pysmile_impl.DSL_noisyMAX_SetParentOutcomeStrengths(self, *args)
def GetTemporalParentOutcomeStrengths(self, *args): return _pysmile_impl.DSL_noisyMAX_GetTemporalParentOutcomeStrengths(self, *args)
def SetTemporalParentOutcomeStrengths(self, *args): return _pysmile_impl.DSL_noisyMAX_SetTemporalParentOutcomeStrengths(self, *args)
def __init__(self, *args):
this = _pysmile_impl.new_DSL_noisyMAX(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pysmile_impl.delete_DSL_noisyMAX
__del__ = lambda self : None;
def GetType(self): return _pysmile_impl.DSL_noisyMAX_GetType(self)
def GetTypeName(self): return _pysmile_impl.DSL_noisyMAX_GetTypeName(self)
def Clone(self, *args): return _pysmile_impl.DSL_noisyMAX_Clone(self, *args)
def CleanUp(self, deep = 0): return _pysmile_impl.DSL_noisyMAX_CleanUp(self, deep)
def ReCreateFromNetworkStructure(self): return _pysmile_impl.DSL_noisyMAX_ReCreateFromNetworkStructure(self)
def GetParentStartingPosition(self, *args): return _pysmile_impl.DSL_noisyMAX_GetParentStartingPosition(self, *args)
def CheckConsistency(self, deep = 0): return _pysmile_impl.DSL_noisyMAX_CheckConsistency(self, deep)
def AddParent(self, *args): return _pysmile_impl.DSL_noisyMAX_AddParent(self, *args)
def RemoveParent(self, *args): return _pysmile_impl.DSL_noisyMAX_RemoveParent(self, *args)
def DaddyGetsBigger(self, *args): return _pysmile_impl.DSL_noisyMAX_DaddyGetsBigger(self, *args)
def DaddyGetsSmaller(self, *args): return _pysmile_impl.DSL_noisyMAX_DaddyGetsSmaller(self, *args)
def DaddyChangedOrderOfOutcomes(self, *args): return _pysmile_impl.DSL_noisyMAX_DaddyChangedOrderOfOutcomes(self, *args)
def OrderOfParentsGetsChanged(self, *args): return _pysmile_impl.DSL_noisyMAX_OrderOfParentsGetsChanged(self, *args)
def ChangeOrderOfOutcomes(self, *args): return _pysmile_impl.DSL_noisyMAX_ChangeOrderOfOutcomes(self, *args)
def ChangeOrderOfStrengths(self, *args): return _pysmile_impl.DSL_noisyMAX_ChangeOrderOfStrengths(self, *args)
def SetDefinition(self, *args): return _pysmile_impl.DSL_noisyMAX_SetDefinition(self, *args)
def CheckCiWeightsConsistency(self, *args): return _pysmile_impl.DSL_noisyMAX_CheckCiWeightsConsistency(self, *args)
def GetHenrionProbabilities(self, *args): return _pysmile_impl.DSL_noisyMAX_GetHenrionProbabilities(self, *args)
def SetHenrionProbabilities(self, *args): return _pysmile_impl.DSL_noisyMAX_SetHenrionProbabilities(self, *args)
def CheckHenrionConsistency(self, *args): return _pysmile_impl.DSL_noisyMAX_CheckHenrionConsistency(self, *args)
def CiToCpt(self, *args): return _pysmile_impl.DSL_noisyMAX_CiToCpt(self, *args)
def CptToCi(self): return _pysmile_impl.DSL_noisyMAX_CptToCi(self)
def CiIndexConstrained(self, *args): return _pysmile_impl.DSL_noisyMAX_CiIndexConstrained(self, *args)
def IsNonZero(self, *args): return _pysmile_impl.DSL_noisyMAX_IsNonZero(self, *args)
def SquareDistance(self, *args): return _pysmile_impl.DSL_noisyMAX_SquareDistance(self, *args)
def SquareCptToCi(self, *args): return _pysmile_impl.DSL_noisyMAX_SquareCptToCi(self, *args)
def KLDistance(self, *args): return _pysmile_impl.DSL_noisyMAX_KLDistance(self, *args)
def KLCptToCi(self, *args): return _pysmile_impl.DSL_noisyMAX_KLCptToCi(self, *args)
def SetLegacyNoisyOrProbabilities(self, *args): return _pysmile_impl.DSL_noisyMAX_SetLegacyNoisyOrProbabilities(self, *args)
def GetLegacyNoisyOrProbabilities(self, *args): return _pysmile_impl.DSL_noisyMAX_GetLegacyNoisyOrProbabilities(self, *args)
def CalculateCptColumn(self, *args): return _pysmile_impl.DSL_noisyMAX_CalculateCptColumn(self, *args)
def GetCpt(self): return _pysmile_impl.DSL_noisyMAX_GetCpt(self)
def AbsorbEvidenceFromParent(self, *args): return _pysmile_impl.DSL_noisyMAX_AbsorbEvidenceFromParent(self, *args)
def MarginalizeParent(self, *args): return _pysmile_impl.DSL_noisyMAX_MarginalizeParent(self, *args)
DSL_noisyMAX_swigregister = _pysmile_impl.DSL_noisyMAX_swigregister
DSL_noisyMAX_swigregister(DSL_noisyMAX)
DSL_NOISYADDER_MIN_STATES = _pysmile_impl.DSL_NOISYADDER_MIN_STATES
class DSL_noisyAdder(DSL_ciDefinition):
__swig_setmethods__ = {}
for _s in [DSL_ciDefinition]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, DSL_noisyAdder, name, value)
__swig_getmethods__ = {}
for _s in [DSL_ciDefinition]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, DSL_noisyAdder, name)
__repr__ = _swig_repr
fun_average = _pysmile_impl.DSL_noisyAdder_fun_average
fun_single_fault = _pysmile_impl.DSL_noisyAdder_fun_single_fault
def __init__(self, *args):
this = _pysmile_impl.new_DSL_noisyAdder(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pysmile_impl.delete_DSL_noisyAdder
__del__ = lambda self : None;
def GetType(self): return _pysmile_impl.DSL_noisyAdder_GetType(self)
def GetTypeName(self): return _pysmile_impl.DSL_noisyAdder_GetTypeName(self)
def ReCreateFromNetworkStructure(self): return _pysmile_impl.DSL_noisyAdder_ReCreateFromNetworkStructure(self)
def GetParentStartingPosition(self, *args): return _pysmile_impl.DSL_noisyAdder_GetParentStartingPosition(self, *args)
def Clone(self, *args): return _pysmile_impl.DSL_noisyAdder_Clone(self, *args)
def CheckConsistency(self, deep = 0): return _pysmile_impl.DSL_noisyAdder_CheckConsistency(self, deep)
def GetNumberOfParents(self): return _pysmile_impl.DSL_noisyAdder_GetNumberOfParents(self)
def GetNumOfParentOutcomes(self, *args): return _pysmile_impl.DSL_noisyAdder_GetNumOfParentOutcomes(self, *args)
def AddParent(self, *args): return _pysmile_impl.DSL_noisyAdder_AddParent(self, *args)
def RemoveParent(self, *args): return _pysmile_impl.DSL_noisyAdder_RemoveParent(self, *args)
def DaddyGetsBigger(self, *args): return _pysmile_impl.DSL_noisyAdder_DaddyGetsBigger(self, *args)
def DaddyGetsSmaller(self, *args): return _pysmile_impl.DSL_noisyAdder_DaddyGetsSmaller(self, *args)
def DaddyChangedOrderOfOutcomes(self, *args): return _pysmile_impl.DSL_noisyAdder_DaddyChangedOrderOfOutcomes(self, *args)
def OrderOfParentsGetsChanged(self, *args): return _pysmile_impl.DSL_noisyAdder_OrderOfParentsGetsChanged(self, *args)
def ChangeOrderOfOutcomes(self, *args): return _pysmile_impl.DSL_noisyAdder_ChangeOrderOfOutcomes(self, *args)
def SetDefinition(self, *args): return _pysmile_impl.DSL_noisyAdder_SetDefinition(self, *args)
def CheckCiWeightsConsistency(self, *args): return _pysmile_impl.DSL_noisyAdder_CheckCiWeightsConsistency(self, *args)
def CiToCpt(self, *args): return _pysmile_impl.DSL_noisyAdder_CiToCpt(self, *args)
def AddOutcome(self, *args): return _pysmile_impl.DSL_noisyAdder_AddOutcome(self, *args)
def InsertOutcome(self, *args): return _pysmile_impl.DSL_noisyAdder_InsertOutcome(self, *args)
def RemoveOutcome(self, *args): return _pysmile_impl.DSL_noisyAdder_RemoveOutcome(self, *args)
def SetNumberOfOutcomes(self, *args): return _pysmile_impl.DSL_noisyAdder_SetNumberOfOutcomes(self, *args)
def CptToCi(self): return _pysmile_impl.DSL_noisyAdder_CptToCi(self)
def GetCpt(self): return _pysmile_impl.DSL_noisyAdder_GetCpt(self)
def CiIndexConstrained(self, *args): return _pysmile_impl.DSL_noisyAdder_CiIndexConstrained(self, *args)
def GetDistinguishedState(self): return _pysmile_impl.DSL_noisyAdder_GetDistinguishedState(self)
def GetParentDistinguishedState(self, *args): return _pysmile_impl.DSL_noisyAdder_GetParentDistinguishedState(self, *args)
def GetParentWeight(self, *args): return _pysmile_impl.DSL_noisyAdder_GetParentWeight(self, *args)
def ParentWeights(self): return _pysmile_impl.DSL_noisyAdder_ParentWeights(self)
def ParentDistinguishedStates(self): return _pysmile_impl.DSL_noisyAdder_ParentDistinguishedStates(self)
def SetDistinguishedState(self, *args): return _pysmile_impl.DSL_noisyAdder_SetDistinguishedState(self, *args)
def SetParentDistinguishedState(self, *args): return _pysmile_impl.DSL_noisyAdder_SetParentDistinguishedState(self, *args)
def SetParentWeight(self, *args): return _pysmile_impl.DSL_noisyAdder_SetParentWeight(self, *args)
def SetFunction(self, *args): return _pysmile_impl.DSL_noisyAdder_SetFunction(self, *args)
def GetFunction(self): return _pysmile_impl.DSL_noisyAdder_GetFunction(self)
def GetTemporalFunction(self, *args): return _pysmile_impl.DSL_noisyAdder_GetTemporalFunction(self, *args)
def SetTemporalFunction(self, *args): return _pysmile_impl.DSL_noisyAdder_SetTemporalFunction(self, *args)
def GetTemporalDistinguishedState(self, *args): return _pysmile_impl.DSL_noisyAdder_GetTemporalDistinguishedState(self, *args)
def SetTemporalDistinguishedState(self, *args): return _pysmile_impl.DSL_noisyAdder_SetTemporalDistinguishedState(self, *args)
def GetTemporalParentInfo(self, *args): return _pysmile_impl.DSL_noisyAdder_GetTemporalParentInfo(self, *args)
def SetTemporalParentInfo(self, *args): return _pysmile_impl.DSL_noisyAdder_SetTemporalParentInfo(self, *args)
DSL_noisyAdder_swigregister = _pysmile_impl.DSL_noisyAdder_swigregister
DSL_noisyAdder_swigregister(DSL_noisyAdder)
DSL_LIST_MIN_CHOICES = _pysmile_impl.DSL_LIST_MIN_CHOICES
class DSL_list(DSL_nodeDefinition):
__swig_setmethods__ = {}
for _s in [DSL_nodeDefinition]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, DSL_list, name, value)
__swig_getmethods__ = {}
for _s in [DSL_nodeDefinition]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, DSL_list, name)
__repr__ = _swig_repr
def __init__(self, *args):
this = _pysmile_impl.new_DSL_list(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _pysmile_impl.delete_DSL_list
__del__ = lambda self : None;
def GetType(self): return _pysmile_impl.DSL_list_GetType(self)
def GetTypeName(self): return _pysmile_impl.DSL_list_GetTypeName(self)
def GetChoicesNames(self): return _pysmile_impl.DSL_list_GetChoicesNames(self)
def GetChoicesValues(self): return _pysmile_impl.DSL_list_GetChoicesValues(self)
def GetSize(self): return _pysmile_impl.DSL_list_GetSize(self)
def GetNumberOfChoices(self): return _pysmile_impl.DSL_list_GetNumberOfChoices(self)
def RenameChoice(self, *args): return _pysmile_impl.DSL_list_RenameChoice(self,
#!/usr/bin/env python
# coding=utf-8
#
# Copyright 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from splunklib import six
from splunklib.searchcommands import Configuration, StreamingCommand
from splunklib.searchcommands.decorators import ConfigurationSetting, Option
from splunklib.searchcommands.search_command import SearchCommand
from splunklib.client import Service
from splunklib.six.moves import StringIO
from splunklib.six.moves import zip as izip
from json.encoder import encode_basestring as encode_string
from unittest import main, TestCase
import csv
import os
import re
@Configuration()
class TestCommand(SearchCommand):
required_option_1 = Option(require=True)
required_option_2 = Option(require=True)
def echo(self, records):
for record in records:
if record.get('action') == 'raise_exception':
if six.PY2:
raise StandardError(self)
else:
raise Exception(self)
yield record
def _execute(self, ifile, process):
SearchCommand._execute(self, ifile, self.echo)
class ConfigurationSettings(SearchCommand.ConfigurationSettings):
# region SCP v1/v2 properties
generating = ConfigurationSetting()
required_fields = ConfigurationSetting()
streaming_preop = ConfigurationSetting()
# endregion
# region SCP v1 properties
clear_required_fields = ConfigurationSetting()
generates_timeorder = ConfigurationSetting()
overrides_timeorder = ConfigurationSetting()
requires_preop = ConfigurationSetting()
retainsevents = ConfigurationSetting()
streaming = ConfigurationSetting()
# endregion
# region SCP v2 properties
distributed = ConfigurationSetting()
maxinputs = ConfigurationSetting()
run_in_preview = ConfigurationSetting()
type = ConfigurationSetting()
# endregion
@Configuration()
class TestStreamingCommand(StreamingCommand):
def stream(self, records):
serial_number = 0
for record in records:
action = record['action']
if action == 'raise_error':
raise RuntimeError('Testing')
value = self.search_results_info if action == 'get_search_results_info' else None
yield {'_serial': serial_number, 'data': value}
serial_number += 1
return
class TestSearchCommand(TestCase):
def setUp(self):
TestCase.setUp(self)
def test_process_scpv1(self):
# TestCommand.process should complain if supports_getinfo == False
# We support dynamic configuration, not static
# The exception line number may change, so we're using a regex match instead of a string match
expected = re.compile(
r'error_message=RuntimeError at ".+search_command\.py", line \d\d\d : Command test appears to be '
r'statically configured for search command protocol version 1 and static configuration is unsupported by '
r'splunklib.searchcommands. Please ensure that default/commands.conf contains this stanza:\n'
r'\[test\]\n'
r'filename = test.py\n'
r'enableheader = true\n'
r'outputheader = true\n'
r'requires_srinfo = true\n'
r'supports_getinfo = true\n'
r'supports_multivalues = true\n'
r'supports_rawargs = true')
argv = ['test.py', 'not__GETINFO__or__EXECUTE__', 'option=value', 'fieldname']
command = TestCommand()
result = StringIO()
self.assertRaises(SystemExit, command.process, argv, ofile=result)
self.assertRegexpMatches(result.getvalue(), expected)
# TestCommand.process should return configuration settings on Getinfo probe
argv = ['test.py', '__GETINFO__', 'required_option_1=value', 'required_option_2=value']
command = TestCommand()
ifile = StringIO('\n')
result = StringIO()
self.assertEqual(str(command.configuration), '')
if six.PY2:
expected = ("[(u'clear_required_fields', None, [1]), (u'distributed', None, [2]), (u'generates_timeorder', None, [1]), "
"(u'generating', None, [1, 2]), (u'maxinputs', None, [2]), (u'overrides_timeorder', None, [1]), "
"(u'required_fields', None, [1, 2]), (u'requires_preop', None, [1]), (u'retainsevents', None, [1]), "
"(u'run_in_preview', None, [2]), (u'streaming', None, [1]), (u'streaming_preop', None, [1, 2]), "
"(u'type', None, [2])]")
else:
expected = ("[('clear_required_fields', None, [1]), ('distributed', None, [2]), ('generates_timeorder', None, [1]), "
"('generating', None, [1, 2]), ('maxinputs', None, [2]), ('overrides_timeorder', None, [1]), "
"('required_fields', None, [1, 2]), ('requires_preop', None, [1]), ('retainsevents', None, [1]), "
"('run_in_preview', None, [2]), ('streaming', None, [1]), ('streaming_preop', None, [1, 2]), "
"('type', None, [2])]")
self.assertEqual(
repr(command.configuration), expected)
try:
# noinspection PyTypeChecker
command.process(argv, ifile, ofile=result)
except BaseException as error:
self.fail('{0}: {1}: {2}\n'.format(type(error).__name__, error, result.getvalue()))
self.assertEqual('\r\n\r\n\r\n', result.getvalue()) # No message header and no configuration settings
ifile = StringIO('\n')
result = StringIO()
# We might also put this sort of code into our SearchCommand.prepare override ...
configuration = command.configuration
# SCP v1/v2 configuration settings
configuration.generating = True
configuration.required_fields = ['foo', 'bar']
configuration.streaming_preop = 'some streaming command'
# SCP v1 configuration settings
configuration.clear_required_fields = True
configuration.generates_timeorder = True
configuration.overrides_timeorder = True
configuration.requires_preop = True
configuration.retainsevents = True
configuration.streaming = True
# SCP v2 configuration settings (SCP v1 requires that maxinputs and run_in_preview are set in commands.conf)
configuration.distributed = True
configuration.maxinputs = 50000
configuration.run_in_preview = True
configuration.type = 'streaming'
if six.PY2:
expected = ('clear_required_fields="True", generates_timeorder="True", generating="True", overrides_timeorder="True", '
'required_fields="[u\'foo\', u\'bar\']", requires_preop="True", retainsevents="True", streaming="True", '
'streaming_preop="some streaming command"')
else:
expected = ('clear_required_fields="True", generates_timeorder="True", generating="True", overrides_timeorder="True", '
'required_fields="[\'foo\', \'bar\']", requires_preop="True", retainsevents="True", streaming="True", '
'streaming_preop="some streaming command"')
self.assertEqual(str(command.configuration), expected)
if six.PY2:
expected = ("[(u'clear_required_fields', True, [1]), (u'distributed', True, [2]), (u'generates_timeorder', True, [1]), "
"(u'generating', True, [1, 2]), (u'maxinputs', 50000, [2]), (u'overrides_timeorder', True, [1]), "
"(u'required_fields', [u'foo', u'bar'], [1, 2]), (u'requires_preop', True, [1]), "
"(u'retainsevents', True, [1]), (u'run_in_preview', True, [2]), (u'streaming', True, [1]), "
"(u'streaming_preop', u'some streaming command', [1, 2]), (u'type', u'streaming', [2])]")
else:
expected = ("[('clear_required_fields', True, [1]), ('distributed', True, [2]), ('generates_timeorder', True, [1]), "
"('generating', True, [1, 2]), ('maxinputs', 50000, [2]), ('overrides_timeorder', True, [1]), "
"('required_fields', ['foo', 'bar'], [1, 2]), ('requires_preop', True, [1]), "
"('retainsevents', True, [1]), ('run_in_preview', True, [2]), ('streaming', True, [1]), "
"('streaming_preop', 'some streaming command', [1, 2]), ('type', 'streaming', [2])]")
self.assertEqual(
repr(command.configuration), expected)
try:
# noinspection PyTypeChecker
command.process(argv, ifile, ofile=result)
except BaseException as error:
self.fail('{0}: {1}: {2}\n'.format(type(error).__name__, error, result.getvalue()))
result.seek(0)
reader = csv.reader(result)
self.assertEqual([], next(reader))
observed = dict(izip(next(reader), next(reader)))
self.assertRaises(StopIteration, lambda: next(reader))
expected = {
'clear_required_fields': '1', '__mv_clear_required_fields': '',
'generating': '1', '__mv_generating': '',
'generates_timeorder': '1', '__mv_generates_timeorder': '',
'overrides_timeorder': '1', '__mv_overrides_timeorder': '',
'requires_preop': '1', '__mv_requires_preop': '',
'required_fields': 'foo,bar', '__mv_required_fields': '',
'retainsevents': '1', '__mv_retainsevents': '',
'streaming': '1', '__mv_streaming': '',
'streaming_preop': 'some streaming command', '__mv_streaming_preop': '',
}
self.assertDictEqual(expected, observed) # No message header and no configuration settings
for action in '__GETINFO__', '__EXECUTE__':
# TestCommand.process should produce an error record on parser errors
argv = [
'test.py', action, 'required_option_1=value', 'required_option_2=value', 'undefined_option=value',
'fieldname_1', 'fieldname_2']
command = TestCommand()
ifile = StringIO('\n')
result = StringIO()
self.assertRaises(SystemExit, command.process, argv, ifile, ofile=result)
self.assertTrue(
'error_message=Unrecognized test command option: undefined_option="value"\r\n\r\n',
result.getvalue())
# TestCommand.process should produce an error record when required options are missing
argv = ['test.py', action, 'required_option_2=value', 'fieldname_1']
command = TestCommand()
ifile = StringIO('\n')
result = StringIO()
self.assertRaises(SystemExit, command.process, argv, ifile, ofile=result)
self.assertTrue(
'error_message=A value for test command option required_option_1 is required\r\n\r\n',
result.getvalue())
argv = ['test.py', action, 'fieldname_1']
command = TestCommand()
ifile = StringIO('\n')
result = StringIO()
self.assertRaises(SystemExit, command.process, argv, ifile, ofile=result)
self.assertTrue(
'error_message=Values for these test command options are required: required_option_1, required_option_2'
'\r\n\r\n',
result.getvalue())
# TestStreamingCommand.process should exit on processing exceptions
ifile = StringIO('\naction\r\nraise_error\r\n')
argv = ['test.py', '__EXECUTE__']
command = TestStreamingCommand()
result = StringIO()
try:
# noinspection PyTypeChecker
command.process(argv, ifile, ofile=result)
except SystemExit as error:
self.assertNotEqual(error.code, 0)
self.assertRegexpMatches(
result.getvalue(),
r'^error_message=RuntimeError at ".+", line \d+ : Testing\r\n\r\n$')
except BaseException as error:
self.fail('Expected SystemExit, but caught {}: {}'.format(type(error).__name__, error))
else:
self.fail('Expected SystemExit, but no exception was raised')
# Command.process should provide access to search results info
info_path = os.path.join(
self._package_directory, 'recordings', 'scpv1', 'Splunk-6.3', 'countmatches.execute.dispatch_dir',
'externSearchResultsInfo.csv')
ifile = StringIO('infoPath:' + info_path + '\n\naction\r\nget_search_results_info\r\n')
argv = ['test.py', '__EXECUTE__']
command = TestStreamingCommand()
result = StringIO()
try:
# noinspection PyTypeChecker
command.process(argv, ifile, ofile=result)
except BaseException as error:
self.fail('Expected no exception, but caught {}: {}'.format(type(error).__name__, error))
else:
self.assertRegexpMatches(
result.getvalue(),
r'^\r\n'
r'('
r'data,__mv_data,_serial,__mv__serial\r\n'
r'"\{.*u\'is_summary_index\': 0, .+\}",,0,'
r'|'
r'_serial,__mv__serial,data,__mv_data\r\n'
r'0,,"\{.*u\'is_summary_index\': 0, .+\}",'
r')'
r'\r\n$'
)
# TestStreamingCommand.process should provide access to a service object when search results info is available
self.assertIsInstance(command.service, Service)
self.assertEqual(command.service.authority,
command.search_results_info.splunkd_uri)
self.assertEqual(command.service.scheme,
command.search_results_info.splunkd_protocol)
self.assertEqual(command.service.port,
command.search_results_info.splunkd_port)
self.assertEqual(command.service.token,
command.search_results_info.auth_token)
self.assertEqual(command.service.namespace.app,
command.search_results_info.ppc_app)
self.assertEqual(command.service.namespace.owner,
None)
self.assertEqual(command.service.namespace.sharing,
None)
# Command.process should not provide access to search results info or a service object when the 'infoPath'
# input header is unavailable
ifile = StringIO('\naction\r\nget_search_results_info')
argv = ['teststreaming.py', '__EXECUTE__']
command = TestStreamingCommand()
# noinspection PyTypeChecker
command.process(argv, ifile, ofile=result)
self.assertIsNone(command.search_results_info)
self.assertIsNone(command.service)
return
def test_process_scpv2(self):
# SearchCommand.process should
# 1. Recognize all standard options:
metadata = (
'{{'
'"action": "getinfo", "preview": false, "searchinfo": {{'
'"latest_time": "0",'
'"splunk_version": "20150522",'
'"username": "admin",'
'"app": "searchcommands_app",'
'"args": ['
'"logging_configuration={logging_configuration}",'
'"logging_level={logging_level}",'
'"record={record}",'
'"show_configuration={show_configuration}",'
'"required_option_1=value_1",'
'"required_option_2=value_2"'
'],'
'"search": "A%7C%20inputlookup%20tweets%20%7C%20countmatches%20fieldname%3Dword_count%20pattern%3D%22%5Cw%2B%22%20text%20record%3Dt%20%7C%20export%20add_timestamp%3Df%20add_offset%3Dt%20format%3Dcsv%20segmentation%3Draw",'
'"earliest_time": "0",'
'"session_key": "<KEY>
'"owner": "admin",'
'"sid": "1433261372.158",'
'"splunkd_uri": "https://127.0.0.1:8089",'
'"dispatch_dir": {dispatch_dir},'
'"raw_args": ['
'"logging_configuration={logging_configuration}",'
'"logging_level={logging_level}",'
'"record={record}",'
'"show_configuration={show_configuration}",'
'"required_option_1=value_1",'
'"required_option_2=value_2"'
'],'
'"maxresultrows": 10,'
'"command": "countmatches"'
'}}'
'}}')
basedir = self._package_directory
default_logging_configuration = os.path.join(basedir, 'apps', 'app_with_logging_configuration', 'default', 'logging.conf')
dispatch_dir = os.path.join(basedir, 'recordings', 'scpv2', 'Splunk-6.3', 'countmatches.dispatch_dir')
logging_configuration = os.path.join(basedir, 'apps', 'app_with_logging_configuration', 'logging.conf')
logging_level = 'ERROR'
record = False
show_configuration = True
getinfo_metadata = metadata.format(
dispatch_dir=encode_string(dispatch_dir),
logging_configuration=encode_string(logging_configuration)[1:-1],
logging_level=logging_level,
| |
1)
invoked_bulk.pop()
logger.info("Querying VNs by obj_uuids for children+backref fields.")
ret_objs = self._vnc_lib.resource_list('virtual-network',
detail=True, obj_uuids=vn_uuids, fields=['routing_instances',
'virtual_machine_interface_back_refs'])
self.assertEqual(len(invoked_bulk), 1)
invoked_bulk.pop()
ret_ri_uuids = []
ret_vmi_uuids = []
for vn_obj in ret_objs:
ri_children = getattr(vn_obj, 'routing_instances',
'RI children absent')
self.assertNotEqual(ri_children, 'RI children absent')
ret_ri_uuids.extend([ri['uuid'] for ri in ri_children])
vmi_back_refs = getattr(vn_obj,
'virtual_machine_interface_back_refs',
'VMI backrefs absent')
self.assertNotEqual(vmi_back_refs, 'VMI backrefs absent')
ret_vmi_uuids.extend([vmi['uuid'] for vmi in vmi_back_refs])
self.assertThat(set(ri_uuids),
Equals(set(ret_ri_uuids) & set(ri_uuids)))
self.assertThat(set(vmi_uuids), Equals(set(ret_vmi_uuids)))
# end test_list_bulk_collection
def test_list_bulk_collection_with_malformed_filters(self):
obj_count = self._vnc_lib.POST_FOR_LIST_THRESHOLD + 1
vn_objs, _, _, _ = self._create_vn_ri_vmi()
vn_uuid = vn_objs[0].uuid
vn_uuids = [vn_uuid] +\
['bad-uuid'] * self._vnc_lib.POST_FOR_LIST_THRESHOLD
try:
results = self._vnc_lib.resource_list('virtual-network',
obj_uuids=vn_uuids)
self.assertEqual(len(results['virtual-networks']), 1)
self.assertEqual(results['virtual-networks'][0]['uuid'], vn_uuid)
except HttpError:
self.fail('Malformed object UUID filter was not ignored')
try:
results = self._vnc_lib.resource_list('routing-instance',
parent_id=vn_uuids,
detail=True)
self.assertEqual(len(results), 2)
for ri_obj in results:
self.assertEqual(ri_obj.parent_uuid, vn_uuid)
except HttpError:
self.fail('Malformed parent UUID filter was not ignored')
try:
results = self._vnc_lib.resource_list('virtual-machine-interface',
back_ref_id=vn_uuids,
detail=True)
self.assertEqual(len(results), 1)
vmi_obj = results[0]
self.assertEqual(vmi_obj.get_virtual_network_refs()[0]['uuid'],
vn_uuid)
except HttpError:
self.fail('Malformed back-ref UUID filter was not ignored')
# end class TestBulk
class TestCacheWithMetadata(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestCacheWithMetadata, cls).setUpClass(*args, **kwargs)
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestCacheWithMetadata, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def setUp(self):
self.uuid_cf = self.get_cf( 'config_db_uuid', 'obj_uuid_table')
self.cache_mgr = self._api_server._db_conn._object_db._obj_cache_mgr
return super(TestCacheWithMetadata, self).setUp()
# end setUp
def create_test_object(self, name=None):
vn_name = name or 'vn-%s' %(self.id())
vn_obj = vnc_api.VirtualNetwork(vn_name)
vn_obj.display_name = 'test-cache-obj'
self._vnc_lib.virtual_network_create(vn_obj)
return vn_obj
# end create_object
def prime_test_object(self, vn_obj):
self._vnc_lib.virtual_networks_list(obj_uuids=[vn_obj.uuid])
return vn_obj
# end prime_test_object
def create_and_prime_test_object(self, name=None):
vn_name = name or 'vn-%s' %(self.id())
return self.prime_test_object(self.create_test_object(vn_name))
# end create_and_prime_test_object
def test_hit_and_fresh(self):
vn_obj = self.create_and_prime_test_object()
uuid_cf = self.uuid_cf
vn_row = uuid_cf.get(vn_obj.uuid, include_timestamp=True)
with uuid_cf.patch_row(vn_obj.uuid,
new_columns={'fq_name': vn_row['fq_name'],
'prop:id_perms': vn_row['prop:id_perms'],
'type': vn_row['type']}):
ret_vn_objs = self._vnc_lib.virtual_networks_list(
obj_uuids=[vn_obj.uuid], detail=True)
self.assertEqual(ret_vn_objs[0].display_name, vn_obj.display_name)
# end test_hit_and_fresh
def test_hit_and_stale(self):
vn_obj = self.create_and_prime_test_object()
cache_mgr = self.cache_mgr
self.assertIn(vn_obj.uuid, cache_mgr._cache.keys())
uuid_cf = self.uuid_cf
vn_row = uuid_cf.get(vn_obj.uuid)
with uuid_cf.patches([
('column', (vn_obj.uuid, 'prop:display_name', 'stale-check-name')),
('column', (vn_obj.uuid, 'prop:id_perms', vn_row['prop:id_perms'])),
]):
ret_vn_objs = self._vnc_lib.virtual_networks_list(
obj_uuids=[vn_obj.uuid], detail=True)
self.assertEqual(
ret_vn_objs[0].display_name, 'stale-check-name')
# end test_hit_and_stale
def test_miss(self):
vn_obj = self.create_test_object()
cache_mgr = self.cache_mgr
self.assertNotIn(vn_obj.uuid, cache_mgr._cache.keys())
ret_vn_dicts = self._vnc_lib.virtual_networks_list(
obj_uuids=[vn_obj.uuid],
fields=['display_name'])['virtual-networks']
self.assertEqual(ret_vn_dicts[0]['display_name'],
vn_obj.display_name)
# end test_miss
def test_hits_stales_misses(self):
uuid_cf = self.uuid_cf
cache_mgr = self.cache_mgr
vn_hit_fresh_obj = self.create_and_prime_test_object(
'vn-hit-fresh-%s' %(self.id()))
vn_hit_stale_obj = self.create_and_prime_test_object(
'vn-hit-stale-%s' %(self.id()))
vn_miss_obj = self.create_test_object('vn-miss-%s' %(self.id()))
self.assertNotIn(vn_miss_obj.uuid, cache_mgr._cache.keys())
vn_hit_stale_row = uuid_cf.get(vn_hit_stale_obj.uuid)
with uuid_cf.patches([
('column', (vn_hit_fresh_obj.uuid,
'prop:display_name', 'fresh-check-name')),
('column', (vn_hit_stale_obj.uuid,
'prop:display_name', 'stale-check-name')),
('column', (vn_hit_stale_obj.uuid,
'prop:id_perms', vn_hit_stale_row['prop:id_perms'])),
]):
vn_uuids = [vn_hit_fresh_obj.uuid, vn_hit_stale_obj.uuid,
vn_miss_obj.uuid]
ret_vn_dicts = self._vnc_lib.virtual_networks_list(
obj_uuids=vn_uuids,
fields=['display_name'])['virtual-networks']
self.assertEqual(len(ret_vn_dicts), 3)
id_name_tuples = [(vn['uuid'], vn['display_name'])
for vn in ret_vn_dicts]
self.assertIn(
(vn_hit_fresh_obj.uuid, vn_hit_fresh_obj.display_name),
id_name_tuples)
self.assertIn((vn_hit_stale_obj.uuid, 'stale-check-name'),
id_name_tuples)
self.assertIn((vn_miss_obj.uuid, vn_miss_obj.display_name),
id_name_tuples)
# end test_hits_stales_misses
def test_evict_on_ref_type_same(self):
cache_mgr = self._api_server._db_conn._object_db._obj_cache_mgr
vn1_name = 'vn-1-%s' %(self.id())
vn2_name = 'vn-2-%s' %(self.id())
vn1_obj = self.create_test_object(vn1_name)
vn2_obj = self.create_test_object(vn2_name)
# prime RIs to cache
ri1_obj = self._vnc_lib.routing_instance_read(
fq_name=vn1_obj.fq_name+[vn1_name])
ri2_obj = self._vnc_lib.routing_instance_read(
fq_name=vn2_obj.fq_name+[vn2_name])
self.assertIn(ri1_obj.uuid, cache_mgr._cache.keys())
self.assertIn(ri2_obj.uuid, cache_mgr._cache.keys())
ri1_obj.add_routing_instance(ri2_obj, None)
self._vnc_lib.routing_instance_update(ri1_obj)
self.assertNotIn(ri2_obj.uuid, cache_mgr._cache.keys())
# end test_evict_on_ref_type_same
def test_stale_for_backref_on_ref_update(self):
uuid_cf = self.uuid_cf
cache_mgr = self.cache_mgr
vn_obj = VirtualNetwork('vn-%s' %(self.id()))
ipam_obj = NetworkIpam('ipam-%s' %(self.id()),
display_name='ipam-name')
self._vnc_lib.network_ipam_create(ipam_obj)
self._vnc_lib.virtual_network_create(vn_obj)
# prime ipam in cache
self._vnc_lib.network_ipam_read(fq_name=ipam_obj.fq_name)
self.assertIn(ipam_obj.uuid, cache_mgr._cache.keys())
vn_obj.add_network_ipam(ipam_obj,
VnSubnetsType(
[IpamSubnetType(SubnetType('192.168.3.11', 28))]))
self._vnc_lib.virtual_network_update(vn_obj)
with uuid_cf.patches([
('column',
(ipam_obj.uuid, 'prop:display_name', 'stale-check-name'))]):
# access for ipam without children/backref should hit cache
ret_ipam_obj = self._vnc_lib.network_ipam_read(
fq_name=ipam_obj.fq_name)
self.assertEqual(ret_ipam_obj.display_name, ipam_obj.display_name)
# access for ipam with backref should hit cache but stale
ret_ipam_obj = self._vnc_lib.network_ipam_read(
fq_name=ipam_obj.fq_name, fields=['virtual_network_back_refs'])
self.assertEqual(ret_ipam_obj.display_name, 'stale-check-name')
# end test_stale_for_backref_on_ref_update
def test_read_for_delete_not_from_cache(self):
uuid_cf = self.uuid_cf
cache_mgr = self.cache_mgr
ipam_obj = NetworkIpam('ipam-%s' %(self.id()),
display_name='ipam-name')
self._vnc_lib.network_ipam_create(ipam_obj)
# prime ipam in cache
self._vnc_lib.network_ipam_read(fq_name=ipam_obj.fq_name)
self.assertIn(ipam_obj.uuid, cache_mgr._cache.keys())
vn_obj = VirtualNetwork('vn-%s' %(self.id()))
self._vnc_lib.virtual_network_create(vn_obj)
with uuid_cf.patches([
('column', (ipam_obj.uuid,
'backref:virtual_network:%s' %(vn_obj.uuid),
json.dumps(None)))
]):
with ExpectedException(RefsExistError,
".*Delete when resource still referred.*"):
self._vnc_lib.network_ipam_delete(id=ipam_obj.uuid)
# end test_read_for_delete_not_from_cache
# end class TestCacheWithMetadata
class TestCacheWithMetadataEviction(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
return super(TestCacheWithMetadataEviction, cls).setUpClass(
extra_config_knobs=[('DEFAULTS', 'object_cache_entries',
'2')])
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestCacheWithMetadataEviction, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def test_evict_on_full(self):
vn1_obj = vnc_api.VirtualNetwork('vn-1-%s' %(self.id()))
self._vnc_lib.virtual_network_create(vn1_obj)
vn2_obj = vnc_api.VirtualNetwork('vn-2-%s' %(self.id()))
self._vnc_lib.virtual_network_create(vn2_obj)
vn3_obj = vnc_api.VirtualNetwork('vn-3-%s' %(self.id()))
self._vnc_lib.virtual_network_create(vn3_obj)
# prime with vn-1 and vn-2
cache_mgr = self._api_server._db_conn._object_db._obj_cache_mgr
self._vnc_lib.virtual_network_read(id=vn1_obj.uuid)
self._vnc_lib.virtual_network_read(id=vn2_obj.uuid)
cache_keys = cache_mgr._cache.keys()
self.assertIn(vn1_obj.uuid, cache_keys)
self.assertIn(vn2_obj.uuid, cache_keys)
self.assertNotIn(vn3_obj.uuid, cache_keys)
# prime vn-3 and test eviction
self._vnc_lib.virtual_network_read(id=vn3_obj.uuid)
cache_keys = cache_mgr._cache.keys()
self.assertIn(vn3_obj.uuid, cache_keys)
if vn1_obj.uuid in cache_keys:
self.assertNotIn(vn2_obj.uuid, cache_keys)
elif vn2_obj.uuid in cache_keys:
self.assertNotIn(vn1_obj.uuid, cache_keys)
else:
self.assertTrue(
False, 'Eviction failed, all VNs present in cache')
# end test_evict_on_full
# end class TestCacheWithMetadataEviction
class TestCacheWithMetadataExcludeTypes(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
return super(TestCacheWithMetadataExcludeTypes, cls).setUpClass(
extra_config_knobs=[('DEFAULTS', 'object_cache_exclude_types',
'project, network-ipam')])
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestCacheWithMetadataExcludeTypes, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def test_exclude_types_not_cached(self):
# verify not cached for configured types
obj = vnc_api.Project('proj-%s' %(self.id()))
self._vnc_lib.project_create(obj)
self._vnc_lib.project_read(id=obj.uuid)
cache_mgr = self._api_server._db_conn._object_db._obj_cache_mgr
self.assertNotIn(obj.uuid, cache_mgr._cache.keys())
obj = vnc_api.NetworkIpam('ipam-%s' %(self.id()))
self._vnc_lib.network_ipam_create(obj)
self._vnc_lib.network_ipam_read(id=obj.uuid)
cache_mgr = self._api_server._db_conn._object_db._obj_cache_mgr
self.assertNotIn(obj.uuid, cache_mgr._cache.keys())
# verify cached for others
obj = vnc_api.VirtualNetwork('vn-%s' %(self.id()))
self._vnc_lib.virtual_network_create(obj)
self._vnc_lib.virtual_network_read(id=obj.uuid)
cache_mgr = self._api_server._db_conn._object_db._obj_cache_mgr
self.assertIn(obj.uuid, cache_mgr._cache.keys())
# end test_exclude_types_not_cached
# end class TestCacheWithMetadataExcludeTypes
class TestRefValidation(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestRefValidation, cls).setUpClass(*args, **kwargs)
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestRefValidation, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def test_refs_validation_with_expected_error(self):
obj = VirtualNetwork('validate-create-error')
body_dict = {'virtual-network':
{'fq_name': obj.fq_name,
'parent_type': 'project',
'network_ipam_refs': [
{'attr':
{'host_routes': None,
'ipam_subnets': [{'addr_from_start': None,
'alloc_unit': 1,
'allocation_pools': [],
'default_gateway': None,
'dhcp_option_list': None,
'dns_nameservers': [],
'dns_server_address': None,
'enable_dhcp': True,
'host_routes': None,
'subnet': {'ip_prefix': '192.168.3.11',
'ip_prefix_len': 24},
'subnet_name': None,
'subnet_uuid': 12}]},
'to': ['default-domain',
'default-project']}]}}
status, content = self._http_post('/virtual-networks',
body=json.dumps(body_dict))
self.assertThat(status, Equals(400))
self.assertThat(content, Contains('Bad reference'))
#end test_refs_validation_with_expected_error
def test_refs_validation_with_expected_success(self):
obj = VirtualNetwork('validate-create')
body_dict = {'virtual-network':
{'fq_name': obj.fq_name,
'parent_type': 'project',
'network_ipam_refs': [
{'attr':
{'host_routes': None,
'ipam_subnets': [{'addr_from_start': None,
'alloc_unit': 1,
'allocation_pools': [],
'default_gateway': None,
'dhcp_option_list': None,
'dns_nameservers': [],
'dns_server_address': None,
'enable_dhcp': True,
'host_routes': None,
'subnet': {'ip_prefix': '10.1.1.0',
'ip_prefix_len': 24},
'subnet_name': None,
'subnet_uuid': None}]},
'to': ['default-domain',
'default-project',
'default-network-ipam']}]}}
status, content = self._http_post('/virtual-networks',
body=json.dumps(body_dict))
self.assertThat(status, Equals(200))
#end test_refs_validation_with_expected_success
#end class TestRefValidation
class TestVncApiStats(test_case.ApiServerTestCase):
from cfgm_common.vnc_api_stats import log_api_stats
_sandesh = None
@log_api_stats
def _sample_function(self, obj_type):
raise cfgm_common.exceptions.HttpError(409, '')
def _check_sendwith(self, sandesh, stats, *args):
self.assertEqual(stats.response_code, 409)
self.assertEqual(stats.obj_type, 'abc')
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestVncApiStats, cls).setUpClass(*args, **kwargs)
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestVncApiStats, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def test_response_code_on_exception(self):
from cfgm_common.vnc_api_stats import VncApiStatistics
try:
with test_common.patch(VncApiStatistics, 'sendwith', self._check_sendwith):
self._sample_function('abc')
except cfgm_common.exceptions.HttpError:
pass
else:
            self.fail('Expected HttpError to be raised, but it was not raised')
# end test_response_code_on_exception
# end TestVncApiStats
class TestDbJsonExim(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestDbJsonExim, cls).setUpClass(*args, **kwargs)
cls.to_bgp_ks = '%s_to_bgp_keyspace' %(cls._cluster_id)
cls.svc_mon_ks = '%s_svc_monitor_keyspace' %(cls._cluster_id)
cls.dev_mgr_ks = '%s_dm_keyspace' %(cls._cluster_id)
# end setUpClass
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestDbJsonExim, cls).tearDownClass(*args, **kwargs)
# end tearDownClass
def test_db_exim_args(self):
with ExpectedException(db_json_exim.InvalidArguments,
'Both --import-from and --export-to cannot be specified'):
db_json_exim.DatabaseExim("--import-from foo --export-to bar")
# end test_db_exim_args
def test_db_export(self):
with tempfile.NamedTemporaryFile() as export_dump:
patch_ks = test_common.FakeSystemManager.patch_keyspace
with patch_ks(self.to_bgp_ks, {}), \
patch_ks(self.svc_mon_ks, {}), \
patch_ks(self.dev_mgr_ks, {}):
vn_obj = self._create_test_object()
db_json_exim.DatabaseExim('--export-to %s --cluster_id %s' %(
export_dump.name, self._cluster_id)).db_export()
dump = json.loads(export_dump.readlines()[0])
dump_cassandra = dump['cassandra']
dump_zk = json.loads(dump['zookeeper'])
uuid_table = dump_cassandra['config_db_uuid']['obj_uuid_table']
self.assertEqual(uuid_table[vn_obj.uuid]['fq_name'][0],
json.dumps(vn_obj.get_fq_name()))
zk_node = [node for node in dump_zk
if node[0] == '%s/fq-name-to-uuid/virtual_network:%s/' %(
self._cluster_id, vn_obj.get_fq_name_str())]
self.assertEqual(len(zk_node), 1)
self.assertEqual(zk_node[0][1][0], vn_obj.uuid)
# end test_db_export
def test_db_export_with_omit_keyspaces(self):
with tempfile.NamedTemporaryFile() as export_dump:
vn_obj = self._create_test_object()
omit_ks = set(db_json_exim.KEYSPACES) - set(['config_db_uuid'])
args = '--export-to %s --omit-keyspaces ' %(export_dump.name)
for ks in list(omit_ks):
args += '%s ' %(ks)
args += '--cluster_id %s' %(self._cluster_id)
db_json_exim.DatabaseExim(args).db_export()
dump = json.loads(export_dump.readlines()[0])
dump_cassandra = dump['cassandra']
dump_zk = json.loads(dump['zookeeper'])
uuid_table = dump_cassandra['config_db_uuid']['obj_uuid_table']
self.assertEqual(uuid_table[vn_obj.uuid]['fq_name'][0],
json.dumps(vn_obj.get_fq_name()))
zk_node = [node for node in dump_zk
if node[0] == '%s/fq-name-to-uuid/virtual_network:%s/' %(
self._cluster_id, vn_obj.get_fq_name_str())]
self.assertEqual(len(zk_node), 1)
self.assertEqual(zk_node[0][1][0], vn_obj.uuid)
# end test_db_export_with_omit_keyspaces
def test_db_export_and_import(self):
with tempfile.NamedTemporaryFile() as dump_f:
patch_ks = test_common.FakeSystemManager.patch_keyspace
with patch_ks(self.to_bgp_ks, {}), \
patch_ks(self.svc_mon_ks, {}), \
patch_ks(self.dev_mgr_ks, {}):
vn_obj = self._create_test_object()
db_json_exim.DatabaseExim('--export-to %s --cluster_id %s' %(
dump_f.name, self._cluster_id)).db_export()
with ExpectedException(db_json_exim.CassandraNotEmptyError):
db_json_exim.DatabaseExim(
'--import-from %s --cluster_id %s' %(
dump_f.name, self._cluster_id)).db_import()
uuid_cf = self.get_cf(
'config_db_uuid', 'obj_uuid_table')
fq_name_cf = self.get_cf(
'config_db_uuid', 'obj_fq_name_table')
shared_cf = self.get_cf(
'config_db_uuid', 'obj_shared_table')
with uuid_cf.patch_cf({}), fq_name_cf.patch_cf({}), \
shared_cf.patch_cf({}):
with ExpectedException(
db_json_exim.ZookeeperNotEmptyError):
db_json_exim.DatabaseExim(
'--import-from %s --cluster_id %s' %(
dump_f.name, self._cluster_id)).db_import()
exim_obj = db_json_exim.DatabaseExim(
'--import-from %s --cluster_id %s' %(
dump_f.name, self._cluster_id))
with uuid_cf.patch_cf({}), fq_name_cf.patch_cf({}), \
                    shared_cf.patch_cf({}),
# article_separation/baseline_clustering/dbscan_baselines.py
# -*- coding: utf-8 -*-
""" DBSCAN based on <NAME> "https://github.com/chrisjmccormick/dbscan" """
import math
import jpype
import collections
import numpy as np
from python_util.geometry.rectangle import Rectangle
from python_util.geometry.util import calc_reg_line_stats, get_dist_fast, get_in_dist, get_off_dist
from python_util.geometry.polygon import Polygon, norm_poly_dists
def get_list_of_scaled_polygons(lst_of_polygons, scaling_factor=1):
""" Computation of a list of scaled polygons on the basis of a given list of polygons.
:param lst_of_polygons: polygons to be scaled
:param scaling_factor: multiplication factor for all x and y coordinates describing the polygons
:return: list of the corresponding scaled polygons
"""
lst_of_scaled_polygons = []
for polygon in lst_of_polygons:
x_scaled = scaling_factor * np.array([polygon.x_points])
x_scaled = x_scaled.astype(int)[0].tolist()
y_scaled = scaling_factor * np.array([polygon.y_points])
y_scaled = y_scaled.astype(int)[0].tolist()
lst_of_scaled_polygons.append(Polygon(x_points=x_scaled, y_points=y_scaled, n_points=len(x_scaled)))
return lst_of_scaled_polygons
def get_list_of_interline_distances(lst_of_polygons, des_dist=5, max_d=500, use_java_code=True):
""" Calculates interline distances for every polygon according to "https://arxiv.org/pdf/1705.03311.pdf".
:param lst_of_polygons: list of polygons for which we want to get the interline distances
:param des_dist: desired distance (measured in pixels) of two adjacent pixels in the normed polygons
:param max_d: maximum distance (measured in pixels) for the calculation of the interline distances
:param use_java_code: usage of methods written in java (faster than python!) or not
:return: list of corresponding interline distances
"""
# calculation of the normed polygons (includes also the calculation of their bounding boxes!)
lst_of_normed_polygons = norm_poly_dists(poly_list=lst_of_polygons, des_dist=des_dist)
# call java code to calculate the interline distances
if use_java_code:
java_object = jpype.JPackage("java_util").Util()
lst_of_normed_polygon_java = []
for poly in lst_of_normed_polygons:
lst_of_normed_polygon_java.append(jpype.java.awt.Polygon(poly.x_points, poly.y_points, poly.n_points))
lst_of_interline_distances_java = \
java_object.calcInterlineDistances(lst_of_normed_polygon_java, des_dist, max_d)
lst_of_interline_distances = list(lst_of_interline_distances_java)
return lst_of_interline_distances
# call python code to calculate the interline distances
else:
lst_of_interline_distances = []
for poly_a in lst_of_normed_polygons:
# calculate the angle of the linear regression line representing the baseline polygon poly_a
angle = calc_reg_line_stats(poly_a)[0]
# orientation vector (given by angle) of length 1
or_vec_y, or_vec_x = math.sin(angle), math.cos(angle)
dist = max_d
# first and last point of polygon
pt_a1 = [poly_a.x_points[0], poly_a.y_points[0]]
pt_a2 = [poly_a.x_points[-1], poly_a.y_points[-1]]
# iterate over pixels of the current GT baseline polygon
for x_a, y_a in zip(poly_a.x_points, poly_a.y_points):
p_a = [x_a, y_a]
# iterate over all other polygons (to calculate X_G)
for poly_b in lst_of_normed_polygons:
if poly_b != poly_a:
# if polygon poly_b is too far away from pixel p_a, skip
if get_dist_fast(p_a, poly_b.get_bounding_box()) > dist:
continue
# get first and last pixel of baseline polygon poly_b
pt_b1 = poly_b.x_points[0], poly_b.y_points[0]
pt_b2 = poly_b.x_points[-1], poly_b.y_points[-1]
# calculate the inline distance of the points
in_dist1 = get_in_dist(pt_a1, pt_b1, or_vec_x, or_vec_y)
in_dist2 = get_in_dist(pt_a1, pt_b2, or_vec_x, or_vec_y)
in_dist3 = get_in_dist(pt_a2, pt_b1, or_vec_x, or_vec_y)
in_dist4 = get_in_dist(pt_a2, pt_b2, or_vec_x, or_vec_y)
if (in_dist1 < 0 and in_dist2 < 0 and in_dist3 < 0 and in_dist4 < 0) or (
in_dist1 > 0 and in_dist2 > 0 and in_dist3 > 0 and in_dist4 > 0):
continue
for p_b in zip(poly_b.x_points, poly_b.y_points):
if abs(get_in_dist(p_a, p_b, or_vec_x, or_vec_y)) <= 2 * des_dist:
dist = min(dist, abs(get_off_dist(p_a, p_b, or_vec_x, or_vec_y)))
if dist < max_d:
lst_of_interline_distances.append(dist)
else:
lst_of_interline_distances.append(max_d)
return lst_of_interline_distances
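# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): it shows how the two
# helpers above are typically combined, mirroring what DBSCANBaselines.__init__
# below does. The toy baselines and the 50 px target distance are invented
# example values; use_java_code=False keeps the sketch runnable without a
# JVM/jpype setup (assuming the python_util package is importable).
def _interline_distance_demo(target_average_interline_distance=50.0):
    # two roughly horizontal toy baselines, 40 px apart
    baseline_top = Polygon(x_points=[0, 100, 200], y_points=[100, 100, 100], n_points=3)
    baseline_bottom = Polygon(x_points=[0, 100, 200], y_points=[140, 140, 140], n_points=3)
    polygons = [baseline_top, baseline_bottom]
    # interline distances via the pure-Python code path
    distances = get_list_of_interline_distances(lst_of_polygons=polygons,
                                                des_dist=5, max_d=500,
                                                use_java_code=False)
    # rescale the polygons so the average interline distance approaches the target
    positive = [d for d in distances if d > 0]
    if positive:
        scale_fac = target_average_interline_distance / (sum(positive) / len(positive))
        polygons = get_list_of_scaled_polygons(lst_of_polygons=polygons,
                                               scaling_factor=scale_fac)
    return polygons, distances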
class DBSCANBaselines:
def __init__(self, list_of_polygons, min_polygons_for_cluster=2, min_polygons_for_article=1,
rectangle_interline_factor=1.25, des_dist=5, max_d=500,
use_java_code=True, target_average_interline_distance=50):
""" Initialization of the clustering process.
:param list_of_polygons: list of polygons
:param min_polygons_for_cluster: minimum number of required polygons in neighborhood to form a cluster
:param min_polygons_for_article: minimum number of required polygons forming an article
:param rectangle_interline_factor: multiplication factor to calculate the height of the rectangles during the
                                           clustering process with the help of the interline distances
:param des_dist: desired distance (measured in pixels) of two adjacent pixels in the normed polygons
:param max_d: maximum distance (measured in pixels) for the calculation of the interline distances
:param use_java_code: usage of methods written in java (faster than python!) or not
:param target_average_interline_distance: target interline distance for scaling of the polygons
"""
list_of_interline_distances = \
get_list_of_interline_distances(lst_of_polygons=list_of_polygons,
des_dist=des_dist, max_d=max_d, use_java_code=use_java_code)
average_list = [dist for dist in list_of_interline_distances if dist > 0]
# scaling the polygons to reach the target average interline distance
if target_average_interline_distance > 0 and len(average_list) > 0:
# computation of the average interline distance
average_interline_distance = 1 / len(average_list) * sum(average_list)
# computation of the polygon scaling factor
scale_fac = target_average_interline_distance / average_interline_distance
list_of_polygons_scaled = \
get_list_of_scaled_polygons(lst_of_polygons=list_of_polygons, scaling_factor=scale_fac)
list_of_interline_distances_scaled = \
get_list_of_interline_distances(lst_of_polygons=list_of_polygons_scaled,
des_dist=des_dist, max_d=max_d, use_java_code=use_java_code)
# computation of the scaled average interline distance
average_list_scaled = [dist for dist in list_of_interline_distances_scaled if dist > 0]
average_interline_distance_scaled = 1 / (len(average_list_scaled) + 1e-8) * sum(average_list_scaled)
self.list_of_normed_polygons = norm_poly_dists(poly_list=list_of_polygons_scaled, des_dist=des_dist)
self.list_of_interline_distances = list_of_interline_distances_scaled
self.avg = average_interline_distance_scaled
else:
# computation of the average interline distance
average_interline_distance = 1 / (len(average_list) + 1e-8) * sum(average_list)
self.list_of_normed_polygons = norm_poly_dists(poly_list=list_of_polygons, des_dist=des_dist)
self.list_of_interline_distances = list_of_interline_distances
self.avg = average_interline_distance
self.fac = rectangle_interline_factor
self.min_polygons_for_cluster = min_polygons_for_cluster
self.min_polygons_for_article = min_polygons_for_article
# initially all labels for all baselines are 0 (0 means the baseline hasn't been considered yet,
# -1 stands for noise, clusters are numbered starting from 1)
self.list_of_labels = [0] * len(self.list_of_normed_polygons)
self.list_if_center = [False] * len(self.list_of_normed_polygons)
print("Number of (detected) baselines contained by the image: {}".format(len(self.list_of_normed_polygons)))
def clustering_polygons(self):
""" Clusters the polygons with DBSCAN based approach. """
label = 0
# if valid center polygon is found, a new cluster is created
for polygon_index in range(len(self.list_of_normed_polygons)):
# if the polygon's label isn't 0, continue to the next polygon
if not (self.list_of_labels[polygon_index] == 0):
continue
# find all neighboring polygons
neighbor_polygons = DBSCANBaselines.region_query(self, polygon_index)
# if the number is below "min_polygons_for_cluster", this polygon is "noise"
# a noise polygon may later be picked up by another cluster as a boundary polygon
if len(neighbor_polygons) < self.min_polygons_for_cluster:
self.list_of_labels[polygon_index] = -1
# otherwise, this polygon is a center polygon for a new cluster
else:
label += 1
self.list_if_center[polygon_index] = True
# build the cluster
DBSCANBaselines.grow_cluster(self, polygon_index, neighbor_polygons, label)
def grow_cluster(self, polygon_index, neighbor_polygons, this_label):
""" Grows a new cluster with label "this_label" from a center polygon with index "polygon_index".
:param polygon_index: index of a center polygon of this new cluster
:param neighbor_polygons: all neighbors of the center polygon
:param this_label: label for this new cluster
"""
# assign the cluster label to the center polygon
self.list_of_labels[polygon_index] = this_label
# look at each neighbor of the center polygon; neighbor polygons will be used as a FIFO queue (while-loop)
# of polygons to search - it will grow as we discover new polygons for the cluster
i = 0
while i < len(neighbor_polygons):
neighbor_index = neighbor_polygons[i]
# if "neighbor_index" was labelled noise, we know it's not a center polygon (not enough neighbors),
# so make it a boundary polygon of the cluster and move on
if self.list_of_labels[neighbor_index] == -1:
self.list_of_labels[neighbor_index] = this_label
# if "neighbor_index" isn't already claimed, add the polygon to the cluster
elif self.list_of_labels[neighbor_index] == 0:
self.list_of_labels[neighbor_index] = this_label
# find all the neighbors of "neighbor_index"
next_neighbor_polygons = DBSCANBaselines.region_query(self, neighbor_index)
# if "neighbor_index" has at least "min_polygons_for_cluster" neighbors, it's a new center polygon
# add all of its neighbors to the FIFO queue
if len(next_neighbor_polygons) >= self.min_polygons_for_cluster:
self.list_if_center[neighbor_index] = True
neighbor_polygons += next_neighbor_polygons
# if "neighbor_index" doesn't have enough neighbors, don't queue up it's neighbors as expansion polygons
# # only necessary, if distance measure is not symmetric!
# # if another center polygon is in our neighborhood, merge the two clusters
# elif self.list_of_labels[neighbor_index] != this_label and self.list_if_center[neighbor_index]:
# self.list_of_labels = \
# [self.list_of_labels[neighbor_index] if x == this_label else x for x in self.list_of_labels]
#
# this_label = self.list_of_labels[neighbor_index]
# next point in the FIFO queue
i += 1
def region_query(self, polygon_index):
""" Finds all polygons in the dataset within the defined neighborhood of the considered polygon "polygon_index".
:param polygon_index: index of the considered polygon
:return: index list of the neighbor polygons
"""
neighbors = []
# computation of an extended bounding rectangle for "polygon_index"
poly1 = self.list_of_normed_polygons[polygon_index]
int_dis1 = self.list_of_interline_distances[polygon_index]
if not 0.5 * self.avg <= int_dis1 <= 1.5 * self.avg:
int_dis1 = self.avg
rec1_expanded = Rectangle(int(poly1.bounds.x), int(poly1.bounds.y - self.fac * int_dis1),
int(poly1.bounds.width), int(poly1.bounds.height + 2 * self.fac * int_dis1))
for i, normed_polygon_i in enumerate(self.list_of_normed_polygons):
if i == polygon_index:
continue
            # computation of an extended bounding rectangle for polygon "i"
alpha,
np.uint8)
else:
if self._imcache is None:
self._imcache = self.to_rgba(A, bytes=True, norm=(A.ndim == 2))
output = self._imcache
# Subset the input image to only the part that will be
# displayed
subset = TransformedBbox(clip_bbox, t0.inverted()).frozen()
output = output[
int(max(subset.ymin, 0)):
int(min(subset.ymax + 1, output.shape[0])),
int(max(subset.xmin, 0)):
int(min(subset.xmax + 1, output.shape[1]))]
t = Affine2D().translate(
int(max(subset.xmin, 0)), int(max(subset.ymin, 0))) + t
return output, clipped_bbox.x0, clipped_bbox.y0, t
def make_image(self, renderer, magnification=1.0, unsampled=False):
"""
Normalize, rescale, and colormap this image's data for rendering using
*renderer*, with the given *magnification*.
If *unsampled* is True, the image will not be scaled, but an
appropriate affine transformation will be returned instead.
Returns
-------
image : (M, N, 4) uint8 array
The RGBA image, resampled unless *unsampled* is True.
x, y : float
The upper left corner where the image should be drawn, in pixel
space.
trans : Affine2D
The affine transformation from image to pixel space.
"""
raise NotImplementedError('The make_image method must be overridden')
def _check_unsampled_image(self):
"""
Return whether the image is better to be drawn unsampled.
The derived class needs to override it.
"""
return False
@martist.allow_rasterization
def draw(self, renderer, *args, **kwargs):
# if not visible, declare victory and return
if not self.get_visible():
self.stale = False
return
# for empty images, there is nothing to draw!
if self.get_array().size == 0:
self.stale = False
return
# actually render the image.
gc = renderer.new_gc()
self._set_gc_clip(gc)
gc.set_alpha(self._get_scalar_alpha())
gc.set_url(self.get_url())
gc.set_gid(self.get_gid())
if (renderer.option_scale_image() # Renderer supports transform kwarg.
and self._check_unsampled_image()
and self.get_transform().is_affine):
im, l, b, trans = self.make_image(renderer, unsampled=True)
if im is not None:
trans = Affine2D().scale(im.shape[1], im.shape[0]) + trans
renderer.draw_image(gc, l, b, im, trans)
else:
im, l, b, trans = self.make_image(
renderer, renderer.get_image_magnification())
if im is not None:
renderer.draw_image(gc, l, b, im)
gc.restore()
self.stale = False
def contains(self, mouseevent):
"""Test whether the mouse event occurred within the image."""
inside, info = self._default_contains(mouseevent)
if inside is not None:
return inside, info
# 1) This doesn't work for figimage; but figimage also needs a fix
# below (as the check cannot use x/ydata and extents).
# 2) As long as the check below uses x/ydata, we need to test axes
# identity instead of `self.axes.contains(event)` because even if
# axes overlap, x/ydata is only valid for event.inaxes anyways.
if self.axes is not mouseevent.inaxes:
return False, {}
# TODO: make sure this is consistent with patch and patch
# collection on nonlinear transformed coordinates.
# TODO: consider returning image coordinates (shouldn't
# be too difficult given that the image is rectilinear
trans = self.get_transform().inverted()
x, y = trans.transform([mouseevent.x, mouseevent.y])
xmin, xmax, ymin, ymax = self.get_extent()
if xmin > xmax:
xmin, xmax = xmax, xmin
if ymin > ymax:
ymin, ymax = ymax, ymin
if x is not None and y is not None:
inside = (xmin <= x <= xmax) and (ymin <= y <= ymax)
else:
inside = False
return inside, {}
def write_png(self, fname):
"""Write the image to png file *fname*."""
im = self.to_rgba(self._A[::-1] if self.origin == 'lower' else self._A,
bytes=True, norm=True)
PIL.Image.fromarray(im).save(fname, format="png")
def set_data(self, A):
"""
Set the image array.
Note that this function does *not* update the normalization used.
Parameters
----------
A : array-like or `PIL.Image.Image`
"""
if isinstance(A, PIL.Image.Image):
A = pil_to_array(A) # Needed e.g. to apply png palette.
self._A = cbook.safe_masked_invalid(A, copy=True)
if (self._A.dtype != np.uint8 and
not np.can_cast(self._A.dtype, float, "same_kind")):
raise TypeError("Image data of dtype {} cannot be converted to "
"float".format(self._A.dtype))
if self._A.ndim == 3 and self._A.shape[-1] == 1:
# If just one dimension assume scalar and apply colormap
self._A = self._A[:, :, 0]
if not (self._A.ndim == 2
or self._A.ndim == 3 and self._A.shape[-1] in [3, 4]):
raise TypeError("Invalid shape {} for image data"
.format(self._A.shape))
if self._A.ndim == 3:
# If the input data has values outside the valid range (after
# normalisation), we issue a warning and then clip X to the bounds
# - otherwise casting wraps extreme values, hiding outliers and
# making reliable interpretation impossible.
high = 255 if np.issubdtype(self._A.dtype, np.integer) else 1
if self._A.min() < 0 or high < self._A.max():
_log.warning(
'Clipping input data to the valid range for imshow with '
'RGB data ([0..1] for floats or [0..255] for integers).'
)
self._A = np.clip(self._A, 0, high)
# Cast unsupported integer types to uint8
if self._A.dtype != np.uint8 and np.issubdtype(self._A.dtype,
np.integer):
self._A = self._A.astype(np.uint8)
self._imcache = None
self._rgbacache = None
self.stale = True
def set_array(self, A):
"""
Retained for backwards compatibility - use set_data instead.
Parameters
----------
A : array-like
"""
# This also needs to be here to override the inherited
# cm.ScalarMappable.set_array method so it is not invoked by mistake.
self.set_data(A)
def get_interpolation(self):
"""
Return the interpolation method the image uses when resizing.
One of 'antialiased', 'nearest', 'bilinear', 'bicubic', 'spline16',
'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric',
'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos',
or 'none'.
"""
return self._interpolation
def set_interpolation(self, s):
"""
Set the interpolation method the image uses when resizing.
If None, use :rc:`image.interpolation`. If 'none', the image is
shown as is without interpolating. 'none' is only supported in
agg, ps and pdf backends and will fall back to 'nearest' mode
for other backends.
Parameters
----------
s : {'antialiased', 'nearest', 'bilinear', 'bicubic', 'spline16', \
'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'catrom', \
'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos', 'none'} or None
"""
if s is None:
s = mpl.rcParams['image.interpolation']
s = s.lower()
_api.check_in_list(_interpd_, interpolation=s)
self._interpolation = s
self.stale = True
def set_interpolation_stage(self, s):
"""
Set when interpolation happens during the transform to RGBA.
Parameters
----------
s : {'data', 'rgba'} or None
Whether to apply up/downsampling interpolation in data or rgba
space.
"""
if s is None:
s = "data" # placeholder for maybe having rcParam
        _api.check_in_list(['data', 'rgba'], s=s)
self._interpolation_stage = s
self.stale = True
def can_composite(self):
"""Return whether the image can be composited with its neighbors."""
trans = self.get_transform()
return (
self._interpolation != 'none' and
trans.is_affine and
trans.is_separable)
def set_resample(self, v):
"""
Set whether image resampling is used.
Parameters
----------
v : bool or None
If None, use :rc:`image.resample`.
"""
if v is None:
v = mpl.rcParams['image.resample']
self._resample = v
self.stale = True
def get_resample(self):
"""Return whether image resampling is used."""
return self._resample
def set_filternorm(self, filternorm):
"""
Set whether the resize filter normalizes the weights.
See help for `~.Axes.imshow`.
Parameters
----------
filternorm : bool
"""
self._filternorm = bool(filternorm)
self.stale = True
def get_filternorm(self):
"""Return whether the resize filter normalizes the weights."""
return self._filternorm
def set_filterrad(self, filterrad):
"""
        Set the resize filter radius (only applicable to some
        interpolation schemes) -- see help for `~.Axes.imshow`.
Parameters
----------
filterrad : positive float
"""
r = float(filterrad)
if r <= 0:
raise ValueError("The filter radius must be a positive number")
self._filterrad = r
self.stale = True
def get_filterrad(self):
"""Return the filterrad setting."""
return self._filterrad
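# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): how the setters defined on
# _ImageBase above are usually reached through the public pyplot/Axes API.
# Assumes a working matplotlib installation; the sample data and parameter
# values are arbitrary.
def _image_settings_demo():
    import matplotlib
    matplotlib.use("Agg")  # headless backend, so the sketch runs without a display
    import matplotlib.pyplot as plt
    import numpy as np
    fig, ax = plt.subplots()
    im = ax.imshow(np.random.rand(8, 8))  # returns an AxesImage (an _ImageBase subclass)
    im.set_data(np.random.rand(16, 16))   # replace the underlying array (see set_data above)
    im.set_interpolation('nearest')       # one of the values accepted by set_interpolation
    im.set_resample(True)
    im.set_filternorm(True)
    im.set_filterrad(4.0)                 # must be a positive float (see set_filterrad)
    fig.canvas.draw()                     # triggers the image's make_image()/draw() path
    return im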
class AxesImage(_ImageBase):
"""
An image attached to an Axes.
Parameters
----------
ax : `~.axes.Axes`
The axes the image will belong to.
cmap : str or `~matplotlib.colors.Colormap`, default: :rc:`image.cmap`
The Colormap instance or registered colormap name used to map scalar
data to colors.
norm : `~matplotlib.colors.Normalize`
Maps luminance to 0-1.
interpolation : str, default: :rc:`image.interpolation`
Supported values are 'none', 'antialiased', 'nearest', 'bilinear',
'bicubic', 'spline16', 'spline36', 'hanning', 'hamming', 'hermite',
'kaiser', 'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell',
'sinc', 'lanczos', 'blackman'.
interpolation_stage : {'data', 'rgba'}, default: 'data'
If 'data', interpolation
is carried out on the data provided by the user. If 'rgba', the
interpolation is carried out after the colormapping has been
applied (visual interpolation).
origin : {'upper', 'lower'}, default: :rc:`image.origin`
Place the [0, 0] index of the array in the upper left or lower left
corner of the axes. The convention 'upper' is typically used for
matrices and images.
extent : tuple, optional
The data axes (left, right, bottom, top) for making image plots
registered with data plots. Default is to label the pixel
centers with the zero-based row and column indices.
    filternorm : bool,
        trigger_sha, repo = self.get_trigger_sha_and_repo(job_id)
gradle9 = self.dispatcher.analyze(file_path, job_id,
trigger_sha=trigger_sha, repo=repo)
self.compare_status(gradle9, 'broken')
self.compare_analyzer(gradle9, 'java-gradle')
self.compare_num_t_run(gradle9, 1544)
self.compare_num_t_ok(gradle9, 1543)
self.compare_num_t_failed(gradle9, 1)
self.compare_num_t_skipped(gradle9, 0)
self.compare_bool_t_ran(gradle9, True)
self.compare_bool_t_failed(gradle9, True)
self.compare_frameworks(gradle9, 'JUnit')
self.compare_tr_t_failed(
gradle9,
'test.thread.parallelization.ParallelByMethodsTestCase6Scenario1.verifyThatTestMethodsRunInParallelThreads')
def test_gradle_10(self):
log = '153491211-orig.log'
job_id = 153491211
file_path = 'gradle/' + log
trigger_sha, repo = self.get_trigger_sha_and_repo(job_id)
gradle10 = self.dispatcher.analyze(file_path, job_id, trigger_sha=trigger_sha, repo=repo)
self.compare_status(gradle10, 'broken')
self.compare_analyzer(gradle10, 'java-gradle')
self.compare_num_t_run(gradle10, 740)
self.compare_num_t_ok(gradle10, 680)
self.compare_num_t_failed(gradle10, 60)
self.compare_num_t_skipped(gradle10, 16)
self.compare_bool_t_ran(gradle10, True)
self.compare_bool_t_failed(gradle10, True)
self.compare_frameworks(gradle10, 'JUnit')
self.compare_tr_t_failed(gradle10, 'com.tngtech.jgiven.examples.coffeemachine.ServeCoffeeTest.a'
'_scenario_with_a_failing_test_case_for_demonstration_purposes[1:'
' false]#com.tngtech.jgiven.examples.coffeemachine.ServeCoffeeTest.'
'shouldFailWithUnexpectedRuntimeException#com.tngtech.jgiven.'
'examples.coffeemachine.ServeCoffeeTest.a_failing_scenario'
'_for_demonstration_purposes#com.tngtech.jgiven.examples.'
'nested.NestedStepsTest.a_scenario_with_a_failing_nested'
'_step_on_purpose#com.tngtech.jgiven.examples.parameters'
'.ParametrizedScenariosTest.a_scenario_with_many_cases[5:'
' some grouping value 0, value 5]#com.tngtech.jgiven.examples'
'.parameters.ParametrizedScenariosTest.a_scenario_with_many'
'_cases[15: some grouping value 1, value 5]#com.tngtech.jgiven'
'.examples.parameters.ParametrizedScenariosTest.a_scenario'
'_with_many_cases[25: some grouping value 2, value 5]#com.'
'tngtech.jgiven.examples.parameters.ParametrizedScenariosTest'
'.a_scenario_with_many_cases[35: some grouping value 3, value'
' 5]#com.tngtech.jgiven.examples.parameters.Parametrized'
'ScenariosTest.a_scenario_with_many_cases[45: some grouping'
' value 4, value 5]#com.tngtech.jgiven.examples.parameters'
'.ParametrizedScenariosTest.a_scenario_with_many_cases[55'
': some grouping value 5, value 5]#com.tngtech.jgiven.'
'examples.parameters.ParametrizedScenariosTest.a_scenario'
'_with_many_cases[65: some grouping value 6, value 5]#com'
'.tngtech.jgiven.examples.parameters.ParametrizedScenarios'
'Test.a_scenario_with_many_cases[75: some grouping value '
'7, value 5]#com.tngtech.jgiven.examples.parameters.Param'
'etrizedScenariosTest.a_scenario_with_many_cases[85: some'
' grouping value 8, value 5]#com.tngtech.jgiven.examples.pa'
'rameters.ParametrizedScenariosTest.a_scenario_with_many_ca'
'ses[95: some grouping value 9, value 5]#com.tngtech.jgive'
'n.integration.spring.test.XmlConfiguredSpringScenarioTest'
'Test.spring_can_inject_beans_into_stages#com.tngtech.jgiv'
'en.examples.coffeemachine.ServeCoffeeTest.a_scenario_with'
'_a_failing_test_case_for_demonstration_purposes[1: false]'
'#com.tngtech.jgiven.examples.coffeemachine.ServeCoffeeTest'
'.shouldFailWithUnexpectedRuntimeException#com.tngtech.jgi'
'ven.examples.coffeemachine.ServeCoffeeTest.a_failing_scen'
'ario_for_demonstration_purposes#com.tngtech.jgiven.exampl'
'es.nested.NestedStepsTest.a_scenario_with_a_failing_nest'
'ed_step_on_purpose#com.tngtech.jgiven.examples.parameters'
'.ParametrizedScenariosTest.a_scenario_with_many_cases[5: s'
'ome grouping value 0, value 5]#com.tngtech.jgiven.examples'
'.parameters.ParametrizedScenariosTest.a_scenario_with_many'
'_cases[15: some grouping value 1, value 5]#com.tngtech.jgi'
'ven.examples.parameters.ParametrizedScenariosTest.a_scenar'
'io_with_many_cases[25: some grouping value 2, value 5]#com'
'.tngtech.jgiven.examples.parameters.ParametrizedScenariosT'
'est.a_scenario_with_many_cases[35: some grouping value 3, '
'value 5]#com.tngtech.jgiven.examples.parameters.Parametriz'
'edScenariosTest.a_scenario_with_many_cases[45: some groupi'
'ng value 4, value 5]#com.tngtech.jgiven.examples.parameter'
's.ParametrizedScenariosTest.a_scenario_with_many_cases[55: '
'some grouping value 5, value 5]#com.tngtech.jgiven.example'
's.parameters.ParametrizedScenariosTest.a_scenario_with_man'
'y_cases[65: some grouping value 6, value 5]#com.tngtech.jg'
'iven.examples.parameters.ParametrizedScenariosTest.a_scena'
'rio_with_many_cases[75: some grouping value 7, value 5]#co'
'm.tngtech.jgiven.examples.parameters.ParametrizedScenariosT'
'est.a_scenario_with_many_cases[85: some grouping value 8, v'
'alue 5]#com.tngtech.jgiven.examples.parameters.Parametrize'
'dScenariosTest.a_scenario_with_many_cases[95: some groupin'
'g value 9, value 5]#com.tngtech.jgiven.integration.spring.'
'test.XmlConfiguredSpringScenarioTestTest.spring_can_inject'
'_beans_into_stages#com.tngtech.jgiven.examples.coffeemachi'
'ne.ServeCoffeeTest.a_scenario_with_a_failing_test_case_for'
'_demonstration_purposes[1: false]#com.tngtech.jgiven.examp'
'les.coffeemachine.ServeCoffeeTest.shouldFailWithUnexpected'
'RuntimeException#com.tngtech.jgiven.examples.coffeemachine'
'.ServeCoffeeTest.a_failing_scenario_for_demonstration_purp'
'oses#com.tngtech.jgiven.examples.nested.NestedStepsTest.a_'
'scenario_with_a_failing_nested_step_on_purpose#com.tngtech'
'.jgiven.examples.parameters.ParametrizedScenariosTest.a_sc'
'enario_with_many_cases[5: some grouping value 0, value 5]#'
'com.tngtech.jgiven.examples.parameters.ParametrizedScenari'
'osTest.a_scenario_with_many_cases[15: some grouping value '
'1, value 5]#com.tngtech.jgiven.examples.parameters.Paramet'
'rizedScenariosTest.a_scenario_with_many_cases[25: some gro'
'uping value 2, value 5]#com.tngtech.jgiven.examples.parame'
'ters.ParametrizedScenariosTest.a_scenario_with_many_cases['
'35: some grouping value 3, value 5]#com.tngtech.jgiven.exa'
'mples.parameters.ParametrizedScenariosTest.a_scenario_with'
'_many_cases[45: some grouping value 4, value 5]#com.tngtech'
'.jgiven.examples.parameters.ParametrizedScenariosTest.a_sc'
'enario_with_many_cases[55: some grouping value 5, value 5]'
'#com.tngtech.jgiven.examples.parameters.ParametrizedScena'
'riosTest.a_scenario_with_many_cases[65: some grouping val'
'ue 6, value 5]#com.tngtech.jgiven.examples.parameters.Par'
'ametrizedScenariosTest.a_scenario_with_many_cases[75: som'
'e grouping value 7, value 5]#com.tngtech.jgiven.examples.'
'parameters.ParametrizedScenariosTest.a_scenario_with_many'
'_cases[85: some grouping value 8, value 5]#com.tngtech.jg'
'iven.examples.parameters.ParametrizedScenariosTest.a_scen'
'ario_with_many_cases[95: some grouping value 9, value 5]#c'
'om.tngtech.jgiven.integration.spring.test.XmlConfiguredSpr'
'ingScenarioTestTest.spring_can_inject_beans_into_stages#co'
'm.tngtech.jgiven.examples.coffeemachine.ServeCoffeeTest.a_'
'scenario_with_a_failing_test_case_for_demonstration_purpos'
'es[1: false]#com.tngtech.jgiven.examples.coffeemachine.Ser'
'veCoffeeTest.shouldFailWithUnexpectedRuntimeException#com.'
'tngtech.jgiven.examples.coffeemachine.ServeCoffeeTest.a_fa'
'iling_scenario_for_demonstration_purposes#com.tngtech.jgive'
'n.examples.nested.NestedStepsTest.a_scenario_with_a_failing'
'_nested_step_on_purpose#com.tngtech.jgiven.examples.paramet'
'ers.ParametrizedScenariosTest.a_scenario_with_many_cases[5:'
' some grouping value 0, value 5]#com.tngtech.jgiven.example'
's.parameters.ParametrizedScenariosTest.a_scenario_with_many'
'_cases[15: some grouping value 1, value 5]#com.tngtech.jgiv'
'en.examples.parameters.ParametrizedScenariosTest.a_scenario'
'_with_many_cases[25: some grouping value 2, value 5]#com.tn'
'gtech.jgiven.examples.parameters.ParametrizedScenariosTest.a'
'_scenario_with_many_cases[35: some grouping value 3, value 5'
']#com.tngtech.jgiven.examples.parameters.ParametrizedScenari'
'osTest.a_scenario_with_many_cases[45: some grouping value 4, '
'value 5]#com.tngtech.jgiven.examples.parameters.Parametrized'
'ScenariosTest.a_scenario_with_many_cases[55: some grouping v'
'alue 5, value 5]#com.tngtech.jgiven.examples.parameters.Para'
'metrizedScenariosTest.a_scenario_with_many_cases[65: some gr'
'ouping value 6, value 5]#com.tngtech.jgiven.examples.paramet'
'ers.ParametrizedScenariosTest.a_scenario_with_many_cases[75:'
' some grouping value 7, value 5]#com.tngtech.jgiven.examples'
'.parameters.ParametrizedScenariosTest.a_scenario_with_many_c'
'ases[85: some grouping value 8, value 5]#com.tngtech.jgiven.'
'examples.parameters.ParametrizedScenariosTest.a_scenario_wit'
'h_many_cases[95: some grouping value 9, value 5]#com.tngtech'
'.jgiven.integration.spring.test.XmlConfiguredSpringScenarioT'
'estTest.spring_can_inject_beans_into_stages')
def test_ant_0(self):
log = '264241708-orig.log'
job_id = 264241708
file_path = 'ant/' + log
(trigger_sha, repo) = self.get_trigger_sha_and_repo(job_id)
ant0 = self.dispatcher.analyze(file_path, job_id,
trigger_sha=trigger_sha, repo=repo)
self.compare_status(ant0, 'broken')
self.compare_analyzer(ant0, 'java-ant')
self.compare_num_t_run(ant0, 287)
self.compare_num_t_ok(ant0, 286)
self.compare_num_t_failed(ant0, 1)
self.compare_num_t_skipped(ant0, 0)
self.compare_bool_t_ran(ant0, True)
self.compare_bool_t_failed(ant0, True)
self.compare_frameworks(ant0, 'JUnit')
self.compare_tr_t_failed(
ant0, 'wyc.testing.AllInvalidTest.invalid[Import_Invalid_1]')
def test_ant_1(self):
log = '233645906-orig.log'
job_id = 233645906
file_path = 'ant/' + log
trigger_sha, repo = self.get_trigger_sha_and_repo(job_id)
ant1 = self.dispatcher.analyze(file_path, job_id, trigger_sha=trigger_sha, repo=repo)
self.compare_status(ant1, 'broken')
self.compare_analyzer(ant1, 'java-ant')
self.compare_num_t_run(ant1, 1367)
self.compare_num_t_ok(ant1, 1341)
self.compare_num_t_failed(ant1, 26)
self.compare_num_t_skipped(ant1, 0)
self.compare_bool_t_ran(ant1, True)
self.compare_bool_t_failed(ant1, True)
self.compare_frameworks(ant1, 'JUnit')
self.compare_tr_t_failed(ant1, 'wyc.testing.AllValidVerificationTest.validVerification[Constrain'
'edReference_Valid_1]#wyc.testing.AllValidVerificationTest.validV'
'erification[FunctionRef_Valid_7]#wyc.testing.AllValidVerificatio'
'nTest.validVerification[Lifetime_Lambda_Valid_6]#wyc.testing.AllV'
'alidVerificationTest.validVerification[Lifetime_Lambda_Valid_7]#w'
'yc.testing.AllValidVerificationTest.validVerification[Lifetime_Va'
'lid_1]#wyc.testing.AllValidVerificationTest.validVerification[Lif'
'etime_Valid_2]#wyc.testing.AllValidVerificationTest.validVerifica'
'tion[Lifetime_Valid_3]#wyc.testing.AllValidVerificationTest.valid'
'Verification[Lifetime_Valid_4]#wyc.testing.AllValidVerificationTe'
'st.validVerification[Lifetime_Valid_5]#wyc.testing.AllValidVerifi'
'cationTest.validVerification[MessageRef_Valid_2]#wyc.testing.AllV'
'alidVerificationTest.validVerification[MessageSend_Valid_2]#wyc.'
'testing.AllValidVerificationTest.validVerification[MessageSend_V'
'alid_3]#wyc.testing.AllValidVerificationTest.validVerification[Me'
'ssageSend_Valid_4]#wyc.testing.AllValidVerificationTest.validVeri'
'fication[MessageSend_Valid_5]#wyc.testing.AllValidVerificationTes'
't.validVerification[MethodCall_Valid_4]#wyc.testing.AllValidVerif'
'icationTest.validVerification[ProcessAccess_Valid_1]#wyc.testing.'
'AllValidVerificationTest.validVerification[Process_Valid_12]#wyc.'
'testing.AllValidVerificationTest.validVerification[Process_Valid_4'
']#wyc.testing.AllValidVerificationTest.validVerification[Process_V'
'alid_5]#wyc.testing.AllValidVerificationTest.validVerification[Pro'
'cess_Valid_6]#wyc.testing.AllValidVerificationTest.validVerificat'
'ion[Process_Valid_7]#wyc.testing.AllValidVerificationTest.validVer'
'ification[Process_Valid_8]#wyc.testing.AllValidVerificationTest.va'
'lidVerification[RecordAccess_Valid_1]#wyc.testing.AllValidVerifica'
'tionTest.validVerification[Reference_Valid_1]#wyc.testing.AllValid'
'VerificationTest.validVerification[Reference_Valid_7]#wyc.testing.'
'AllValidVerificationTest.validVerification[Reference_Valid_8]')
def test_build_system_0(self):
log = '88551597.log'
job_id = 88551597
file_path = 'build_system_testing/' + log
(trigger_sha, repo) = self.get_trigger_sha_and_repo(job_id)
mf1 = self.dispatcher.analyze(file_path, job_id, trigger_sha=trigger_sha, repo=repo)
self.compare_build_system(mf1, 'Gradle')
def test_build_system_1(self):
log = '165108370.log'
job_id = 165108370
file_path = 'build_system_testing/' + log
(trigger_sha, repo) = self.get_trigger_sha_and_repo(job_id)
mf2 = self.dispatcher.analyze(file_path, job_id, trigger_sha=trigger_sha, repo=repo)
self.compare_build_system(mf2, 'Maven')
def test_build_system_2(self):
log = '144826559.log'
job_id = 144826559
file_path = 'build_system_testing/' + log
(trigger_sha, repo) = self.get_trigger_sha_and_repo(job_id)
mf3 = self.dispatcher.analyze(file_path, job_id, trigger_sha=trigger_sha, repo=repo)
self.compare_build_system(mf3, 'Gradle')
def test_build_system_3(self):
log = '251797108.log'
job_id = 251797108
file_path = 'build_system_testing/' + log
(trigger_sha, repo) = self.get_trigger_sha_and_repo(job_id)
mf4 = self.dispatcher.analyze(file_path, job_id, trigger_sha=trigger_sha, repo=repo)
self.compare_build_system(mf4, 'Maven')
def test_build_system_4(self):
log = '250416678.log'
job_id = 250416678
file_path = 'build_system_testing/' + log
(trigger_sha, repo) = self.get_trigger_sha_and_repo(job_id)
mvn1 = self.dispatcher.analyze(file_path, job_id, trigger_sha=trigger_sha, repo=repo)
self.compare_build_system(mvn1, 'Maven')
def test_build_system_5(self):
log = '259221978.log'
job_id = 259221978
file_path = 'build_system_testing/' + log
(trigger_sha, repo) = self.get_trigger_sha_and_repo(job_id)
mvn2 = self.dispatcher.analyze(file_path, job_id, trigger_sha=trigger_sha, repo=repo)
self.compare_build_system(mvn2, 'Maven')
def test_build_system_6(self):
log = '161141427.log'
job_id = 161141427
file_path = 'build_system_testing/' + log
(trigger_sha, repo) = self.get_trigger_sha_and_repo(job_id)
ant1 = self.dispatcher.analyze(file_path, job_id, trigger_sha=trigger_sha, repo=repo)
self.compare_build_system(ant1, 'Ant')
def test_build_system_7(self):
log = '81961806.log'
job_id = 81961806
file_path = 'build_system_testing/' + log
(trigger_sha, repo) = self.get_trigger_sha_and_repo(job_id)
play1 = self.dispatcher.analyze(file_path, job_id, trigger_sha=trigger_sha, repo=repo)
self.compare_build_system(play1, 'play')
def test_build_system_8(self):
log = '92030727.log'
job_id = 92030727
file_path = 'build_system_testing/' + log
(trigger_sha, repo) = self.get_trigger_sha_and_repo(job_id)
play2 = self.dispatcher.analyze(file_path, job_id, trigger_sha=trigger_sha, repo=repo)
self.compare_build_system(play2, 'play')
def test_build_system_9(self):
log = '160772310.log'
job_id = 160772310
file_path = 'build_system_testing/' + log
(trigger_sha, repo) = self.get_trigger_sha_and_repo(job_id)
none2 = self.dispatcher.analyze(file_path, job_id, trigger_sha=trigger_sha, repo=repo)
self.compare_build_system(none2, 'NA')
def test_build_system_10(self):
log = '156977713.log'
job_id = 156977713
file_path = 'build_system_testing/' + log
(trigger_sha, repo) = self.get_trigger_sha_and_repo(job_id)
none3 = self.dispatcher.analyze(file_path, job_id, trigger_sha=trigger_sha, repo=repo)
self.compare_build_system(none3, 'NA')
def test_build_system_11(self):
log = '97793256.log'
job_id = 97793256
file_path = 'build_system_testing/' + log
(trigger_sha, repo) = self.get_trigger_sha_and_repo(job_id)
mf5 = self.dispatcher.analyze(file_path, job_id, trigger_sha=trigger_sha, repo=repo)
self.compare_build_system(mf5, 'Maven')
def test_other_analyzer_0(self):
log = '81961806.log'
job_id = 81961806
file_path = 'other/' + log
(trigger_sha, repo) = self.get_trigger_sha_and_repo(job_id)
oa0 = self.dispatcher.analyze(file_path, job_id, trigger_sha=trigger_sha, repo=repo)
self.compare_analyzer(oa0, 'java-other')
self.compare_build_system(oa0, 'play')
self.compare_bool_t_ran(oa0, False)
self.compare_num_t_run(oa0, 0)
self.compare_num_t_ok(oa0, 'NA')
self.compare_num_t_failed(oa0, 0)
self.compare_num_t_skipped(oa0, 'NA')
self.compare_t_duration(oa0, 117.0)
def test_other_analyzer_1(self):
log = '81965531.log'
job_id = 81965531
file_path = 'other/' + log
(trigger_sha, repo) = self.get_trigger_sha_and_repo(job_id)
oa1 = self.dispatcher.analyze(file_path, job_id, trigger_sha=trigger_sha, repo=repo)
self.compare_analyzer(oa1, 'java-other')
self.compare_build_system(oa1, 'play')
self.compare_bool_t_ran(oa1, True)
self.compare_num_t_run(oa1, 6)
self.compare_num_t_ok(oa1, 6)
self.compare_num_t_failed(oa1, 0)
self.compare_num_t_skipped(oa1, 1)
self.compare_t_duration(oa1, 174.0)
def test_other_analyzer_2(self):
log = '92030727.log'
job_id = 92030727
file_path = 'other/' + log
(trigger_sha, repo) = self.get_trigger_sha_and_repo(job_id)
oa2 = self.dispatcher.analyze(file_path, job_id, trigger_sha=trigger_sha, repo=repo)
self.compare_analyzer(oa2, 'java-other')
self.compare_build_system(oa2, 'play')
self.compare_bool_t_ran(oa2, False)
self.compare_num_t_run(oa2, 0)
self.compare_num_t_ok(oa2, 'NA')
self.compare_num_t_failed(oa2, 0)
self.compare_num_t_skipped(oa2, 'NA')
self.compare_t_duration(oa2, 115.0)
def test_other_analyzer_3(self):
log = '92031917.log'
job_id = 92031917
file_path = 'other/' + log
(trigger_sha, repo) = self.get_trigger_sha_and_repo(job_id)
oa3 = self.dispatcher.analyze(file_path, job_id, trigger_sha=trigger_sha, repo=repo)
self.compare_analyzer(oa3, 'java-other')
self.compare_build_system(oa3, 'play')
self.compare_bool_t_ran(oa3, True)
self.compare_num_t_run(oa3, 5)
self.compare_num_t_ok(oa3, 5)
self.compare_num_t_failed(oa3, 0)
self.compare_num_t_skipped(oa3, 0)
self.compare_t_duration(oa3, 161.0)
def test_other_analyzer_4(self):
log = '156977713.log'
job_id = 156977713
file_path = 'other/' + log
(trigger_sha, repo) = self.get_trigger_sha_and_repo(job_id)
oa4 = self.dispatcher.analyze(file_path, job_id, trigger_sha=trigger_sha, repo=repo)
self.compare_analyzer(oa4, 'java-other')
self.compare_build_system(oa4, 'NA')
self.compare_bool_t_ran(oa4, False)
self.compare_num_t_run(oa4, 0)
self.compare_num_t_ok(oa4, 'NA')
self.compare_num_t_failed(oa4, 0)
self.compare_num_t_skipped(oa4, 'NA')
def test_other_analyzer_5(self):
log = '157259479.log'
job_id = 157259479
file_path = 'other/' + log
(trigger_sha, repo) = self.get_trigger_sha_and_repo(job_id)
oa5 = self.dispatcher.analyze(file_path, job_id, trigger_sha=trigger_sha, repo=repo)
self.compare_analyzer(oa5, 'java-other')
self.compare_build_system(oa5, 'NA')
self.compare_bool_t_ran(oa5, True)
self.compare_num_t_run(oa5, 453)
self.compare_num_t_ok(oa5, 453)
self.compare_num_t_failed(oa5, 0)
self.compare_num_t_skipped(oa5, 0)
self.compare_t_duration(oa5, 71.0)
def test_other_analyzer_6(self):
log = '156977714.log'
job_id = 156977714
file_path = 'other/' + log
(trigger_sha, repo) = self.get_trigger_sha_and_repo(job_id)
oa6 = self.dispatcher.analyze(file_path, job_id, trigger_sha=trigger_sha, repo=repo)
self.compare_analyzer(oa6, 'java-other')
self.compare_build_system(oa6, 'NA')
self.compare_bool_t_ran(oa6, False)
self.compare_num_t_run(oa6, 0)
self.compare_num_t_ok(oa6, 'NA')
self.compare_num_t_failed(oa6, 0)
self.compare_num_t_skipped(oa6, 'NA')
def test_javascript_analyzer_0(self):
log = '425225977.log'
job_id = 425225977
file_path = 'javascript/mocha/' + log
jsa = self.dispatcher.analyze(file_path, job_id)
self.compare_num_t_run(jsa, 11)
self.compare_num_t_ok(jsa, 0)
self.compare_num_t_failed(jsa, 11)
self.compare_t_duration(jsa, 0.654)
self.compare_tr_t_failed(jsa, 'Html5 history navigation "before all" hook:'
'#Html5 history navigation "after all" hook:'
'#Hashbang option enabled "before all" hook:'
'#Hashbang option enabled "after all" hook:'
'#Different Base "before all" hook:'
'#Different Base "after all" hook:'
'#URL path component decoding disabled "before all" hook:'
'#URL path component decoding disabled "after all" hook:'
'#Strict path matching enabled "before all" hook:'
'#Strict path matching enabled "after all" hook:'
'#File protocol "before all" hook:')
def test_javascript_analyzer_1(self):
log = '454920816.log'
job_id = 454920816
file_path = 'javascript/mocha/' + log
jsa = self.dispatcher.analyze(file_path, job_id)
self.compare_bool_t_ran(jsa, True)
self.compare_num_t_run(jsa, 189)
self.compare_num_t_ok(jsa, 188)
self.compare_num_t_skipped(jsa, 17)
self.compare_num_t_failed(jsa, 1)
self.compare_t_duration(jsa, 2.0)
self.compare_tr_t_failed(jsa, 'PapaParse piped streaming CSV should be correctly parsed:')
def test_javascript_analyzer_2(self):
log = '600889160.log'
job_id = 600889160
file_path = 'javascript/mocha/' + log
jsa = self.dispatcher.analyze(file_path, job_id)
self.compare_bool_t_ran(jsa, True)
self.compare_num_t_run(jsa, 24)
            if o.website:
properties["web"] = o.website
n = f.req_site_needs
if n:
if n.needs:
needs = json.loads(n.needs)
if "urgent" in needs:
properties["urgent"] = needs["urgent"]
if "need" in needs:
properties["need"] = needs["need"]
if "no" in needs:
properties["no"] = needs["no"]
f = dict(type = "Feature",
properties = properties,
geometry = json.loads(geojson)
)
append(f)
data = dict(type = "FeatureCollection",
features = features
)
output = json.dumps(data, separators=SEPARATORS)
if jsonp:
filename = "facility.geojsonp"
output = "grid(%s)" % output
else:
filename = "facility.geojson"
path = os.path.join(current.request.folder,
"static", "cache",
filename)
File = open(path, "w")
File.write(output)
File.close()
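# -----------------------------------------------------------------------------
# Minimal sketch (not part of the original module) of the GeoJSON document the
# block above assembles: one Feature per facility with optional "web"/"urgent"/
# "need"/"no" properties, the whole collection wrapped in "grid(...)" when the
# jsonp variant is requested. The sample values are invented, and (",", ":")
# stands in for the module-level SEPARATORS constant used above.
def _facility_geojson_sketch(jsonp=False):
    import json
    feature = {"type": "Feature",
               "properties": {"name": "Example Facility",
                              "web": "http://example.org",
                              "urgent": ["water"],
                              },
               "geometry": {"type": "Point", "coordinates": [45.0, 10.0]},
               }
    data = {"type": "FeatureCollection", "features": [feature]}
    output = json.dumps(data, separators=(",", ":"))
    if jsonp:
        output = "grid(%s)" % output
    return output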
# -----------------------------------------------------------------------------
def org_facility_rheader(r, tabs=[]):
"""
RHeader for facilities when doing a req_match
"""
T = current.T
s3db = current.s3db
# Need to use this format as otherwise /inv/incoming?viewing=org_office.x
# doesn't have an rheader
tablename, record = s3_rheader_resource(r)
r.record = record
r.table = s3db[tablename]
tabs = [(T("Details"), None)]
try:
tabs = tabs + s3db.req_tabs(r)
except:
pass
try:
tabs = tabs + s3db.inv_tabs(r)
except:
pass
rheader_fields = [["name"], ["location_id"]]
rheader = S3ResourceHeader(rheader_fields, tabs)(r)
return rheader
# =============================================================================
class S3RoomModel(S3Model):
"""
Rooms are a location within a Site
- used by Asset module
"""
names = ("org_room",
"org_room_id",
)
def model(self):
T = current.T
db = current.db
# ---------------------------------------------------------------------
# Rooms (for Sites)
# @ToDo: Validate to ensure that rooms are unique per facility
#
tablename = "org_room"
self.define_table(tablename,
self.org_site_id, # site_id
Field("name", length=128, notnull=True,
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(128),
],
),
*s3_meta_fields())
# CRUD strings
ADD_ROOM = T("Create Room")
current.response.s3.crud_strings[tablename] = Storage(
label_create = ADD_ROOM,
title_display = T("Room Details"),
title_list = T("Rooms"),
title_update = T("Edit Room"),
label_list_button = T("List Rooms"),
label_delete_button = T("Delete Room"),
msg_record_created = T("Room added"),
msg_record_modified = T("Room updated"),
msg_record_deleted = T("Room deleted"),
msg_list_empty = T("No Rooms currently registered"))
room_comment = DIV(
S3PopupLink(c = "org",
f = "room",
label = ADD_ROOM,
tooltip = T("Select a Room from the list or click 'Create Room'"),
),
# Filters Room based on site
SCRIPT(
'''$.filterOptionsS3({
'trigger':'site_id',
'target':'room_id',
'lookupPrefix':'org',
'lookupResource':'room'
})''')
)
# Reusable field for other tables to reference
represent = S3Represent(lookup=tablename)
room_id = S3ReusableField("room_id", "reference %s" % tablename,
label = T("Room"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "org_room.id",
represent
)),
sortby = "name",
comment = room_comment,
)
self.configure(tablename,
deduplicate = S3Duplicate(),
)
# Pass names back to global scope (s3.*)
return dict(org_room_id = room_id,
)
# =============================================================================
class S3OfficeModel(S3Model):
names = ("org_office",
"org_office_type",
"org_office_type_id",
)
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
messages = current.messages
settings = current.deployment_settings
add_components = self.add_components
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
organisation_id = self.org_organisation_id
super_link = self.super_link
auth = current.auth
ADMIN = current.session.s3.system_roles.ADMIN
is_admin = auth.s3_has_role(ADMIN)
root_org = auth.root_org()
if is_admin:
filter_opts = ()
elif root_org:
filter_opts = (root_org, None)
else:
filter_opts = (None,)
# ---------------------------------------------------------------------
# Office Types
#
tablename = "org_office_type"
define_table(tablename,
Field("name", length=128, notnull=True,
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(128),
],
),
# Only included in order to be able to set
# realm_entity to filter appropriately
organisation_id(default = root_org,
readable = is_admin,
writable = is_admin,
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
ADD_OFFICE_TYPE = T("Create Office Type")
crud_strings[tablename] = Storage(
label_create = ADD_OFFICE_TYPE,
title_display = T("Office Type Details"),
title_list = T("Office Types"),
title_update = T("Edit Office Type"),
label_list_button = T("List Office Types"),
label_delete_button = T("Delete Office Type"),
msg_record_created = T("Office Type added"),
msg_record_modified = T("Office Type updated"),
msg_record_deleted = T("Office Type deleted"),
msg_list_empty = T("No Office Types currently registered"))
represent = S3Represent(lookup=tablename, translate=True)
office_type_id = S3ReusableField("office_type_id", "reference %s" % tablename,
label = T("Office Type"),
ondelete = "SET NULL",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "org_office_type.id",
represent,
sort=True,
filterby="organisation_id",
filter_opts=filter_opts,
)),
sortby = "name",
comment = S3PopupLink(c = "org",
f = "office_type",
label = ADD_OFFICE_TYPE,
title = T("Office Type"),
tooltip = T("If you don't see the Type in the list, you can add a new one by clicking link 'Create Office Type'."),
),
)
configure(tablename,
deduplicate = S3Duplicate(primary = ("name",),
secondary = ("organisation_id",),
),
)
# Components
add_components(tablename,
# Tags
org_office_type_tag = {"name": "tag",
"joinby": "office_type_id",
},
)
# ---------------------------------------------------------------------
# Offices
#
if settings.get_org_office_code_unique():
code_requires = IS_EMPTY_OR([IS_LENGTH(10),
IS_NOT_IN_DB(db, "org_office.code"),
])
else:
code_requires = IS_LENGTH(10)
tablename = "org_office"
define_table(tablename,
super_link("doc_id", "doc_entity"),
super_link("pe_id", "pr_pentity"),
super_link("site_id", "org_site"),
Field("name", notnull=True,
length=64, # Mayon Compatibility
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64),
],
),
Field("code", length=10, # Mayon compatibility
label = T("Code"),
                           # Deployments that don't want office codes can hide them
#readable=False,
#writable=False,
requires = code_requires,
),
organisation_id(
requires = org_organisation_requires(required=True,
updateable=True),
),
office_type_id(
#readable = False,
#writable = False,
),
self.gis_location_id(),
Field("phone1",
label = T("Phone 1"),
represent = s3_phone_represent,
requires = IS_EMPTY_OR(IS_PHONE_NUMBER_MULTI()),
widget = S3PhoneWidget(),
),
Field("phone2",
label = T("Phone 2"),
represent = s3_phone_represent,
requires = IS_EMPTY_OR(IS_PHONE_NUMBER_MULTI()),
widget = S3PhoneWidget(),
),
Field("email",
label = T("Email"),
represent = lambda v: v or "",
requires = IS_EMPTY_OR(IS_EMAIL()),
),
Field("fax",
label = T("Fax"),
represent = s3_phone_represent,
requires = IS_EMPTY_OR(IS_PHONE_NUMBER_MULTI()),
widget = S3PhoneWidget(),
),
Field("obsolete", "boolean",
default = False,
label = T("Obsolete"),
represent = lambda opt: \
(opt and [T("Obsolete")] or [messages["NONE"]])[0],
readable = False,
writable = False,
),
s3_comments(),
*s3_meta_fields())
form_fields = ["name",
"code",
"organisation_id",
"office_type_id",
"location_id",
"phone1",
"phone2",
"email",
"fax",
"obsolete",
"comments",
]
org_summary = settings.get_org_summary()
if org_summary:
# Include Summary fields in form
position = form_fields.index("email")
form_fields.insert(position+1, "summary.national_staff")
form_fields.insert(position+2, "summary.international_staff")
crud_form = S3SQLCustomForm(*form_fields)
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Create Office"),
title_display = T("Office Details"),
title_list = T("Offices"),
title_update = T("Edit Office"),
title_upload = T("Import Offices"),
title_map = T("Map of Offices"),
label_list_button = T("List Offices"),
label_delete_button = T("Delete Office"),
msg_record_created = T("Office added"),
msg_record_modified = T("Office updated"),
msg_record_deleted = T("Office deleted"),
msg_list_empty = T("No Offices currently registered"))
if settings.get_org_branches():
ORGANISATION = T("Organization/Branch")
comment = T("Search for office by organization or branch.")
org_filter = S3HierarchyFilter("organisation_id",
label = ORGANISATION,
comment = comment,
#hidden = True,
)
else:
ORGANISATION = T("Organization")
comment = T("Search for office by organization.")
org_filter = S3OptionsFilter("organisation_id",
label = ORGANISATION,
comment = comment,
# Doesn't support l10n
#represent = "%(name)s",
#hidden = True,
)
text_fields = ["name",
"code",
"comments",
"organisation_id$name",
"organisation_id$acronym",
]
report_fields = ["name",
"organisation_id", # Filtered in Component views
"office_type_id",
]
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
for level in levels:
lfield = "location_id$%s" % level
report_fields.append(lfield)
text_fields.append(lfield)
list_fields = list(report_fields)
list_fields += [(T("Address"), "location_id$addr_street"),
"phone1",
"email",
]
filter_widgets = [
S3TextFilter(text_fields,
label = T("Search"),
#_class = "filter-search",
),
#S3OptionsFilter("office_type_id",
# label = T("Type"),
# #hidden = True,
# ),
org_filter,
S3LocationFilter("location_id",
label = T("Location"),
levels = levels,
#hidden = True,
),
]
report_options = Storage(
rows = report_fields,
cols = report_fields,
fact = ["count(id)",
"list(name)",
],
defaults = Storage(rows = lfield, # Lowest-level of hierarchy
cols = "office_type_id",
fact = "count(id)",
totals = True,
chart = "spectrum:rows",
),
)
configure(tablename,
context = {"location": "location_id",
"organisation": "organisation_id",
"org_group": "organisation_id$group_membership.group_id",
},
crud_form = crud_form,
deduplicate = S3Duplicate(primary = ("name",),
secondary = ("organisation_id",),
),
filter_widgets = filter_widgets,
list_fields = list_fields,
onaccept = self.org_office_onaccept,
realm_components = ("contact_emergency",
"config",
"image",
"req",
"send",
"human_resource_site",
"note",
"contact",
"role",
"asset",
"commit",
"inv_item",
"document",
"recv",
"address",
),
report_options = report_options,
super_entity = ("doc_entity", "pr_pentity", "org_site"),
update_realm = True,
)
if org_summary:
add_components(tablename,
org_office_summary = {"name": "summary",
"joinby": "office_id",
"multiple": False,
},
)
# Pass names back to global scope (s3.*)
return dict(org_office_type_id = office_type_id,
)
# ---------------------------------------------------------------------
@staticmethod
def org_office_onaccept(form):
"""
* Update Affiliation and Realms
* Process injected fields
"""
form_vars = form.vars
# Affiliation, record ownership and component ownership
org_update_affiliations("org_office", form_vars)
if current.deployment_settings.get_org_summary():
db = current.db
id = form_vars.id
table = current.s3db.org_office_summary
query = (table.office_id == id)
existing = db(query).select(table.id,
limitby=(0, 1)).first()
post_vars = current.request.post_vars
national_staff = post_vars.get("national_staff", None)
international_staff = post_vars.get("international_staff", None)
if existing:
db(query).update(national_staff = national_staff,
international_staff = international_staff
)
elif national_staff or international_staff:
table.insert(office_id = id,
national_staff = national_staff,
international_staff = international_staff
)
# =============================================================================
class S3OfficeSummaryModel(S3Model):
"""
Office Summary fields visible when settings.org.summary = True
@ToDo: Deprecate in favour of S3OrganisationResourceModel
"""
names = | |
import json
import signal
import Queue
from ironsource.atom.ironsource_atom import IronSourceAtom
from ironsource.atom.queue_event_storage import QueueEventStorage
from ironsource.atom.batch_event_pool import BatchEventPool
from ironsource.atom.event import Event
import ironsource.atom.atom_logger as logger
import ironsource.atom.config as config
import time
import random
from threading import Lock
from threading import Thread
class IronSourceAtomTracker:
"""
ironSource Atom high level API class (Tracker), supports: track() and flush()
"""
TAG = "IronSourceAtomTracker"
def __init__(self,
batch_worker_count=config.BATCH_WORKER_COUNT,
batch_pool_size=config.BATCH_POOL_SIZE,
event_backlog=None,
backlog_size=config.BACKLOG_SIZE,
flush_interval=config.FLUSH_INTERVAL,
retry_max_time=config.RETRY_MAX_TIME,
retry_max_count=config.RETRY_MAX_COUNT,
batch_size=config.BATCH_SIZE,
batch_bytes_size=config.BATCH_BYTES_SIZE,
is_debug=False,
debug_to_file=False,
debug_file_path=config.DEBUG_FILE_PATH,
endpoint=config.ATOM_ENDPOINT,
auth_key="",
callback=None,
retry_forever=config.RETRY_FOREVER,
is_blocking=config.BACKLOG_BLOCKING,
backlog_timeout=config.BACKLOG_TIMEOUT,
request_timeout=config.REQUEST_TIMEOUT):
"""
Tracker init function
:param batch_worker_count: Optional, Number of workers(threads) for BatchEventPool
:type batch_worker_count: int
:param batch_pool_size: Optional, Number of events to hold in BatchEventPool
:type batch_pool_size: int
:param event_backlog: Optional, Custom EventStorage implementation
:type event_backlog: object
:param backlog_size: Optional, Backlog queue size (EventStorage ABC implementation)
:type backlog_size: int
:param flush_interval: Optional, Tracker flush interval in milliseconds (default 10000)
:type flush_interval: int
:param retry_max_time: Optional, Retry max time in seconds
:type retry_max_time: int
        :param retry_max_count: Optional, Maximum number of retry attempts
:type retry_max_count: int
:param batch_size: Optional, Amount of events in every batch (bulk) (default: 500)
:type batch_size: int
:param batch_bytes_size: Optional, Size of each batch (bulk) in bytes (default: 64KB)
:type batch_bytes_size: int
:param is_debug: Optional, Enable printing of debug information
:type is_debug: bool
:param debug_to_file: Optional, Should the Tracker write the request and response objects to file
:type debug_to_file: bool
:param debug_file_path: Optional, the path to the debug file (debug_to_file must be True) (default: /tmp)
:type debug_file_path: str
:param endpoint: Optional, Atom endpoint
:type endpoint: str
:param auth_key: Optional, Default auth key to use (when none is provided in .track)
:type auth_key: str
:param callback: Optional, callback to be called on error (Client 400/ Server 500)
:type callback: function
:param retry_forever: Optional, should the BatchEventPool retry forever on server error (default: True)
:type retry_forever: bool
:param is_blocking: Optional, should the tracker backlog block (default: True)
:type is_blocking: bool
:param backlog_timeout: Optional, tracker backlog block timeout (ignored if is_blocking, default: 1 second)
        :type backlog_timeout: int
:param request_timeout: Optional, HTTP requests lib session GET/POST timeout in seconds (default: 60 seconds)
:type request_timeout: int
"""
# Init Atom basic SDK
self._is_debug = is_debug
# For debug printing
self._debug_counter = 0
self._atom = IronSourceAtom(endpoint=endpoint,
is_debug=self._is_debug,
auth_key=auth_key,
request_timeout=request_timeout,
debug_to_file=debug_to_file,
debug_file_path=debug_file_path)
self._logger = logger.get_logger(debug=self._is_debug)
        # Optional callback to be called on error, convention: timestamp, status, error_msg, data, stream
self._callback = callback if callable(callback) else lambda timestamp, status, error_msg, data, stream: None
self._is_run_worker = True
self._flush_all = False
self._alive = True
# Lock of accessing the stream_keys dict
self._data_lock = Lock()
# Streams to keys map
self._stream_keys = {}
# Retry with exponential backoff config
# Retry max time
if not isinstance(retry_max_time, int) or retry_max_time < 120:
self._logger.warning("Retry Max Time must be 120 or greater! Setting default: {}"
.format(config.RETRY_MAX_TIME))
retry_max_time = config.RETRY_MAX_TIME
self._retry_max_time = retry_max_time
# Retry max count
if not isinstance(retry_max_count, int) or retry_max_count < 1:
self._logger.warning("Retry Max Count must be 1 or greater! Setting default: {}"
.format(config.RETRY_MAX_COUNT))
retry_max_count = config.RETRY_MAX_COUNT
self._retry_max_count = retry_max_count
# Batch size
if not isinstance(batch_size, int) or batch_size < 1 or batch_size > config.BATCH_SIZE_LIMIT:
self._logger.warning("Invalid Bulk size, must between 1 to {max}, setting it to {default}"
.format(max=config.BATCH_SIZE_LIMIT, default=config.BATCH_SIZE))
batch_size = config.BATCH_SIZE
self._batch_size = batch_size
# Batch bytes size
if not isinstance(batch_bytes_size, int) \
or batch_bytes_size < 1024 \
or batch_bytes_size > config.BATCH_BYTES_SIZE_LIMIT:
self._logger.warning("Invalid Bulk byte size, must between 1KB to {max}KB, setting it to {default}KB"
.format(max=config.BATCH_BYTES_SIZE_LIMIT / 1024,
default=config.BATCH_BYTES_SIZE / 1024))
batch_bytes_size = config.BATCH_BYTES_SIZE
self._batch_bytes_size = batch_bytes_size
# Flush Interval
if not isinstance(flush_interval, int) or flush_interval < 1000:
self._logger.warning("Flush Interval must be 1000ms or greater! Setting default: {}"
.format(config.FLUSH_INTERVAL))
flush_interval = config.FLUSH_INTERVAL
self._flush_interval = flush_interval
# Holds the events after .track method
self._event_backlog = event_backlog if event_backlog else QueueEventStorage(queue_size=backlog_size,
block=is_blocking,
timeout=backlog_timeout)
# Retry forever on server error (500) - When False and no callback is provided it may cause data loss
self._retry_forever = retry_forever
# Holds batch of events for each stream and sends them using {thread_count} workers
self._batch_event_pool = BatchEventPool(thread_count=batch_worker_count,
max_events=batch_pool_size)
# Start the handler thread - daemon since we want to exit even if it didn't stop yet
handler_thread = Thread(target=self._tracker_handler)
handler_thread.daemon = True
handler_thread.start()
# Start the thread that handles periodic flushing
        timer_thread = Thread(target=self._flush_periodically)
timer_thread.daemon = True
timer_thread.start()
# Intercept exit signals
signal.signal(signal.SIGTERM, self._graceful_kill)
signal.signal(signal.SIGINT, self._graceful_kill)
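    # A minimal usage sketch (the stream name and auth key below are placeholders,
    # not values taken from this module):
    #
    #   tracker = IronSourceAtomTracker(auth_key="YOUR_AUTH_KEY", is_debug=True)
    #   tracker.track(stream="your.stream.name", data={"event": "signup", "id": 1})
    #   tracker.flush()  # force-send whatever is currently buffered
    #   tracker.stop()   # flush remaining data and shut the worker threads down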
def stop(self):
"""
Stop worker thread and event_pool thread's
"""
self._logger.info("Flushing all data and killing the tracker in 5 seconds...")
self._flush_all = True
self._alive = False
i = 0
while True:
# Check if everything is empty or 5 seconds has passed
            if (self._batch_event_pool.is_empty() and self._event_backlog.is_empty()) or i == 5:
self._logger.warning("BatchPool and Backlog are empty or 5 seconds have passed, killing the tracker")
self._is_run_worker = False
self._batch_event_pool.stop()
break
i += 1
time.sleep(1)
def set_debug(self, is_debug): # pragma: no cover
"""
Enable / Disable debug
:param is_debug: Enable printing of debug information
:type is_debug: bool
"""
self._is_debug = is_debug if isinstance(is_debug, bool) else False
self._logger = logger.get_logger(debug=self._is_debug)
self._atom.set_debug(self._is_debug)
def track(self, stream, data, auth_key=""):
"""
Track event
:param stream: Atom stream name
:type stream: str
:param data: Data to send (payload) (dict or string)
:type data: object
:param auth_key: HMAC auth key for stream
:type auth_key: str
"""
if len(auth_key) == 0:
auth_key = self._atom.get_auth()
if not isinstance(data, str):
try:
data = json.dumps(data)
except TypeError as e:
self._error_log(0, time.time(), 400, str(e), data, stream)
return
with self._data_lock:
if stream not in self._stream_keys:
self._stream_keys[stream] = auth_key
try:
self._event_backlog.add_event(Event(stream, data))
self._debug_counter += 1
except Queue.Full:
self._error_log(0, time.time(), 400, "Tracker backlog is full, can't enqueue events", data, stream)
def flush(self):
"""
Flush data from all streams
"""
self._flush_all = True
    def _flush_periodically(self):
        """
        Flush everything every {flush_interval} milliseconds.
        Note: the next wake-up time is computed with time.time() because repeated
        time.sleep(x) with a constant x drifts (each sleep adds a small extra delay).
        """
next_call = time.time()
i = 0
while self._is_run_worker:
if i == 10000:
i = 0
# Divide by 1000 since flush_interval is provided in milliseconds
next_call += self._flush_interval / 1000
# This part is here only for better debugging
if i % 2 == 0:
self._logger.debug("Flushing In {} Seconds".format(next_call - time.time()))
i += 1
try:
time.sleep(next_call - time.time())
self.flush()
except (IOError, ValueError) as e:
# Can happen after sleep
self._logger.error("Timer error: {}".format(str(e.args)))
next_call = time.time()
def _tracker_handler(self):
"""
Main tracker function, handles flushing based on given conditions
"""
# Buffer between backlog and batch pool
events_buffer = {}
# Dict to hold events size for every stream
batch_bytes_size = {}
self._logger.info("Tracker Handler Started")
def flush_data(stream, auth_key):
# This 'if' is needed for the flush_all case
if stream in events_buffer and len(events_buffer[stream]) > 0:
temp_buffer = list(events_buffer[stream])
del events_buffer[stream][:]
batch_bytes_size[stream] = 0
self._batch_event_pool.add_event(lambda: self._flush_data(stream, auth_key, temp_buffer))
while self._is_run_worker:
if self._event_backlog.is_empty():
time.sleep(2)
if self._flush_all:
for stream_name, stream_key in self._stream_keys.items():
flush_data(stream_name, stream_key)
if self._alive:
self._flush_all = False
else:
for stream_name, stream_key in self._stream_keys.items():
# Get one event from the backlog
try:
event_object = self._event_backlog.get_event(stream_name)
except Queue.Empty:
continue
if event_object is None:
continue
if stream_name not in batch_bytes_size:
batch_bytes_size[stream_name] = 0
if stream_name not in events_buffer:
events_buffer[stream_name] = []
batch_bytes_size[stream_name] += len(event_object.data.encode("utf8"))
events_buffer[stream_name].append(event_object.data)
if batch_bytes_size[stream_name] >= self._batch_bytes_size:
flush_data(stream_name, auth_key=stream_key)
if len(events_buffer[stream_name]) >= self._batch_size:
flush_data(stream_name, auth_key=stream_key)
self._logger.info("Tracker handler stopped")
def _flush_data(self, stream, auth_key, data):
"""
Send data to server using IronSource Atom Low-level API
        NOTE: this function is passed as a lambda to the BatchEventPool, so if it has already been
        triggered it may keep running (retrying up to retry_max_count times) even after a graceful shutdown
"""
attempt = 1
while True:
try:
response = self._atom.put_events(stream, data=data, auth_key=auth_key)
except Exception as e:
self._error_log(attempt, time.time(), 400, str(e), data, stream)
return
# Response on first try
if attempt == 1:
self._logger.debug('Got Status: {}; Data: {}'.format(str(response.status), str(data)))
# Status 200 - OK or 400 - Client Error
if 200 <= response.status < 500:
if 200 <= response.status < 400:
if self._debug_counter >= 1000:
self._logger.info('Tracked 1000 events to Atom')
self._logger.info('Status: {}; Response: {}; Error: {}'.format(str(response.status),
str(response.data),
str(response.error)))
| |
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.backends.backend_pdf import PdfPages
"""
Notes for future:
You set up the basic elements of a curve, but not everything
is truly automated. Need to:
1. Automate Coordinates of S,D text
"""
# shift1 is the first shift and shift2 the second; each may be a demand or a supply shift
# ("Demand-Left", "Demand-Right", "Supply-Left" or "Supply-Right")
def supplyAndDemandWithShifts(supply, demand, vertSupply=False, shift1=None, shift2=None, inc=1, name="Loanable Funds", pp=PdfPages("Default.pdf")):
fig = plt.figure(dpi=128, figsize=(10,6))
frame = plt.gca()
plt.title(name, fontsize=20, ha='center')
if vertSupply:
supply = round(len(supply)/2)
print(supply)
if shift1:
if (shift1 != "Supply-Left" and shift1 != "Supply-Right") or vertSupply == False:
firstShift = selectShiftCurve(demand, supply, shift1,order=1)
else:
if shift1 == "Supply-Right":
firstShift = 7000
if shift1 == "Supply-Left":
firstShift = 3000
if shift2:
secondShift = selectShiftCurve(demand, supply,shift1, shift2,order=2)
i = 0
if shift1 and shift2:
xi,yi= findIntersection(supply, demand, inc)
plotCurves(supply, demand,vertSupply, firstShift, secondShift, inc)
placePrimaryText(vertSupply)
p0, q0 = plotVertAndHorizLines(demand,supply,inc,i, "k--", vertSupply=vertSupply)
i +=1
# Horizontal and Vertical Lines for First Shift
if shift1 == "Demand-Left" or shift1 == "Demand-Right":
p1, q1 = plotVertAndHorizLines(firstShift, supply, inc,i, "k--",vertSupply=vertSupply, shift1=shift1,xi=xi,yi=yi)
if shift1 == "Supply-Left" or shift1 == "Supply-Right":
p1, q1 = plotVertAndHorizLines(firstShift, demand, inc,i, "k--",vertSupply=vertSupply, shift1=shift1,xi=xi,yi=yi)
i += 1
if (shift2 == "Demand-Left" or shift2 == "Demand-Right"):
if (shift1 == "Demand-Left" or shift1 == "Demand-Right"):
p2, q2 = plotVertAndHorizLines(secondShift, supply, inc,i, "k--", xi, yi,vertSupply=vertSupply, shift2=shift2)
if shift1 != shift2:
                    p0.remove()
                    q0.remove()
if (shift1 == "Supply-Left" or shift1 == "Supply-Right"):
x1, y1 = findIntersection(demand, firstShift, inc)
p2, q2 = plotVertAndHorizLines(secondShift, firstShift, inc, i, "k--", x1, y1,vertSupply=vertSupply,shift2=shift2)
if (shift2 == "Demand-Left" and shift1 == "Supply-Right") or (shift2 == "Demand-Right" and shift1 == "Supply-Left") :
                    q0.remove()
if shift2 == "Supply-Left" or shift2 == "Supply-Right":
if (shift1 == "Demand-Left" or shift1 == "Demand-Right"):
p2, q2 = plotVertAndHorizLines(secondShift, firstShift, inc,i, "k--", xi, yi,vertSupply=vertSupply,shift2=shift2)
if (shift1 == "Demand-Left" and shift2 == "Supply-Right") or (shift1 == "Demand-Right" and shift2 == "Supply-Left") :
                    q0.remove()
if (shift1 == "Supply-Left" or shift1 == "Supply-Right"):
p2, q2 = plotVertAndHorizLines(secondShift, demand, inc,i, "k--", xi, yi,vertSupply=vertSupply,shift2=shift2)
if shift1 != shift2:
                    p0.remove()
                    q0.remove()
if shift1 == None and shift2 == None:
plotCurves(supply, demand, vertSupply = vertSupply)
p0, q0 = plotVertAndHorizLines(demand,supply,inc,i, "k--",vertSupply=vertSupply)
if shift1 and not shift2:
placePrimaryText(vertSupply)
p0, q0 = plotVertAndHorizLines(demand,supply,inc,i, "k--",vertSupply=vertSupply)
# Horizontal and Vertical Lines for First Shift
i +=1
if shift1 == "Demand-Left" or shift1 == "Demand-Right":
p1, q1 = plotVertAndHorizLines(firstShift, supply, inc,i, "k--",vertSupply=vertSupply, shift1=shift1)
if shift1 == "Supply-Left" or shift1 == "Supply-Right":
p1, q1 = plotVertAndHorizLines(firstShift, demand, inc,i, "k--",vertSupply=vertSupply, shift1 = shift1)
plotCurves(supply, demand, vertSupply,firstShift, None, inc)
if not shift1 and shift2:
plotCurves(supply, demand,vertSupply, None, secondShift, inc)
p0, q0 = plotVertAndHorizLines(demand,supply,inc,i, "k--",vertSupply=vertSupply)
# Horizontal and Vertical Lines for First Shift
i +=1
if shift1 == "Demand-Left" or shift1 == "Demand-Right":
p1, q1 = plotVertAndHorizLines(firstShift, supply, inc,i, "k--",vertSupply=vertSupply,xi=xi,yi=yi)
if shift1 == "Supply-Left" or shift1 == "Supply-Right":
p1, q1 = plotVertAndHorizLines(firstShift, demand, inc,i, "k--",vertSupply=vertSupply,xi=xi,yi=yi)
placePrimaryText(vertSupply)
placeShiftText(shift1, shift2,vertSupply=vertSupply)
setupAxes(frame)
plt.savefig(name.replace("\n"," "))
pp.savefig(fig)
# plt.close()
# pp.close()
def placePrimaryText(vertSupply=False):
#plt.text(x,y,text,fontsize)
p = plt.text(-600, 10000, "$\pi$", fontsize=24)
if vertSupply == False:
s = plt.text(8200, 8800,"$SRAS_0$", fontsize = 24)
else:
s = plt.text(5100, 8800, "$LRAS_0$", fontsize = 24)
d = plt.text(8200, 2000,"$AD_0$", fontsize = 24)
q = plt.text(10000, -650, "$\%\Delta y$", fontsize=24)
return p , s , d , q
def placeShiftText(shift1, shift2=None, vertSupply=False):
if shift1 == None:
if (shift2):
placeShiftText(shift2)
else:
return
if shift1 == "Demand-Left":
plt.text(5500, 1650,"$AD_1$", fontsize = 24)
if shift1 == "Demand-Right":
plt.text(8500, 3800,"$AD_1$", fontsize = 24)
if shift1 == "Supply-Left":
if vertSupply == False:
plt.text(6600, 8800,"$LRAS_1$", fontsize = 24)
else:
plt.text(3100, 8800,"$LRAS_1$", fontsize = 24)
if shift1 == "Supply-Right":
if vertSupply == False:
plt.text(8500, 7600,"$LRAS_1$", fontsize = 24)
else:
plt.text(7100, 8800,"$LRAS_1$", fontsize = 24)
# safety check . . .
if shift1 and shift2:
if shift2 == "Demand-Left":
if shift1 == "Supply-Left" or shift1 == "Supply-Right":
plt.text(6200, 1000,"$AD_1$", fontsize = 24)
if shift1 == "Demand-Left":
plt.text(4000, 1600,"$AD_2$", fontsize = 24)
if shift1 == "Demand-Right":
plt.text(8200, 2000,"$AD_{0,2}$", fontsize = 24) # same as initial
if shift2 == "Demand-Right":
if shift1 == "Supply-Left" or shift1 == "Supply-Right":
plt.text(8200, 3450,"$AD_1$", fontsize = 24)
if shift1 == "Demand-Left":
plt.text(8200, 2000,"$AD_{0,2}$", fontsize = 24) # same as initial
if shift1 == "Demand-Right":
plt.text(9000, 5750,"$AD_2$", fontsize = 24)
if shift2 == "Supply-Left":
if shift1 == "Demand-Left" or shift1 == "Demand-Right":
plt.text(6600, 8800,"$LRAS_1$", fontsize = 24)
if shift1 == "Supply-Left":
plt.text(5100, 8800,"$LRAS_2$", fontsize = 24)
if shift1 == "Supply-Right":
plt.text(7755, 8800,"$LRAS_2$", fontsize = 24) # same as initial
if shift2 == "Supply-Right":
if shift1 == "Demand-Left" or shift1 == "Demand-Right":
plt.text(8500, 7600,"$LRAS_1$", fontsize = 24)
if shift1 == "Supply-Left":
plt.text(7755, 8800,"$LRAS_{0,2}$", fontsize = 24) # same as initial
if shift1 == "Supply-Right":
plt.text(9750, 6000,"$LRAS_2$", fontsize = 24)
def plotCurves(supply, demand, vertSupply=False, shift1=None, shift2=None, inc=1):
# plt.plot((x1,x2), (y1,y2), linestyle/color, linewidth)
if vertSupply == False:
plt.plot(supply, 'C0-', linewidth=3)
else:
plt.axvline(x=supply, color = 'C0', linewidth=3)
plt.plot(demand, 'C0-', linewidth=3)
try:
if isinstance(shift1,np.ndarray):
plt.plot(shift1, 'C3-', linewidth=3)
else:
if shift1 != None:
plt.axvline(x=shift1, color = 'C3', linewidth=3)
except NameError:
print("shift1 = None")
# if not np.all([shift2, supply]) and not np.all([shift2, demand]):
try:
if isinstance(shift2,np.ndarray):
plt.plot(shift2, 'C3-', linewidth=3)
else:
if shift2 != None:
                plt.axvline(x=shift2, color = 'C3', linewidth=3)
except NameError:
print("shift1 = None")
def plotVertAndHorizLines(curve1, curve2, inc, i, line,
xi = None, yi = None, vertSupply=False,shift1=None, shift2=None):
x2,y2 = findIntersection(curve1, curve2, inc)
# plt.plot((x2, x2), (0, y2), line, linewidth=1.5)
plt.plot((0,x2), (y2, y2), line,linewidth=1.5)
if i == 0:
p0 =plt.text(-600,y2, "$\pi_0$", fontsize=20)
q0 = plt.text(x2 - 200, -650, "$\%\Delta y_0$", fontsize=20)
return p0, q0
if i == 1:
p1 = plt.text(-600,y2, "$\pi_1$", fontsize=20)
if vertSupply:
if shift1=="Supply-Left" or shift1 == "Supply-Right":
q1 = plt.text(x2 - 200, -650, "$\%\Delta y_1$", fontsize=20)
else:
q1 = plt.text(x2 - 200, -650, "", fontsize=20)
        else:
            # label the new equilibrium quantity for supply and demand shifts alike,
            # so that q1 is always defined before it is returned
            q1 = plt.text(x2 - 200, -650, "$\%\Delta y_1$", fontsize=20)
        return p1, q1
if i == 2:
if yi != y2:
p2 = plt.text(-600,y2, "$\pi_2$", fontsize=20)
else:
p2 = plt.text(-1450,y2, "$\pi_2=$", fontsize=20)
if xi != x2:
q2 = plt.text(x2 - 200, -650, "$\%\Delta y_2$", fontsize=20)
else:
q2 = plt.text(x2 + 200, -650, "$_{,2}$", fontsize=20)
return p2, q2
def setupAxes(frame):
frame.axes.get_xaxis().set_visible(False)
frame.axes.get_yaxis().set_visible(False)
plt.ylim(0, 10000)
plt.xlim(xmin = 0, xmax = 10000)
plt.xlabel("Real Income", fontsize=20)
plt.ylabel("Price Level", fontsize = 20)
plt.tick_params(axis='both', which='major', labelsize=16)
def findIntersection(curve1, curve2, inc):
try:
for x in range(len(curve1)):
dist = curve1[x] - curve2[x]
if abs(dist) < inc * 1.01:
print(curve1[x])
print(curve2[x])
print("curve1 and curve2 are " + str(dist) + " units apart at x= " + str(x))
return x, curve1[x]
except:
try:
return curve1, curve2[curve1]
except:
return curve2, curve1[curve2]
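# Example of what findIntersection returns for two crossing arrays with inc == 1
# (a sketch, not part of the original script):
#
#   x, y = findIntersection(np.arange(0, 10000), np.arange(10000, 0, -1), 1)
#   # curve1[x] == x and curve2[x] == 10000 - x meet at x == 5000, so x, y == 5000, 5000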
def selectShiftCurve(demand, supply, shift1, shift2 = None, order=1):
print(shift1)
if order == 1:
if shift1 == "Demand-Left":
return np.arange(7000,-3000, -1 * inc)
if shift1 == "Demand-Right":
return np.arange(12000,2000, -1 * inc)
if shift1 == "Supply-Left":
return np.arange(1500, 11500, 1 * inc)
if shift1 == "Supply-Right":
return np.arange(-1500,8500, 1 * inc)
if order == 2:
if shift2 == "Demand-Left" and shift1 == "Demand-Left":
return np.arange(5500,-4500, -1 * inc)
if shift2 == "Demand-Left" and shift1 == "Demand-Right":
return demand
if shift2 == "Demand-Right" and shift1 == "Demand-Right":
return np.arange(14500,4500, -1 * inc)
if shift2 == "Demand-Right" and shift1 == "Demand-Left":
return demand
if shift2 == "Supply-Left" and shift1 == "Supply-Left":
return np.arange(3000, 13000, 1 * inc)
if shift2 == "Supply-Left" and shift1 == "Supply-Right":
return supply
if shift2 == "Supply-Right" and shift1 == "Supply-Right":
return np.arange(-3000,7000, 1 * inc)
if shift2 == "Supply-Right" and shift1 == "Supply-Left":
return supply
else:
if shift2 == "Demand-Left":
return np.arange(8000,-2000, -1 * inc)
if shift2 == "Demand-Right":
return np.arange(11450,1450, -1 * inc)
if shift2 == "Supply-Left":
return np.arange(1500, 11500, 1 * inc)
if shift2 == "Supply-Right":
return np.arange(-1500,8500, 1 * inc)
inc = 1
demandInc = inc
supplyInc = inc
Supply = np.arange(0,10000, 1 * supplyInc)
Demand = np.arange(10000,0, -1 * demandInc)
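# A minimal usage sketch (hypothetical output file name; assumes the Supply and
# Demand arrays defined above):
#
#   pp = PdfPages("AD-AS.pdf")
#   supplyAndDemandWithShifts(Supply, Demand, vertSupply=True,
#                             shift1="Demand-Right", inc=inc,
#                             name="Positive Demand Shock", pp=pp)
#   pp.close()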
vertSupply | |
BeamNG execution")
ax2.set_xticklabels(['values '], rotation="0")
ax2.set_ylabel("time in seconds")
extent_2 = self.full_extent(ax2).transformed(fig.dpi_scale_trans.inverted())
fig.savefig(self.output_folder + '{}_crisce_beamng_efficiency.jpg'.format(self.process_number), bbox_inches=extent_2)
# If you don't do tight_layout() you'll have weird overlaps
plt.tight_layout()
self.process_number += 1
# plt.show()
plt.close()
def plot_crash(self, vehicle_red, vehicle_blue):
self.plot_road(plt.gca())
# self.plot_script(ax[row, 0])
# self.plot_bbox(plt.gca())
self.plot_bbox_rect(plt.gca(), vehicle=vehicle_red)
self.plot_bbox_rect(plt.gca(), vehicle=vehicle_blue)
# self.plot_overhead(ax[row, 1])
plt.grid(False)
plt.axis(False)
plt.gca().invert_xaxis()
plt.gca().invert_yaxis()
# plt.show()
plt.close()
def traceVehicleBbox(self):
Bbox = dict()
vehicle_red = self.crash_analysis_log["vehicles"]["red"]["vehicle"]
vehicle_blue = self.crash_analysis_log["vehicles"]["blue"]["vehicle"]
Bbox[vehicle_red.vid] = self.crash_analysis_log["vehicles"]["red"]["final_bboxes"]
Bbox[vehicle_blue.vid] = self.crash_analysis_log["vehicles"]["blue"]["final_bboxes"]
# plt.figure(figsize=(30, 10))
fig_traj, ax = plt.subplots(1, figsize=(30, 20))
self.plot_road(ax, False)
for vehicle in Bbox.keys():
for i, veh_bbox in enumerate(Bbox[vehicle]):
bbox = veh_bbox
# boundary_x = veh_bbox[1]
# boundary_y = veh_bbox[2]
# bbox = vehicle.get_bbox()
boundary_x = [
bbox['front_bottom_left'][0],
bbox['front_bottom_right'][0],
bbox['rear_bottom_right'][0],
bbox['rear_bottom_left'][0],
]
boundary_y = [
bbox['front_bottom_left'][1],
bbox['front_bottom_right'][1],
bbox['rear_bottom_right'][1],
bbox['rear_bottom_left'][1],
]
ax.fill(boundary_x, boundary_y, "r")
if vehicle == "red_vehicle":
ax.fill(boundary_x, boundary_y, 'r-')
else:
ax.fill(boundary_x, boundary_y, 'b-')
triangle_coord = self.computeTriangle(bbox)
# print(triangle_coord)
# ax.Polygon(np.array(triangle_coord), closed=False, color="blue", alpha=0.3, fill=True, edgecolor=None)
poly = plt.Polygon(np.array(triangle_coord), closed=False,
color="white", alpha=1, fill=True, edgecolor=None)
ax.add_patch(poly)
# bounding_boxes_red.append(self.getBboxRect(vehicle=vehicle_red)[1:])
# bounding_boxes_blue.append(self.getBboxRect(vehicle=vehicle_blue)[1:])
# self.plot_bbox_rect(plt.gca(), vehicle=vehicle_red)
# self.plot_bbox_rect(plt.gca(), vehicle=vehicle_blue)
### plot_overhead(ax[row, 1])
plt.axis('off')
plt.grid(False)
plt.axis(False)
plt.gca().invert_xaxis()
plt.gca().invert_yaxis()
plt.savefig(self.output_folder + '{}_trace_veh_BBOX.jpg'.format(self.process_number), bbox_inches='tight')
self.process_number += 1
# plt.show()
plt.close()
def close(self):
self.bng.close()
def append_df_to_excel(self, file_path, df, sheet_name='Sheet1', startrow=None,
truncate_sheet=False, file_name=None, overwrite=False,
**to_excel_kwargs):
"""
Append a DataFrame [df] to existing Excel file [file_path]
into [sheet_name] Sheet.
If [file_path] doesn't exist, then this function will create it.
@param file_path: File path or existing ExcelWriter
(Example: '/path/to/file.xlsx')
@param df: DataFrame to save to workbook
@param sheet_name: Name of sheet which will contain DataFrame.
(default: 'Sheet1')
@param startrow: upper left cell row to dump data frame.
Per default (startrow=None) calculate the last row
in the existing DF and write to the next row...
@param truncate_sheet: truncate (remove and recreate) [sheet_name]
before writing DataFrame to Excel file
@param to_excel_kwargs: arguments which will be passed to `DataFrame.to_excel()`
[can be a dictionary]
@return: None
Usage examples:
>>> append_df_to_excel('d:/temp/test.xlsx', df)
>>> append_df_to_excel('d:/temp/test.xlsx', df, header=None, index=False)
>>> append_df_to_excel('d:/temp/test.xlsx', df, sheet_name='Sheet2',
index=False)
>>> append_df_to_excel('d:/temp/test.xlsx', df, sheet_name='Sheet2',
index=False, startrow=25)
(c) [MaxU](https://stackoverflow.com/users/5741205/maxu?tab=profile)
"""
# Excel file doesn't exist - saving and exiting
if not os.path.isfile(file_path):
df.to_excel(
file_path,
sheet_name=sheet_name,
startrow=startrow if startrow is not None else 0,
header=df.columns.to_list(),
**to_excel_kwargs)
# df['file_name'] = df.index
print("File does not exist creating one !!!!")
return
# workbook = xlsxwriter.Workbook(os.path.isfile(file_path))
# workbook.close()
print(df.columns.to_list())
# file_path = file_path.replace("\\", "/")
if (file_name in pd.read_excel(file_path)["file_name"].to_list() and overwrite == False):
print("File name already exist !!! ")
print("For overwriting turn the flag on !!! ")
return
elif(file_name in pd.read_excel(file_path)["file_name"].to_list() and overwrite == True):
print("File name already exist !!! ")
row_index = df[df.file_name == file_name].index
print("row_index", row_index)
# print("File name at index {} is overwritten!!! ".format(row_index))
            df = df.drop(row_index)
df.reset_index(drop=True, inplace=True)
print(df.columns.to_list())
if len(pd.read_excel(file_path)) > 1:
header = None
else:
header = df.columns.to_list()
# ignore [engine] parameter if it was passed
if 'engine' in to_excel_kwargs:
to_excel_kwargs.pop('engine')
writer = pd.ExcelWriter(file_path, engine='openpyxl', mode='a')
# try to open an existing workbook
writer.book = load_workbook(file_path)
# get the last row in the existing Excel sheet
# if it was not specified explicitly
if startrow is None and sheet_name in writer.book.sheetnames:
startrow = writer.book[sheet_name].max_row
# truncate sheet
if truncate_sheet and sheet_name in writer.book.sheetnames:
# index of [sheet_name] sheet
idx = writer.book.sheetnames.index(sheet_name)
# remove [sheet_name]
writer.book.remove(writer.book.worksheets[idx])
# create an empty sheet [sheet_name] using old index
writer.book.create_sheet(sheet_name, idx)
# copy existing sheets
writer.sheets = {ws.title:ws for ws in writer.book.worksheets}
if startrow is None:
startrow = 0
# write out the new sheet
# df['file_name'] = df.index
df.to_excel(writer, sheet_name, startrow=startrow, header = None, **to_excel_kwargs)
# save the workbook
writer.save()
##### Constants
# RED_CAR_BOUNDARY = np.array([[0, 200, 180], # red internal_1
# [110, 255, 255]])
RED_CAR_BOUNDARY = np.array([[0, 190, 215], # red external_0
[179, 255, 255]])
BLUE_CAR_BOUNDARY = np.array([[85, 50, 60],
[160, 255, 255]])
# blue_car_boundary = np.array([[85, 50, 60], # blue internal_1
# # [160, 255, 255]])
if __name__ == '__main__':
# main()
#### Object Initialization
# crash_files = [122168, 171831, 100343, 128741, 120565,120305, 105222, 103378, 108909, 156722, 120278,
# 165428, 128697, 156010, 122080, 119897, 129224, 137780, 120013, 148154, 108812]
# for crash_num in crash_files:
# file = r"F:\Development\AutonomousCar\CRISCE\Datasets\External_Validity\{}\sketch.jpeg".format(crash_num)
# road = r"F:\Development\AutonomousCar\CRISCE\Datasets\External_Validity\{}\road.jpeg".format(crash_num)
# external_csv = r"F:\Development\AutonomousCar\CRISCE\Datasets\External_Validity\{}\external.csv".format(crash_num)
""" For internal Images"""
### paste the path for the sketch image
# glob.glob("road*")
# sorted(glob.glob("*.jpg") + glob.glob("*.jpeg"))
file = r"F:\Development\AutonomousCar\CRISCE\Datasets\External_Validity\99817\sketch.jpeg"
road = r"F:\Development\AutonomousCar\CRISCE\Datasets\External_Validity\99817\road.jpeg"
external_csv = r"F:\Development\AutonomousCar\CRISCE\Datasets\External_Validity\99817\external.csv"
# # sorted(glob.glob("*.jpg") + glob.glob("*.jpeg"))
# file = "F:\Development\AutonomousCar\Autonomous_Car_Thesis\external_images\100343\sketch.jpeg"
# road = "F:\Development\AutonomousCar\Autonomous_Car_Thesis\external_images\100343\road.jpeg"
# external_csv = "F:\Development\AutonomousCar\Autonomous_Car_Thesis\external_images\100343\external.csv"
# print("Available images in the directory")
# print("road", road)
# print("\n", file)
dir_path = r"F:\Development\AutonomousCar\CRISCE\Datasets\External_Validity\99817"
sketch_type_external = True
external_impact_points = None
print("{}\*.csv".format(dir_path))
if sketch_type_external:
df = pd.read_csv(external_csv)
external_impact_points = dict()
for i in df.index:
color = str.lower(df.vehicle_color[i])
impact = str.lower(df.impact_point[i])
external_impact_points[color] = dict()
external_impact_points[color] = impact
if sketch_type_external:
RED_CAR_BOUNDARY = np.array([[0, 190, 215], # red external crash sketches
[179, 255, 255]])
else:
RED_CAR_BOUNDARY = np.array([[0, 200, 180], # red internal crash sketches
[110, 255, 255]])
show_image = False # False # True
# show_image = True
######## Main Logic Of the Code Starts Here ################
car = Car()
roads = Roads()
kinematics = Kinematics()
pre_process = Pre_Processing()
sketch = file
output_folder = os.path.join(dir_path, "output") # sketch.split(".")[0])
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# car_length_sim = 5.3
car_length_sim = 4.670000586694935 # 4.6698
sketch_image_path = sketch
road_image_path = road
print("\nFile Name = ", sketch)
#### ------ Read Sketch Image ------ #######
image = pre_process.readImage(sketch_image_path)
car.setColorBoundary(red_boundary=RED_CAR_BOUNDARY, blue_boundary=BLUE_CAR_BOUNDARY)
vehicles, time_efficiency = car.extractVehicleInformation(image_path=sketch_image_path, time_efficiency=dict(),
show_image=show_image, output_folder=output_folder,
external=sketch_type_external, external_impact_points=external_impact_points,
crash_impact_locations=crash_impact_model, car_length_sim=car_length_sim)
car_length, car_width = car.getCarDimensions()
height, width = car.getImageDimensions()
roads, lane_nodes = roads.extractRoadInformation(image_path=road_image_path, time_efficiency=time_efficiency, show_image=show_image,
output_folder= output_folder, car_length=car_length, car_width=car_width,
car_length_sim=car_length_sim)
vehicles, time_efficiency = kinematics.extractKinematicsInformation(image_path=sketch_image_path, vehicles=vehicles, time_efficiency=time_efficiency,
output_folder=output_folder, show_image=show_image)
# print(vehicles)
simulation_folder = os.path.join(output_folder, "simulation/")
if not os.path.exists(simulation_folder):
os.makedirs(simulation_folder)
simulation = Simulation(vehicles=vehicles, roads=roads,
lane_nodes=lane_nodes, kinematics=kinematics,
time_efficiency=time_efficiency, output_folder=simulation_folder,
car_length=car_length, car_width=car_width,
car_length_sim=car_length_sim, sketch_type_external=sketch_type_external,
height=height, width=width)
simulation.bng, simulation.scenario = simulation.setupBeamngSimulation(file)
simulation.aerialViewCamera()
###### ------ Plotting Roads ---------- ##########
fig = plt.figure(figsize=(30, 20))
simulation.plot_road(plt.gca())
plt.gca().set_aspect("auto")
# plt.axis('off')
plt.axis(False)
plt.gca().invert_yaxis()
plt.gca().invert_xaxis()
# plt.show()
plt.savefig(simulation.output_folder + '{}_sim_plot_road.jpg'.format(simulation.process_number), bbox_inches='tight')
simulation.process_number += 1
# fig.savefig(simulation.output_folder + ".jpg", bbox_inches='tight')
plt.close()
simulation.plotSimulationCrashSketch()
simulation.initiateDrive()
simulation.postCrashDamage()
for vehicle_color in vehicles:
ref_impact_side = vehicles[vehicle_color]["impact_point_details"]["internal_impact_side"]
print(vehicle_color, ref_impact_side)
simulation.effectAndAccurayOfSimulation()
road_similarity = simulation.computeRoadGeometricSimilarity()
placement_similarity, orientation_similarity = simulation.computeVehiclesSimilarity()
simulation.computeBboxTrajectory(image.copy(), show_image=show_image)
simulation_accuracy = simulation.computeSimulationAccuracy(road_similarity, placement_similarity, orientation_similarity)
total_time = simulation.computeCrisceEfficiency()
simulation.plotCrisceEfficiency(total_time)
simulation.traceVehicleBbox()
""" For storing values in Excel file"""
# analysis_log_df = pd.DataFrame([[simulation.log["vehicles"],
# {"red": simulation.crash_analysis_log["vehicles"]["red"]["simulation_trajectory"],
# "blue": simulation.crash_analysis_log["vehicles"]["blue"]["simulation_trajectory"]},
# simulation.log["simulated_impact"], simulation.log["road_similarity"],
# simulation.log["placement_similarity"], simulation.log["orientation_similarity"],
# simulation.log["quality_of_env"], simulation.log["red_side_match"],
# simulation.log["blue_side_match"], simulation.log["quality_of_crash"],
# simulation.log["red_cum_iou"], simulation.log["blue_cum_iou"],
# simulation.log["quality_of_traj"],
# {"red": simulation.log["vehicles"]["red"]["crash_veh_disp_error"],
# "blue": simulation.log["vehicles"]["blue"]["crash_veh_disp_error"]},
# {"red": simulation.log["vehicles"]["red"]["crash_veh_IOU"],
# "blue": simulation.log["vehicles"]["blue"]["crash_veh_IOU"]},
# simulation.log["simulation_accuracy"],
# simulation.log["total_time"]]],
# index=[file_name], columns=['vehicles', "simulation_trajectory", 'simulated_impact',
# 'road_similarity', 'placement_similarity',
# 'orientation_similarity', "quality_of_environment",
# "red_side_match", "blue_side_match", "quality_of_crash",
# "red_cum_iou", "blue_cum_iou", "quality_of_trajectory",
# "crash_veh_disp_error", "crash_veh_IOU",
# 'simulation_accuracy', 'total_time'])
# # # analysis_log_df.to_excel("output.xlsx")
# # simulation.append_df_to_excel(dir_path + "\output.xlsx", analysis_log_df, sheet_name='Sheet1')
# index_value = analysis_log_df.index.values
# # print("index values", p)
# analysis_log_df.insert(0, column="file_name", value=index_value)
# analysis_log_df.reset_index(drop=True, inplace=True)
# simulation.append_df_to_excel(dir_path + "\output\output.xlsx", analysis_log_df, sheet_name='Sheet1', file_name=file, overwrite=True, index=False)
simulation.close()
# df = pd.read_excel(dir_path + "\output\output.xlsx")
# df.to_json(dir_path + "\output\output.json")
for v_color in vehicles:
vehicles[v_color]["trajectories"]["computed"]["bezier_curve"] = [waypoint.tolist() for waypoint in vehicles[v_color]["trajectories"]["computed"]["bezier_curve"]]
vehicles[v_color]["trajectories"]["computed"]["b_spline"] = [waypoint.tolist() for waypoint in vehicles[v_color]["trajectories"]["computed"]["b_spline"]]
vehicles[v_color]["trajectories"]["computed"]["cubic_spline"] = [waypoint.tolist() for waypoint in vehicles[v_color]["trajectories"]["computed"]["cubic_spline"]]
vehicles[v_color]["trajectories"]["original_trajectory"] = [waypoint.tolist() for waypoint in vehicles[v_color]["trajectories"]["original_trajectory"]]
vehicles[v_color]["trajectories"]["distorted_trajectory"] = [waypoint.tolist() for waypoint in vehicles[v_color]["trajectories"]["distorted_trajectory"]]
vehicles[v_color]["trajectories"]["simulation_trajectory"] = [waypoint.tolist() for waypoint in vehicles[v_color]["trajectories"]["simulation_trajectory"]]
log = {"vehicles": vehicles,
"road_nodes": lane_nodes,
"vehicle_crash_specifics": simulation.log["vehicles"],
"simulation_trajectory":{"red": simulation.crash_analysis_log["vehicles"]["red"]["simulation_trajectory"],
"blue": simulation.crash_analysis_log["vehicles"]["blue"]["simulation_trajectory"]},
"simulation_rotations": {"red": simulation.crash_analysis_log["vehicles"]["red"]["rotations"],
"blue": simulation.crash_analysis_log["vehicles"]["blue"]["rotations"]},
"sim_veh_speed": {"red": simulation.crash_analysis_log["vehicles"]["red"]["wheel_speed"],
"blue": simulation.crash_analysis_log["vehicles"]["blue"]["wheel_speed"]},
"sim_time": {"red": simulation.crash_analysis_log["vehicles"]["red"]["sim_time"],
"blue": simulation.crash_analysis_log["vehicles"]["blue"]["sim_time"]},
"simulated_impact": simulation.log["simulated_impact"],
"road_similarity": simulation.log["road_similarity"],
"placement_similarity" : simulation.log["placement_similarity"],
"orientation_similarity": simulation.log["orientation_similarity"],
"quality_of_env": simulation.log["quality_of_env"], "red_side_match" : simulation.log["red_side_match"],
"blue_side_match": simulation.log["blue_side_match"], "quality_of_crash": simulation.log["quality_of_crash"],
"red_cum_iou" : simulation.log["red_cum_iou"], "blue_cum_iou": simulation.log["blue_cum_iou"],
"quality_of_traj": simulation.log["quality_of_traj"],
"crash_disp_error": {"red": simulation.log["vehicles"]["red"]["crash_veh_disp_error"],
"blue": simulation.log["vehicles"]["blue"]["crash_veh_disp_error"]},
"crash_IOU": {"red": simulation.log["vehicles"]["red"]["crash_veh_IOU"],
"blue": | |
<reponame>nicholasadamou/python-proxy<filename>venv/lib/python3.7/site-packages/scapy/contrib/diameter.py<gh_stars>0
##########################################################################
#
# Diameter protocol implementation for Scapy
# Original Author: <NAME>
#
# This implements the base Diameter protocol RFC6733 and the additional standards: # noqa: E501
# RFC7155, RFC4004, RFC4006, RFC4072, RFC4740, RFC5778, RFC5447, RFC6942, RFC5777 # noqa: E501
# ETS29229 V12.3.0 (2014-09), ETS29272 V13.1.0 (2015-03), ETS29329 V12.5.0 (2014-12), # noqa: E501
# ETS29212 V13.1.0 (2015-03), ETS32299 V13.0.0 (2015-03), ETS29210 V6.7.0 (2006-12), # noqa: E501
# ETS29214 V13.1.0 (2015-03), ETS29273 V12.7.0 (2015-03), ETS29173 V12.3.0 (2015-03), # noqa: E501
# ETS29172 V12.5.0 (2015-03), ETS29215 V13.1.0 (2015-03), ETS29209 V6.8.0 (2011-09), # noqa: E501
# ETS29061 V13.0.0 (2015-03), ETS29219 V13.0.0 (2014-12)
#
# IMPORTANT note:
#
# - Some Diameter fields (Unsigned64, Float32, ...) have not been tested yet due to lack # noqa: E501
# of network captures containing AVPs of that types contributions are welcomed. # noqa: E501
#
##########################################################################
# scapy.contrib.description = Diameter
# scapy.contrib.status = loads
import socket
import struct
from time import ctime
from scapy.packet import Packet, bind_layers
from scapy.fields import ConditionalField, EnumField, Field, FieldLenField, \
FlagsField, IEEEDoubleField, IEEEFloatField, IntEnumField, IntField, \
LongField, PacketListField, SignedIntField, StrLenField, X3BytesField, \
XByteField, XIntField
from scapy.layers.inet import TCP
from scapy.layers.sctp import SCTPChunkData
import scapy.modules.six as six
from scapy.modules.six.moves import range
from scapy.compat import chb, orb, raw, bytes_hex, plain_str
from scapy.error import warning
from scapy.utils import inet_ntoa, inet_aton
from scapy.pton_ntop import inet_pton, inet_ntop
#####################################################################
#####################################################################
#
# Definition of additional fields
#
#####################################################################
#####################################################################
class I3BytesEnumField (X3BytesField, EnumField):
""" 3 bytes enum field """
def __init__(self, name, default, enum):
EnumField.__init__(self, name, default, enum, "!I")
class I3FieldLenField(X3BytesField, FieldLenField):
__slots__ = ["length_of", "count_of", "adjust"]
def __init__(
self,
name,
default,
length_of=None,
count_of=None,
adjust=lambda pkt,
x: x):
X3BytesField.__init__(self, name, default)
self.length_of = length_of
self.count_of = count_of
self.adjust = adjust
def i2m(self, pkt, x):
return FieldLenField.i2m(self, pkt, x)
###########################################################
# Fields for Diameter commands
###########################################################
class DRFlags (FlagsField):
def i2repr(self, pkt, x):
if x is None:
return "None"
res = hex(int(x))
r = ''
cmdt = (x & 128) and ' Request' or ' Answer'
if x & 15: # Check if reserved bits are used
nb = 8
offset = 0
else: # Strip the first 4 bits
nb = 4
offset = 4
x >>= 4
for i in range(nb):
r += (x & 1) and str(self.names[offset + i][0]) or '-'
x >>= 1
invert = r[::-1]
return res + cmdt + ' (' + invert[:nb] + ')'
class DRCode (I3BytesEnumField):
def __init__(self, name, default, enum):
"""enum is a dict of tuples, so conversion is required before calling the actual init method. # noqa: E501
Note: the conversion is done only once."""
enumDict = {}
for k, v in enum.items():
enumDict[k] = v[0]
I3BytesEnumField.__init__(self, name, default, enumDict)
def i2repr(self, pkt, x):
cmd = self.i2repr_one(pkt, x)
sx = str(x)
if cmd == sx:
cmd = 'Unknown'
return sx + " (" + cmd + ")"
###########################################################
# Fields for Diameter AVPs
###########################################################
class AVPFlags (FlagsField):
def i2repr(self, pkt, x):
if x is None:
return "None"
res = hex(int(x))
r = ''
if x & 31: # Check if reserved bits are used
nb = 8
offset = 0
else: # Strip the first 5 bits
nb = 3
offset = 5
x >>= 5
for i in range(nb):
r += (x & 1) and str(self.names[offset + i][0]) or '-'
x >>= 1
invert = r[::-1]
return res + ' (' + invert[:nb] + ')'
class AVPVendor (IntField):
def i2repr(self, pkt, x):
vendor = vendorList.get(x, "Unkown_Vendor")
return "%s (%s)" % (vendor, str(x))
# Note: the dictionary below is minimal (taken from scapy/layers/dhcp6.py,
# with 3GPP and ETSI added)
vendorList = {
9: "ciscoSystems",
35: "Nortel Networks",
43: "3Com",
311: "Microsoft",
323: "Tekelec",
2636: "Juniper Networks, Inc.",
4526: "Netgear",
5771: "Cisco Systems, Inc.",
5842: "Cisco Systems",
8164: "Starent Networks",
10415: "3GPP",
13019: "ETSI",
16885: "Nortel Networks"}
# The Application IDs for the Diameter command field
AppIDsEnum = {
0: "Diameter_Common_Messages",
1: "NASREQ_Application",
2: "Mobile_IPv4_Application",
3: "Diameter_Base_Accounting",
4: "Diameter_Credit_Control_Application",
5: "EAP_Application",
6: "Diameter_Session_Initiation_Protocol_(SIP)_Application",
7: "Diameter_Mobile_IPv6_IKE___(MIP6I)",
8: "Diameter_Mobile_IPv6_Auth__(MIP6A)",
111: "ALU_Sy",
555: "Sun_Ping_Application",
16777216: "3GPP_Cx",
16777217: "3GPP_Sh",
16777222: "3GPP_Gq",
16777223: "3GPP_Gmb",
16777224: "3GPP_Gx",
16777227: "Ericsson_MSI",
16777228: "Ericsson_Zx",
16777229: "3GPP_RX",
16777231: "Diameter_e2e4_Application",
16777232: "Ericsson_Charging-CIP",
16777236: "3GPP_Rx",
16777238: "3GPP_Gx",
16777250: "3GPP_STa",
16777251: "3GPP_S6a/S6d",
16777252: "3GPP_S13/S13'",
16777255: "3GPP_SLg",
16777264: "3GPP_SWm",
16777265: "3GPP_SWx",
16777266: "3GPP_Gxx",
16777267: "3GPP_S9",
16777269: "Ericsson_HSI",
16777272: "3GPP_S6b",
16777291: "3GPP_SLh",
16777292: "3GPP_SGmb",
16777302: "3GPP_Sy",
16777304: "Ericsson_Sy",
16777315: "Ericsson_Diameter_Signalling_Controller_Application_(DSC)",
4294967295: "Relay",
}
###########################################################
# Definition of fields contained in section 4.2 of RFC6733
# for AVPs payloads
###########################################################
class OctetString (StrLenField):
def i2repr(self, pkt, x):
try:
return plain_str(x)
except BaseException:
return bytes_hex(x)
class Integer32 (SignedIntField):
pass
class Integer64 (Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "q")
class Unsigned32 (IntField):
pass
class Unsigned64 (LongField):
pass
class Float32 (IEEEFloatField):
pass
class Float64 (IEEEDoubleField):
pass
###########################################################
# Definition of additional fields contained in section 4.3
# of RFC6733 for AVPs payloads
###########################################################
class Address (StrLenField):
def i2repr(self, pkt, x):
if x.startswith(b'\x00\x01'): # IPv4 address
return inet_ntoa(x[2:])
elif x.startswith(b'\x00\x02'): # IPv6 address
return inet_ntop(socket.AF_INET6, x[2:])
else: # Address format not yet decoded
print('Warning: Address format not yet decoded.')
return bytes_hex(x)
def any2i(self, pkt, x):
if x and isinstance(x, str):
try: # Try IPv4 conversion
s = inet_aton(x)
return b'\x00\x01' + s
except BaseException:
try: # Try IPv6 conversion
s = inet_pton(socket.AF_INET6, x)
return b'\x00\x02' + s
except BaseException:
print('Warning: Address format not supported yet.')
return b''
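# Example of the family-tagged packing done by Address above (a sketch, not part
# of the original module):
#
#   Address("addr", "").any2i(None, "10.0.0.1")   ->  b'\x00\x01\x0a\x00\x00\x01'
#   Address("addr", "").i2repr(None, b'\x00\x01\x0a\x00\x00\x01')  ->  '10.0.0.1'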
class Time (IntField):
def i2repr(self, pkt, x):
return ctime(x)
class Enumerated (IntEnumField):
def i2repr(self, pkt, x):
if x in self.i2s:
return self.i2s[x] + " (%d)" % x
else:
return repr(x) + " (Unknown)"
class IPFilterRule (StrLenField):
pass
class Grouped (StrLenField):
"""This class is just for declarative purpose because it is used in the AVP definitions dict.""" # noqa: E501
pass
####################################################################
# Definition of additional fields contained in other standards
####################################################################
class QoSFilterRule (StrLenField): # Defined in 4.1.1 of RFC7155
pass
class ISDN (StrLenField):
def i2repr(self, pkt, x):
out = b''
for char in x:
c = orb(char)
out += chb(48 + (c & 15)) # convert second digit first
v = (c & 240) >> 4
if v != 15:
out += chb(48 + v)
return out
def any2i(self, pkt, x):
out = b''
if x:
fd = True # waiting for first digit
for c in x:
digit = orb(c) - 48
if fd:
val = digit
else:
val = val + 16 * digit
out += chb(val)
fd = not fd
if not fd: # Fill with 'f' if odd number of characters
out += chb(240 + val)
return out
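# Example (illustrative): ISDN numbers are TBCD-encoded, two digits per byte
# with the nibbles swapped and 0xF filling an odd-length number, so
#     any2i(pkt, "1234") -> b'\x21\x43'    and    any2i(pkt, "123") -> b'\x21\xf3'
# while i2repr() reverses the transformation back to b'1234' / b'123'.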
#####################################################################
#####################################################################
#
# AVPs classes and definitions
#
#####################################################################
#####################################################################
AVP_Code_length = 4
AVP_Flag_length = 1
DIAMETER_BYTES_ALIGNMENT = 4
AVP_Flags_List = ["x", "x", "x", "x", "x", "P", "M", "V"]
def GuessAvpType(p, **kargs):
if len(p) > AVP_Code_length + AVP_Flag_length:
# Set AVP code and vendor
avpCode = struct.unpack("!I", p[:AVP_Code_length])[0]
vnd = bool(struct.unpack(
"!B", p[AVP_Code_length:AVP_Code_length + AVP_Flag_length])[0] & 128) # noqa: E501
vndCode = vnd and struct.unpack("!I", p[8:12])[0] or 0
# Check if vendor and code defined and fetch the corresponding AVP
# definition
if vndCode in AvpDefDict:
AvpVndDict = AvpDefDict[vndCode]
if avpCode in AvpVndDict:
# Unpack only the first 3 tuple items at this point
avpName, AVPClass, flags = AvpVndDict[avpCode][:3]
result = AVPClass(p, **kargs)
result.name = 'AVP ' + avpName
return result
# Packet too short or AVP vendor or AVP code not found ...
return AVP_Unknown(p, **kargs)
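# Example (illustrative): the dispatcher above relies only on the fixed AVP
# header layout -- bytes 0-3 carry the AVP code, byte 4 the flags (0x80 is the
# V bit) and bytes 8-11 the Vendor-Id when V is set.  A buffer starting with
#     b'\x00\x00\x01\x08\x40...'    (code 264, flags 0x40: M set, V clear)
# is therefore looked up under vendor 0, code 264 in AvpDefDict.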
class AVP_Generic (Packet):
""" Parent class for the 5 following AVP intermediate classes below"""
def extract_padding(self, s):
nbBytes = self.avpLen % DIAMETER_BYTES_ALIGNMENT
if nbBytes:
nbBytes = DIAMETER_BYTES_ALIGNMENT - nbBytes
return s[:nbBytes], s[nbBytes:]
def post_build(self, p, pay):
nbBytes = (-len(p)) % 4
while nbBytes:
p += struct.pack("B", 0)
nbBytes -= 1
return p + pay
def show2(self):
self.__class__(raw(self), name=self.name).show()
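# Example (illustrative): Diameter AVPs are padded to a 32-bit boundary and the
# padding is not counted in the AVP Length field, so post_build() appends
# (-len(p)) % 4 zero bytes (a 13-byte AVP gets 3 of them) and extract_padding()
# keeps those pad bytes with this AVP while handing the rest of the buffer back
# to the surrounding dissector.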
def AVP(avpId, **fields):
""" Craft an AVP based on its id and optional parameter fields"""
val = None
classType = AVP_Unknown
if isinstance(avpId, str):
try:
for vnd in AvpDefDict:
for code in AvpDefDict[vnd]:
val = AvpDefDict[vnd][code]
if val[0][:len(
avpId)] == avpId: # A prefix of the full name is considered valid # noqa: E501
raise
found = False
except BaseException:
found = True
else:
if isinstance(avpId, list):
code = avpId[0]
vnd = avpId[1]
else: # Assume this is an int
code = avpId
vnd = 0
try:
val = AvpDefDict[vnd][code]
found = True
except BaseException:
found = False
if not found:
warning('The AVP identifier %s has not
<filename>raredecay/tools/data_storage.py
"""
@author: <NAME> "Mayou36"
DEPRECATED! USE OTHER MODULES LIKE rd.data, rd.ml, rd.reweight, rd.score and rd.stat
DEPRECATED! DEPRECATED! DEPRECATED! DEPRECATED! DEPRECATED!
This module contains the data handling. The main part is the class which
takes data, weights, targets, names and converts automatically, plots and more.
"""
import copy
import warnings
import math
import random
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from rep.data.storage import LabeledDataStorage
from ..tools import data_tools, dev_tool
try:
from raredecay.globals_ import out
out_imported = True
except ImportError:
warnings.warn(
"could not import out. Some functions regarding output"
+ " (save figure etc.) won't be available",
ImportWarning,
)
out_imported = False
# TODO: import config not needed?? remove because its from the old structure
# import configuration
from .. import meta_config as meta_cfg
from .. import config as cfg
modul_logger = dev_tool.make_logger(__name__, **cfg.logger_cfg)
class HEPDataStorage:
"""Data-storage for data, weights, targets; conversion; plots and more"""
# define constants for independence
__ROOT_DATATYPE = meta_cfg.ROOT_DATATYPE
__figure_number = 0
__figure_dic = {}
latex_replacements = {
# "CHI2": r"\chi^2",
r"_PT": r" p_T",
r"JPsi": r"J/\psi",
r"K1": r"K_1 ",
r"_1270": "",
r"_ENDVERTEX_CHI2": r"\ \chi^2_{VTX}",
r"_IPCHI2": r"\ \chi^2_{IP}",
r"_FDCHI2": r"\ \chi^2_{FD}",
r"_TRACK_CHI2": r"\ \chi^2_{track}",
r"_OWNPV": r"\ ownPV",
r"_CosTheta": r"\ cos(\theta)",
r"NDOF": r"/N_{degree of freedom}",
r"AMAXDOCA": r"\ AMAXDOCA",
# "_": "\ "
}
def __init__(
self,
data,
index=None,
target=None,
sample_weights=None,
data_name=None,
data_name_addition=None,
column_alias=None,
):
"""Initialize instance and load data.
Parameters
----------
data : |data_type|
The data itself. This can be two different types
- **root-tree dict** (*root-dict*):
Dictionary which specifies all the information to convert a root-
tree to an array. Directly given to :py:func:`~root_numpy.root2rec`
- **pandas DataFrame**:
A pandas DataFrame. The index (if not explicitly defined)
and column names will be taken.
index : 1-D array-like
The indices of the data that will be used.
target : list or 1-D array or int {0, 1}
Labels the data for the machine learning. Usually the y.
sample_weights : |sample_weights_type|
|sample_weights_docstring|
.. note:: If None or 1 specified, 1 will be assumed for all.
data_name : str
| Name of the data, human-readable. Displayed in the title of
plots.
| *Example: 'Bu2K1piee mc', 'beta-decay real data' etc.*
data_name_addition : str
| Additional remarks to the data, human readable. Displayed in
the title of plots.
| *Example: 'reweighted', 'shuffled', '5 GeV cut applied' etc.*
column_alias : |column_alias_type|
|column_alias_docstring|
"""
# initialize logger
self.logger = modul_logger
# initialize index
# self._index = None
# initialize data
# self._data = None
self._data_type = None
self.column_alias = {} if column_alias is None else column_alias
self._fold_index = None  # list with indices of folds
self._fold_status = None # tuple (my_fold_number, total_n_folds)
self._length = None
self.set_data(data=data, index=index)
# self._columns = None
# data name
self._name = ["", "", ""]
data_name = "unnamed data" if data_name is None else data_name
self.data_name = data_name
self.data_name_addition = data_name_addition
self.fold_name = None
# initialize targets
self._set_target(target=target)
# # data-labels human readable, initialize with the column name
# self._label_dic = {}
# self._label_dic = {col: col for col in self.columns if self._label_dic.get(col) is None}
# TODO: delete?
# self.set_labels(data_labels=data_labels)
# initialize weights
self._weights = None
self.set_weights(sample_weights)
# plot settings
hist_settings = meta_cfg.DEFAULT_HIST_SETTINGS
self.hist_settings = hist_settings
self.supertitle_fontsize = 18
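# Example (illustrative; the column names and labels below are assumed purely
# for illustration):
#
#     df = pd.DataFrame({"B_PT": [1.2, 3.4], "B_ENDVERTEX_CHI2": [0.1, 0.2]})
#     storage = HEPDataStorage(df, target=1, sample_weights=None,
#                              data_name="signal mc", data_name_addition="test")
#
# wraps the DataFrame, labels every event as signal (target=1) and, because
# sample_weights is None, assumes a weight of 1 for every event.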
def __len__(self):
if self._length is None:
self._set_length()
return self._length
# TODO: remove obsolet
def get_name(self):
"""Return the human-readable name of the data as a string."""
warnings.warn(
"Depreceated, obj.get_name() will be removed. Use obj.name instead.",
FutureWarning,
)
return self._get_name()
@property
def name(self):
"""Return the **full** human-readable name of the data as a string."""
return self._get_name()
def _get_name(self):
out_str = data_tools.obj_to_string(self._name, separator=" ")
return out_str
def _set_name(self, data_name=None, data_name_addition=None, fold_name=None):
"""Set the data name."""
# set the new name in self._name
for i, name in enumerate([data_name, data_name_addition, fold_name]):
if name is not None:
self._name[i] = str(name)
# TODO: change the naming into a dict?
@property
def data_name(self):
"""The name of the data."""
return self._name[0]
@property
def data_name_addition(self):
"""The data name addition."""
return self._name[1]
@property
def fold_name(self):
"""The name of the fold (like *fold 2 of 5*)."""
return self._name[2]
@data_name.setter
def data_name(self, data_name):
self._set_name(data_name=data_name)
@data_name_addition.setter
def data_name_addition(self, data_name_addition):
self._set_name(data_name_addition=data_name_addition)
@fold_name.setter
def fold_name(self, fold_name):
self._set_name(fold_name=fold_name)
@property
def data_type(self):
""" "Return the data-type like 'root', 'df' etc."""
return self._data_type
def get_index(self):
"""Return the index used inside the DataStorage. Advanced feature."""
warnings.warn(
"Will be removed in the future. Use obj.index instead", FutureWarning
)
return self._make_index()
@property
def index(self):
"""The internal index"""
return self._make_index()
@index.setter
def index(self, index):
self._set_index(index)
def _make_index(self, index=None):
"""Return the index, else the self._index. If none exist, **create**
the normal one
It has the following priorities:
1. if the given index is not None, it will be taken
2. next look for the self._index. If there is one, it will be returned
3. otherwise, a list of indeces as usuall (0, len-1) will be returned
"""
if index is None:
temp = list(range(len(self))) if self._index is None else self._index
else:
temp = index
return temp
def _set_index(self, index):
"""If index is not None -> assign. Else try to get from data"""
if index is None:
self._index = None
if self._data_type == "root":
pass # no index contained in root-dicts
elif self._data_type == "array":
pass # no index information contained in an array
elif self._data_type == "df":
index_list = self._data.index.tolist()
# TODO: remove HACK with length, replace with len(self)
if not index_list == list(range(len(self))): # if special indexing
self._index = index_list
else:
self._index = index
@property
def columns(self):
"""The columns/branches of the data"""
return self._columns
@columns.setter
def columns(self, columns):
# TODO: maybe check?
if columns is not None:
columns = data_tools.to_list(columns)
columns = dev_tool.entries_to_str(columns)
self._set_columns(columns=columns)
def _set_columns(self, columns):
if columns is None:
if self._data_type == "root":
self._columns = data_tools.to_list(self._data["branches"])
elif self._data_type == "df":
self._columns = data_tools.to_list(self._data.columns.values)
# TODO: remove below?
# elif self._data_type == 'array':
# self._columns = ['feature_' + str(i) for i in range(len(self._data))]
else:
self._columns = data_tools.to_list(columns)
self._columns = [str(col) for col in self._columns]
def _set_length(self):
# determine whether to set length individually from the data or not
index = self._index
if index is None:
if self._data_type == "root":
temp_root_dict = copy.deepcopy(self._data)
temp_branch = temp_root_dict.pop(
"branches"
) # remove to only use one branch
temp_branch = data_tools.to_list(temp_branch)
self._length = len(
data_tools.to_pandas(
dict(branches=temp_branch[0], **temp_root_dict)
)
)
elif self._data_type == "df":
self._length = len(self._data)
# TODO: remove below?
# elif self._data_type == 'array':
# self._length = self._data.shape[1]
else:
self._length = len(index)
@staticmethod
def _get_data_type(data):
"""Return the type of the data.
- 'df' : pandas DataFrame
- 'root': root-file
- 'array': numpy array
"""
data_type = None
if isinstance(data, dict):
if "filenames" in data and data["filenames"].endswith(
HEPDataStorage.__ROOT_DATATYPE
):
data_type = "root"
elif isinstance(data, pd.DataFrame):
data_type = "df"
# TODO: remove below
# elif isinstance(data, (np.ndarray, np.array)):
# data_type = 'array'
return data_type
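# Example (illustrative; key names besides 'filenames' are assumed from the
# usual root_numpy conventions): a root-dict such as
#     {"filenames": "signal.root", "treename": "DecayTree", "branches": ["B_PT"]}
# is classified as 'root' (provided meta_cfg.ROOT_DATATYPE is the '.root'
# suffix), a pandas.DataFrame as 'df', and anything else currently yields None.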
@property
def data(self):
"""Return the data as is without conversion, e.g. a root-dict, pandasDF etc."""
return self._data
def set_data(self, data, index=None, columns=None, column_alias=None):
"""Set the data and also change index and columns.
Parameters
----------
data : |data_type|
The new data
index : |index_type|
|index_docstring|
columns : list(str, str, str,...)
The columns for the data to use
column_alias : |column_alias_type|
|column_alias_docstring|
"""
if column_alias is not None:
self.column_alias.update(column_alias)
self.column_alias = dev_tool.entries_to_str(column_alias)
self._set_data(data=data, index=index, columns=columns)
def _set_data(self, data, index=None, columns=None):
"""Set the data, length- and columns-attribute.
Convert the data to the right (root-dict, df etc.) format (and save).
Also set the length and columns.
currently implemented:
- ROOT-data file (*root-dict*)
- Pandas DataFrame
"""
# Python2/3 compatibility, str
if isinstance(data, dict):
data = dev_tool.entries_to_str(data)
# get the data_type
self._data = data
self._data_type = self._get_data_type(data)
self.index = index
self.columns = columns
self._set_length()
# convert the data (and save it)
# root data
if self._data_type == "root":
pass
# pandas DataFrame
elif self._data_type == "df":
self._data = self._make_df(index=self._index) # No cols, it's set above
# TODO: remove below?
# elif self._data_type == 'array':
# self._data = self._make_df(index=self._index)
# warnings.warn(DeprecationWarning, "Not safe, it's better to use pandas DataFrame")
else:
raise NotImplementedError("Other dataformats are not yet implemented")
def get_weights(self, index=None, normalize=True, **kwargs):
<filename>testnet/prolog/interpreter/arithmetic.py
import py
import math
from prolog.interpreter.parsing import TermBuilder
from prolog.interpreter import helper, term, error
from prolog.interpreter.signature import Signature
from prolog.interpreter.error import UnificationFailed
from rpython.rlib.rarithmetic import intmask, ovfcheck_float_to_int
from rpython.rlib.unroll import unrolling_iterable
from rpython.rlib import jit, rarithmetic
from rpython.rlib.rbigint import rbigint
Signature.register_extr_attr("arithmetic")
def eval_arithmetic(engine, obj):
result = obj.eval_arithmetic(engine)
return make_int(result)
class CodeCollector(object):
def __init__(self):
self.code = []
self.blocks = []
def emit(self, line):
for line in line.split("\n"):
self.code.append(" " * (4 * len(self.blocks)) + line)
def start_block(self, blockstarter):
assert blockstarter.endswith(":")
self.emit(blockstarter)
self.blocks.append(blockstarter)
def end_block(self, starterpart=""):
block = self.blocks.pop()
assert starterpart in block, "ended wrong block %s with %s" % (
block, starterpart)
def tostring(self):
assert not self.blocks
return "\n".join(self.code)
def wrap_builtin_operation(name, num_args):
fcode = CodeCollector()
fcode.start_block('def prolog_%s(engine, query):' % name)
for i in range(num_args):
fcode.emit('var%s = query.argument_at(%s).eval_arithmetic(engine)' % (i, i))
if num_args == 1:
fcode.emit('return var0.arith_%s()' % name)
elif num_args == 2:
fcode.emit('return var0.arith_%s(var1)' % name)
fcode.end_block('def')
miniglobals = globals().copy()
exec py.code.Source(fcode.tostring()).compile() in miniglobals
result = miniglobals['prolog_' + name]
return result
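# Example (illustrative): wrap_builtin_operation("add", 2) generates and
# compiles the following source before returning the function object:
#
#     def prolog_add(engine, query):
#         var0 = query.argument_at(0).eval_arithmetic(engine)
#         var1 = query.argument_at(1).eval_arithmetic(engine)
#         return var0.arith_add(var1)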
# remove unneeded parts, use sane names for operations
simple_functions = [
("+", 2, "add"),
("+", 1, "unaryadd"),
("-", 2, "sub"),
("-", 1, "unarysub"),
("*", 2, "mul"),
("/", 2, "div"),
("//", 2, "floordiv"),
("**", 2, "pow"),
("sqrt", 1, "sqrt"),
(">>", 2, "shr"),
("<<", 2, "shl"),
("\\/", 2, "or"),
("/\\", 2, "and"),
("xor", 2, "xor"),
("mod", 2, "mod"),
("\\", 1, "not"),
("abs", 1, "abs"),
("max", 2, "max"),
("min", 2, "min"),
("round", 1, "round"),
("floor", 1, "floor"), #XXX
("ceiling", 1, "ceiling"), #XXX
("float_fractional_part", 1, "float_fractional_part"), #XXX
("float_integer_part", 1, "float_integer_part")
]
for prolog_name, num_args, name in simple_functions:
f = wrap_builtin_operation(name, num_args)
signature = Signature.getsignature(prolog_name, num_args)
signature.set_extra("arithmetic", f)
for suffix in ["", "_number", "_bigint", "_float"]:
def not_implemented_func(*args):
raise NotImplementedError("abstract base class")
setattr(term.Numeric, "arith_%s%s" % (name, suffix), not_implemented_func)
@jit.elidable_promote('all')
def get_arithmetic_function(signature):
return signature.get_extra("arithmetic")
def make_int(w_value):
if isinstance(w_value, term.BigInt):
try:
num = w_value.value.toint()
except OverflowError:
pass
else:
return term.Number(num)
return w_value
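# Example (illustrative): make_int(term.BigInt(rbigint.fromint(3))) collapses
# to term.Number(3) because the value fits a machine word, while a result such
# as 2**100 stays wrapped in term.BigInt since toint() raises OverflowError.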
class __extend__(term.Numeric):
def arith_sqrt(self):
return self.arith_pow(term.Float(0.5))
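# Example (illustrative): the arithmetic below is double-dispatched on the
# operand types.  term.Number(2).arith_add(term.Float(1.5)) forwards to
# Float.arith_add_number(2), which returns term.Float(3.5); integer operations
# that overflow fall back to the corresponding *_bigint method instead.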
class __extend__(term.Number):
# ------------------ addition ------------------
def arith_add(self, other):
return other.arith_add_number(self.num)
def arith_add_number(self, other_num):
try:
res = rarithmetic.ovfcheck(other_num + self.num)
except OverflowError:
return self.arith_add_bigint(rbigint.fromint(other_num))
return term.Number(res)
def arith_add_bigint(self, other_value):
return make_int(term.BigInt(other_value.add(rbigint.fromint(self.num))))
def arith_add_float(self, other_float):
return term.Float(other_float + float(self.num))
def arith_unaryadd(self):
return self
# ------------------ subtraction ------------------
def arith_sub(self, other):
return other.arith_sub_number(self.num)
def arith_sub_number(self, other_num):
try:
res = rarithmetic.ovfcheck(other_num - self.num)
except OverflowError:
return self.arith_sub_bigint(rbigint.fromint(other_num))
return term.Number(res)
def arith_sub_bigint(self, other_value):
return make_int(term.BigInt(other_value.sub(rbigint.fromint(self.num))))
def arith_sub_float(self, other_float):
return term.Float(other_float - float(self.num))
def arith_unarysub(self):
try:
res = rarithmetic.ovfcheck(-self.num)
except OverflowError:
return term.BigInt(rbigint.fromint(self.num).neg())
return term.Number(res)
# ------------------ multiplication ------------------
def arith_mul(self, other):
return other.arith_mul_number(self.num)
def arith_mul_number(self, other_num):
try:
res = rarithmetic.ovfcheck(other_num * self.num)
except OverflowError:
return self.arith_mul_bigint(rbigint.fromint(other_num))
return term.Number(res)
def arith_mul_bigint(self, other_value):
return make_int(term.BigInt(other_value.mul(rbigint.fromint(self.num))))
def arith_mul_float(self, other_float):
return term.Float(other_float * float(self.num))
# ------------------ division ------------------
def arith_div(self, other):
return other.arith_div_number(self.num)
def arith_div_number(self, other_num):
if self.num == 0:
error.throw_evaluation_error("zero_divisor")
try:
res = rarithmetic.ovfcheck(other_num / self.num)
except OverflowError:
return self.arith_div_bigint(rbigint.fromint(other_num))
return term.Number(res)
def arith_div_bigint(self, other_value):
if self.num == 0:
error.throw_evaluation_error("zero_divisor")
return make_int(term.BigInt(other_value.div(rbigint.fromint(self.num))))
def arith_div_float(self, other_float):
if self.num == 0:
error.throw_evaluation_error("zero_divisor")
return term.Float(other_float / float(self.num))
def arith_floordiv(self, other):
return other.arith_floordiv_number(self.num)
def arith_floordiv_number(self, other_num):
if self.num == 0:
error.throw_evaluation_error("zero_divisor")
try:
res = rarithmetic.ovfcheck(other_num // self.num)
except OverflowError:
return self.arith_floordiv_bigint(rbigint.fromint(other_num))
return term.Number(res)
def arith_floordiv_bigint(self, other_value):
if self.num == 0:
error.throw_evaluation_error("zero_divisor")
return make_int(term.BigInt(other_value.floordiv(rbigint.fromint(self.num))))
def arith_floordiv_float(self, other_float):
error.throw_type_error("integer", other_float)
# ------------------ power ------------------
def arith_pow(self, other):
return other.arith_pow_number(self.num)
def arith_pow_number(self, other_num):
try:
res = ovfcheck_float_to_int(math.pow(other_num, self.num))
except OverflowError:
return self.arith_pow_bigint(rbigint.fromint(other_num))
return term.Number(res)
def arith_pow_bigint(self, other_value):
return make_int(term.BigInt(other_value.pow(rbigint.fromint(self.num))))
def arith_pow_float(self, other_float):
return term.Float(math.pow(other_float, float(self.num)))
# ------------------ shift right ------------------
def arith_shr(self, other):
return other.arith_shr_number(self.num)
def arith_shr_number(self, other_num):
return term.Number(other_num >> self.num)
def arith_shr_bigint(self, other_value):
return make_int(term.BigInt(other_value.rshift(self.num)))
# ------------------ shift left ------------------
def arith_shl(self, other):
return other.arith_shl_number(self.num)
def arith_shl_number(self, other_num):
return term.Number(intmask(other_num << self.num))
def arith_shl_bigint(self, other_value):
return make_int(term.BigInt(other_value.lshift(self.num)))
# ------------------ or ------------------
def arith_or(self, other):
return other.arith_or_number(self.num)
def arith_or_number(self, other_num):
return term.Number(other_num | self.num)
def arith_or_bigint(self, other_value):
return make_int(term.BigInt(rbigint.fromint(self.num).or_(other_value)))
# ------------------ and ------------------
def arith_and(self, other):
return other.arith_and_number(self.num)
def arith_and_number(self, other_num):
return term.Number(other_num & self.num)
def arith_and_bigint(self, other_value):
return make_int(term.BigInt(rbigint.fromint(self.num).and_(other_value)))
# ------------------ xor ------------------
def arith_xor(self, other):
return other.arith_xor_number(self.num)
def arith_xor_number(self, other_num):
return term.Number(other_num ^ self.num)
def arith_xor_bigint(self, other_value):
return make_int(term.BigInt(rbigint.fromint(self.num).xor(other_value)))
# ------------------ mod ------------------
def arith_mod(self, other):
return other.arith_mod_number(self.num)
def arith_mod_number(self, other_num):
if self.num == 0:
error.throw_evaluation_error("zero_divisor")
return term.Number(other_num % self.num)
def arith_mod_bigint(self, other_value):
if self.num == 0:
error.throw_evaluation_error("zero_divisor")
return make_int(term.BigInt(other_value.mod(rbigint.fromint(self.num))))
# ------------------ inversion ------------------
def arith_not(self):
return term.Number(~self.num)
# ------------------ abs ------------------
def arith_abs(self):
if self.num >= 0:
return self
return term.Number(0).arith_sub(self)
# ------------------ max ------------------
def arith_max(self, other):
return other.arith_max_number(self.num)
def arith_max_number(self, other_num):
return term.Number(max(other_num, self.num))
def arith_max_bigint(self, other_value):
self_value = rbigint.fromint(self.num)
if self_value.lt(other_value):
return make_int(term.BigInt(other_value))
return make_int(term.BigInt(self_value))
def arith_max_float(self, other_float):
return term.Float(max(other_float, float(self.num)))
# ------------------ min ------------------
def arith_min(self, other):
return other.arith_min_number(self.num)
def arith_min_number(self, other_num):
return term.Number(min(other_num, self.num))
def arith_min_bigint(self, other_value):
self_value = rbigint.fromint(self.num)
if self_value.lt(other_value):
return make_int(term.BigInt(self_value))
return make_int(term.BigInt(other_value))
def arith_min_float(self, other_float):
return term.Float(min(other_float, float(self.num)))
# ------------------ miscellaneous ------------------
def arith_round(self):
return self
def arith_floor(self):
return self
def arith_ceiling(self):
return self
def arith_float_fractional_part(self):
return term.Number(0)
def arith_float_integer_part(self):
return self
class __extend__(term.Float):
# ------------------ addition ------------------
def arith_add(self, other):
return other.arith_add_float(self.floatval)
def arith_add_number(self, other_num):
return term.Float(float(other_num) + self.floatval)
def arith_add_bigint(self, other_value):
return term.Float(other_value.tofloat() + self.floatval)
def arith_add_float(self, other_float):
return term.Float(other_float + self.floatval)
def arith_unaryadd(self):
return self
# ------------------ subtraction ------------------
def arith_sub(self, other):
return other.arith_sub_float(self.floatval)
def arith_sub_number(self, other_num):
return term.Float(float(other_num) - self.floatval)
def arith_sub_bigint(self, other_value):
return term.Float(other_value.tofloat() - self.floatval)
def arith_sub_float(self, other_float):
return term.Float(other_float - self.floatval)
def arith_unarysub(self):
return term.Float(-self.floatval)
# ------------------ multiplication ------------------
def arith_mul(self, other):
return other.arith_mul_float(self.floatval)
def arith_mul_number(self, other_num):
return term.Float(float(other_num) * self.floatval)
def arith_mul_bigint(self, other_value):
return term.Float(other_value.tofloat() * self.floatval)
def arith_mul_float(self, other_float):
return term.Float(other_float * self.floatval)
# ------------------ division ------------------
def arith_div(self, other):
return other.arith_div_float(self.floatval)
def arith_div_number(self, other_num):
if self.floatval == 0.0:
error.throw_evaluation_error("zero_divisor")
return term.Float(float(other_num) / self.floatval)
def arith_div_bigint(self, other_value):
if self.floatval == 0.0:
error.throw_evaluation_error("zero_divisor")
return term.Float(other_value.tofloat() / self.floatval)
def arith_div_float(self, other_float):
if self.floatval == 0.0:
error.throw_evaluation_error("zero_divisor")
return term.Float(other_float / self.floatval)
def arith_floordiv(self, other_float):
error.throw_type_error("integer", self)
def arith_floordiv_number(self, other_num):
error.throw_type_error("integer", self)
def arith_floordiv_bigint(self, other_value):
error.throw_type_error("integer", self)
def arith_floordiv_float(self, other_float):
error.throw_type_error("integer", other_float)
# ------------------ power ------------------
def arith_pow(self, other):
return other.arith_pow_float(self.floatval)
def arith_pow_number(self, other_num):
return term.Float(math.pow(float(other_num), self.floatval))
def arith_pow_bigint(self, other_value):
return term.Float(math.pow(other_value.tofloat(), self.floatval))
def arith_pow_float(self, other_float):
return term.Float(math.pow(other_float, self.floatval))
# ------------------ abs ------------------
def arith_abs(self):
return term.Float(abs(self.floatval))
# ------------------ max ------------------
def arith_max(self, other):
return other.arith_max_float(self.floatval)
def arith_max_number(self, other_num):
return term.Float(max(float(other_num), self.floatval))
def arith_max_bigint(self, other_value):
return term.Float(max(other_value.tofloat(), self.floatval))
def arith_max_float(self, other_float):
return term.Float(max(other_float, self.floatval))
# ------------------ min ------------------
def arith_min(self, other):
return other.arith_min_float(self.floatval)
def arith_min_number(self, other_num):
return term.Float(min(float(other_num), self.floatval))
def arith_min_bigint(self, other_value):
return term.Float(min(other_value.tofloat(), self.floatval))
def arith_min_float(self, other_float):
return term.Float(min(other_float, self.floatval))
# ------------------ miscellaneous ------------------
def arith_round(self):
fval = self.floatval
if fval >= 0:
factor = 1
else:
factor = -1
fval = fval * factor
try:
val = ovfcheck_float_to_int(math.floor(fval + 0.5) * factor)
except OverflowError:
return term.BigInt(rbigint.fromfloat(math.floor(fval + 0.5) * factor))
return term.Number(val)
def arith_floor(self):
try:
val = ovfcheck_float_to_int(math.floor(self.floatval))
except OverflowError:
return term.BigInt(rbigint.fromfloat(math.floor(self.floatval)))
return term.Number(val)
def arith_ceiling(self):
try:
val = ovfcheck_float_to_int(math.ceil(self.floatval))
except OverflowError:
return term.BigInt(rbigint.fromfloat(math.ceil(self.floatval)))
return term.Number(val)
def arith_float_fractional_part(self):
try:
val = ovfcheck_float_to_int(self.floatval)
except OverflowError:
val = rbigint.fromfloat(self.floatval).tofloat()
return term.Float(float(self.floatval - val))
def arith_float_integer_part(self):
try:
val = ovfcheck_float_to_int(self.floatval)
except OverflowError:
return term.BigInt(rbigint.fromfloat(self.floatval))
return term.Number(val)
class __extend__(term.BigInt):
# ------------------ addition ------------------
def arith_add(self, other):
return other.arith_add_bigint(self.value)
def arith_add_number(self, other_num):
return make_int(term.BigInt(rbigint.fromint(other_num).add(self.value)))
def arith_add_bigint(self, other_value):
return make_int(term.BigInt(other_value.add(self.value)))
def arith_add_float(self, other_float):
return term.Float(other_float + self.value.tofloat())
def arith_unaryadd(self):
return self
# ------------------ subtraction ------------------
def arith_sub(self, other):
return other.arith_sub_bigint(self.value)
def arith_sub_number(self, other_num):
return make_int(term.BigInt(rbigint.fromint(other_num).sub(self.value)))
def arith_sub_bigint(self, other_value):
return make_int(term.BigInt(other_value.sub(self.value)))
def arith_sub_float(self, other_float):
return term.Float(other_float - self.value.tofloat())
def arith_unarysub(self):
return term.BigInt(self.value.neg())
# ------------------ multiplication ------------------
def arith_mul(self, other):
return other.arith_mul_bigint(self.value)
def arith_mul_number(self, other_num):
return make_int(term.BigInt(rbigint.fromint(other_num).mul(self.value)))
def arith_mul_bigint(self, other_value):
return make_int(term.BigInt(other_value.mul(self.value)))
def arith_mul_float(self, other_float):
return term.Float(other_float * self.value.tofloat())
# ------------------ division ------------------
def arith_div(self, other):
return other.arith_div_bigint(self.value)
def arith_div_number(self, other_num):
return make_int(term.BigInt(rbigint.fromint(other_num).div(self.value)))
def arith_div_bigint(self, other_value):
try:
return make_int(term.BigInt(other_value.div(self.value)))
except ZeroDivisionError:
error.throw_evaluation_error("zero_divisor")
def arith_div_float(self, other_float):
return term.Float(other_float / self.value.tofloat())
<filename>museum_site/file.py
import hashlib
import io
import os
import subprocess
import zipfile
from datetime import datetime
from random import randint, seed, shuffle
from django.db import models
from django.db.models import Avg, Q
from django.template.defaultfilters import date, filesizeformat
try:
import zookeeper
HAS_ZOOKEEPER = True
except ImportError:
HAS_ZOOKEEPER = False
from .common import (
slash_separated_sort, zipinfo_datetime_tuple_to_str, UPLOAD_CAP,
STATIC_PATH, optimize_image
)
from .constants import SITE_ROOT, REMOVED_ARTICLE, ZETA_RESTRICTED, LANGUAGES
from .review import Review
DETAIL_DOS = 1
DETAIL_WIN16 = 2
DETAIL_WIN32 = 3
DETAIL_WIN64 = 4
DETAIL_LINUX = 5
DETAIL_OSX = 6
DETAIL_FEATURED = 7
DETAIL_CONTEST = 8
DETAIL_ZZM = 9
DETAIL_GFX = 10
DETAIL_MOD = 11
DETAIL_ETC = 12
DETAIL_SZZT = 13
DETAIL_UTILITY = 14
DETAIL_ZZT = 15
DETAIL_ZIG = 16
DETAIL_LOST = 17
DETAIL_UPLOADED = 18
DETAIL_REMOVED = 19
DETAIL_CORRUPT = 20
class FileManager(models.Manager):
def basic_search(self, q):
return self.filter(
Q(title__icontains=q) |
Q(aliases__alias__icontains=q) |
Q(author__icontains=q) |
Q(filename__icontains=q) |
Q(company__icontains=q)
).distinct()
def directory(self, category):
if category == "company":
return self.values(
"company"
).exclude(
company=None
).exclude(
company=""
).distinct().order_by("company")
elif category == "author":
return self.values("author").distinct().order_by("author")
def identifier(self, identifier=None, letter=None, filename=None):
if identifier is None:
return self.filter(letter=letter, filename=filename)
def latest_additions(self):
return self.filter(
spotlight=True
).exclude(
Q(details__id__in=[DETAIL_UPLOADED]) |
Q(release_date__gte="2021-01-01")
).order_by("-publish_date", "-id")
def new_releases(self):
return self.filter(
spotlight=True, release_date__gte="2021-01-01"
).exclude(
details__id__in=[DETAIL_UPLOADED]
).order_by("-publish_date", "-id")
def published(self):
return self.exclude(details__id__in=[DETAIL_UPLOADED, DETAIL_LOST])
def search(self, p):
qs = File.objects.all()
return qs
def standard_worlds(self):
return self.filter(
details__id__in=[DETAIL_ZZT, DETAIL_SZZT, DETAIL_UPLOADED]
)
def random_zzt_world(self):
excluded_details = [
DETAIL_LOST, DETAIL_REMOVED, DETAIL_UPLOADED, DETAIL_CORRUPT
]
max_pk = self.all().order_by("-id")[0].id
zgame = None
while not zgame:
pk = randint(1, max_pk)
zgame = self.filter(pk=pk, details__id=DETAIL_ZZT).exclude(
details__id__in=excluded_details
).exclude(genre__icontains="Explicit").first()
return zgame
def roulette(self, rng_seed, limit):
details = [DETAIL_ZZT, DETAIL_SZZT, DETAIL_UTILITY]
# Get all valid file IDs
ids = list(
self.filter(details__id__in=details).values_list("id", flat=True)
)
# Shuffle them
seed(rng_seed)
shuffle(ids)
# Return them in a random order
return File.objects.filter(id__in=ids[:limit]).order_by("?")
def unpublished(self):
return self.filter(details__id__in=[DETAIL_UPLOADED])
def wozzt(self):
excluded_details = [
DETAIL_UPLOADED, DETAIL_GFX, DETAIL_LOST, DETAIL_CORRUPT
]
return self.filter(
details__in=[DETAIL_ZZT]
).exclude(
Q(details__in=excluded_details) |
Q(author__icontains="_ry0suke_") |
Q(genre__icontains="explicit")
)
class File(models.Model):
""" File object repesenting an upload to the site """
objects = FileManager()
"""
Fields:
letter -- Letter the file can be found under via browse pages
filename -- Name of the (Zip) file (ex: Respite.zip)
title -- Name of the World (ex: Frost 1: Power)
sort_title -- Title used for natural sorting
author -- / sep. ABC list of authors (ex: Hercules/Nadir)
size -- Filesize in bytes (ex: 420690)
genre -- / sep. ABC list of genres (ex: Action/RPG)
release_date -- Best guess release date (ex: 2001-04-16)
release_source -- Source of release date (ex: ZZT file, News post, Text)
screenshot -- Filename of screenshot to display (ex: 3dtalk.png)
company -- / sep. ABC list of companies published (ex: ERI/IF)
description -- Description of file for utilities or featured games
review_count -- Number of reviews on this file
rating -- Average rating if any, from file's reviews
details -- Link to Detail objects
articles -- Link to Article objects
article_count -- Number of articles associated with this file
checksum -- md5 checksum of the zip file
superceded -- FK with File for the "definitive" version of a file
playable_boards -- Number of boards in file that can be accessed in play
total_boards -- Number of boards in file that exist period
archive_name -- name on archive.org (ex: zzt_burgerj)
aliases -- Link to Alias objects
spotlight -- Allow appearance on front page
can_review -- Allow reviews on the file
license -- File license if available
license_source -- Source of license information
(ex: LICENSE file, documentation, game.zzt)
downloads -- Reference to Download sources
language -- / sep. ABC list of language codes (ISO 639-1)
explicit -- If the file contains explicit content
"""
letter = models.CharField(max_length=1, db_index=True, editable=False)
filename = models.CharField(max_length=50)
size = models.IntegerField(default=0, editable=False)
title = models.CharField(max_length=80)
author = models.CharField(max_length=255)
company = models.CharField(
max_length=255, default="", blank=True,
)
genre = models.CharField(max_length=255)
release_date = models.DateField(default=None, null=True, blank=True)
release_source = models.CharField(
max_length=20, null=True, default=None, blank=True
)
language = models.CharField(max_length=50, default="en")
description = models.TextField(null=True, blank=True, default="")
superceded = models.ForeignKey("File", db_column="superceded_id",
null=True, blank=True, default=None,
on_delete=models.SET_NULL)
playable_boards = models.IntegerField(
null=True, blank=True, default=None,
help_text="Set automatically. Do not adjust."
)
total_boards = models.IntegerField(
null=True, blank=True, default=None,
help_text="Set automatically. Do not adjust."
)
archive_name = models.CharField(
max_length=80,
default="",
blank=True,
help_text="ex: zzt_burgerj"
)
screenshot = models.CharField(
max_length=80, blank=True, null=True, default=None
)
license = models.CharField(max_length=150, default="Unknown")
license_source = models.CharField(max_length=150, default="", blank=True)
# Derived Data
checksum = models.CharField(
max_length=32, null=True, blank=True, default=""
)
sort_title = models.CharField(
max_length=100, db_index=True, default="", blank=True,
help_text="Leave blank to set automatically"
)
# Reviews
review_count = models.IntegerField(
default=0, help_text="Set automatically. Do not adjust."
)
rating = models.FloatField(null=True, default=None, blank=True)
# Museum Properties
explicit = models.BooleanField(default=False)
spotlight = models.BooleanField(default=True)
can_review = models.BooleanField(default=True)
publish_date = models.DateTimeField(
null=True, default=None, db_index=True, blank=True,
help_text="Date File was published on the Museum"
)
last_modified = models.DateTimeField(
auto_now=True,
help_text="Date DB entry was last modified"
)
# Associations
aliases = models.ManyToManyField("Alias", default=None, blank=True)
articles = models.ManyToManyField(
"Article", default=None, blank=True
)
article_count = models.IntegerField(
default=0, editable=False
)
details = models.ManyToManyField("Detail", default=None, blank=True)
downloads = models.ManyToManyField("Download", default=None, blank=True)
zeta_config = models.ForeignKey(
"Zeta_Config", null=True, blank=True, default=1,
on_delete=models.SET_NULL
)
class Meta:
ordering = ["sort_title", "letter"]
def __str__(self):
return "[" + str(self.id) + "] " + self.title
def basic_save(self, *args, **kwargs):
super(File, self).save(*args, **kwargs)
def save(self, *args, **kwargs):
# Pre save
# Force lowercase letter
if not self.letter:
self.letter = self.letter_from_title()
else:
self.letter = self.letter.lower()
# Sort genres
self.genre = slash_separated_sort(self.genre)
# Create sorted title
self.calculate_sort_title()
# Recalculate Article Count
self.calculate_article_count()
# If the screenshot is blank and a file exists for it, set it
file_exists = os.path.isfile(
os.path.join(SITE_ROOT, "museum_site/static/images/screenshots/") +
self.letter + "/" + self.filename[:-4] + ".png"
)
if self.screenshot == "" and file_exists:
self.screenshot = self.filename[:-4] + ".png"
# Calculate Review Scores
self.calculate_reviews()
# Update blank md5s
if self.checksum == "" or self.checksum is None:
self.calculate_checksum()
# Set board counts for non-uploads
if HAS_ZOOKEEPER and not kwargs.get("new_upload"):
if not self.playable_boards or not self.total_boards:
self.calculate_boards()
# Actual save call
if kwargs.get("new_upload"):
del kwargs["new_upload"]
super(File, self).save(*args, **kwargs)
def jsoned(self):
data = {
"letter": self.letter,
"filename": self.filename,
"title": self.title,
"sort_title": self.sort_title,
"author": self.author,
"size": self.size,
"genres": self.genre_list(),
"release_date": self.release_date,
"release_source": self.release_source,
"screenshot": self.screenshot,
"company": self.company,
"description": self.description,
"review_count": self.review_count,
"rating": self.rating,
"details": [],
"articles": [],
"aliases": [],
"article_count": self.article_count,
"checksum": self.checksum,
"playable_boards": self.playable_boards,
"total_boards": self.total_boards,
"archive_name": self.archive_name,
"publish_date": self.publish_date,
"last_modified": self.last_modified,
}
for d in self.details.all():
data["details"].append({"id": d.id, "detail": d.detail})
for a in self.articles.all().only("id", "title"):
data["articles"].append({"id": a.id, "title": a.title})
for a in self.aliases.all():
data["aliases"].append({"id": a.id, "alias": a.alias})
return data
def calculate_sort_title(self):
# Handle titles that start with A/An/The
sort_title = self.title.lower()
if sort_title.startswith(("a ", "an ", "the ")):
sort_title = sort_title[sort_title.find(" ") + 1:]
# Expand numbers
words = sort_title.split(" ")
expanded = []
for word in words:
try:
int(word)
expanded.append(("0000" + word)[-4:])
except ValueError:
expanded.append(word)
sort_title = " ".join(expanded)
self.sort_title = sort_title
return True
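# Example (illustrative): a title such as "The 16 Burbs" becomes the sort
# title "0016 burbs" -- the leading article is dropped and bare integers
# are zero-padded to four digits so numeric titles sort naturally.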
def letter_from_title(self):
""" Returns the letter a file should be listed under after removing
articles """
title = self.title.lower()
if title.startswith("the "):
title = title.replace("the ", "", 1)
elif title.startswith("a "):
title = title.replace("a ", "", 1)
if title.startswith("an "):
title = title.replace("an ", "", 1)
letter = title[0]
if letter not in "abcdefghijklmnopqrstuvwxyz":
letter = "1"
return letter
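# Example (illustrative): "The Amazing Adventure" files under letter "a",
# while a title starting with a digit or symbol (e.g. "3D Dungeon") falls
# through to the catch-all letter "1".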
def download_url(self):
if (not self.id) or self.is_uploaded():
return "/zgames/uploaded/" + self.filename
else:
return "/zgames/" + self.letter + "/" + self.filename
def download_anchor(self, text="Download"):
url = self.download_url()
ellipses = ""
if self.downloads.count():
url = "/download/{}/{}".format(self.letter, self.filename)
ellipses = "s…"
html = ('<a href="{url}" class="download-link{explicit_class}">'
'{text}{ellipses}</a>').format(
url=url,
text=text,
explicit_class=(" explicit" if self.explicit else ""),
ellipses=ellipses
)
return html
def download_anchor_small(self):
return self.download_anchor(text="DL")
def file_exists(self):
return True if os.path.isfile(self.phys_path()) else False
def play_url(self):
return "/play/" + self.letter + "/" + self.filename
def review_url(self):
return "/review/" + self.letter + "/" + self.filename
def file_url(self):
return "/file/" + self.letter + "/" + self.filename
def attributes_url(self):
return "/attributes/" + self.letter + "/" + self.filename
def phys_path(self):
return os.path.join(SITE_ROOT + self.download_url())
def screenshot_phys_path(self):
""" Returns the physical path to the preview image. If the file has no
preview image set or is using a shared screenshot, return an empty
string.
"""
SPECIAL_SCREENSHOTS = ["zzm_screenshot.png"]
if self.screenshot and self.screenshot not in SPECIAL_SCREENSHOTS:
return os.path.join(STATIC_PATH, "images/screenshots/{}/{}".format(
rather than the letter grade.
"pass/complete/fail/incomplete":: A string value of "pass" or "complete"
will give a score of 100%. "fail" or "incomplete" will give a score of
0.
Note that assignments with grading_type of "pass_fail" can only be
assigned a score of 0 or assignment.points_possible, nothing in between. If
a posted_grade in the "points" or "percentage" format is sent, the grade
will only be accepted if the grade equals one of those two values.
"""
if submission_posted_grade is not None:
data["submission[posted_grade]"] = submission_posted_grade
# OPTIONAL - submission[excuse]
"""
Sets the "excused" status of an assignment.
"""
if submission_excuse is not None:
data["submission[excuse]"] = submission_excuse
# OPTIONAL - submission[late_policy_status]
"""
Sets the late policy status to either "late", "missing", "none", or null.
"""
if submission_late_policy_status is not None:
data["submission[late_policy_status]"] = submission_late_policy_status
# OPTIONAL - submission[seconds_late_override]
"""
Sets the seconds late if late policy status is "late"
"""
if submission_seconds_late_override is not None:
data["submission[seconds_late_override]"] = submission_seconds_late_override
# OPTIONAL - rubric_assessment
"""
Assign a rubric assessment to this assignment submission. The
sub-parameters here depend on the rubric for the assignment. The general
format is, for each row in the rubric:
The points awarded for this row.
rubric_assessment[criterion_id][points]
The rating id for the row.
rubric_assessment[criterion_id][rating_id]
Comments to add for this row.
rubric_assessment[criterion_id][comments]
For example, if the assignment rubric is (in JSON format):
!!!javascript
[
{
'id': 'crit1',
'points': 10,
'description': 'Criterion 1',
'ratings':
[
{ 'id': 'rat1', 'description': 'Good', 'points': 10 },
{ 'id': 'rat2', 'description': 'Poor', 'points': 3 }
]
},
{
'id': 'crit2',
'points': 5,
'description': 'Criterion 2',
'ratings':
[
{ 'id': 'rat1', 'description': 'Exemplary', 'points': 5 },
{ 'id': 'rat2', 'description': 'Complete', 'points': 5 },
{ 'id': 'rat3', 'description': 'Incomplete', 'points': 0 }
]
}
]
Then a possible set of values for rubric_assessment would be:
rubric_assessment[crit1][points]=3&rubric_assessment[crit1][rating_id]=rat1&rubric_assessment[crit2][points]=5&rubric_assessment[crit2][rating_id]=rat2&rubric_assessment[crit2][comments]=Well%20Done.
"""
if rubric_assessment is not None:
data["rubric_assessment"] = rubric_assessment
self.logger.debug("PUT /api/v1/courses/{course_id}/assignments/{assignment_id}/submissions/{user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/courses/{course_id}/assignments/{assignment_id}/submissions/{user_id}".format(**path), data=data, params=params, no_data=True)
def grade_or_comment_on_submission_sections(self, assignment_id, section_id, user_id, comment_file_ids=None, comment_group_comment=None, comment_media_comment_id=None, comment_media_comment_type=None, comment_text_comment=None, include_visibility=None, rubric_assessment=None, submission_excuse=None, submission_late_policy_status=None, submission_posted_grade=None, submission_seconds_late_override=None):
"""
Grade or comment on a submission.
Comment on and/or update the grading for a student's assignment submission.
If any submission or rubric_assessment arguments are provided, the user
must have permission to manage grades in the appropriate context (course or
section).
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - section_id
"""
ID
"""
path["section_id"] = section_id
# REQUIRED - PATH - assignment_id
"""
ID
"""
path["assignment_id"] = assignment_id
# REQUIRED - PATH - user_id
"""
ID
"""
path["user_id"] = user_id
# OPTIONAL - comment[text_comment]
"""
Add a textual comment to the submission.
"""
if comment_text_comment is not None:
data["comment[text_comment]"] = comment_text_comment
# OPTIONAL - comment[group_comment]
"""
Whether or not this comment should be sent to the entire group (defaults
to false). Ignored if this is not a group assignment or if no text_comment
is provided.
"""
if comment_group_comment is not None:
data["comment[group_comment]"] = comment_group_comment
# OPTIONAL - comment[media_comment_id]
"""
Add an audio/video comment to the submission. Media comments can be added
via this API, however, note that there is not yet an API to generate or
list existing media comments, so this functionality is currently of
limited use.
"""
if comment_media_comment_id is not None:
data["comment[media_comment_id]"] = comment_media_comment_id
# OPTIONAL - comment[media_comment_type]
"""
The type of media comment being added.
"""
if comment_media_comment_type is not None:
self._validate_enum(comment_media_comment_type, ["audio", "video"])
data["comment[media_comment_type]"] = comment_media_comment_type
# OPTIONAL - comment[file_ids]
"""
Attach files to this comment that were previously uploaded using the
Submission Comment API's files action
"""
if comment_file_ids is not None:
data["comment[file_ids]"] = comment_file_ids
# OPTIONAL - include[visibility]
"""
Whether this assignment is visible to the owner of the submission
"""
if include_visibility is not None:
data["include[visibility]"] = include_visibility
# OPTIONAL - submission[posted_grade]
"""
Assign a score to the submission, updating both the "score" and "grade"
fields on the submission record. This parameter can be passed in a few
different formats:
points:: A floating point or integral value, such as "13.5". The grade
will be interpreted directly as the score of the assignment.
Values above assignment.points_possible are allowed, for awarding
extra credit.
percentage:: A floating point value appended with a percent sign, such as
"40%". The grade will be interpreted as a percentage score on the
assignment, where 100% == assignment.points_possible. Values above 100%
are allowed, for awarding extra credit.
letter grade:: A letter grade, following the assignment's defined letter
grading scheme. For example, "A-". The resulting score will be the high
end of the defined range for the letter grade. For instance, if "B" is
defined as 86% to 84%, a letter grade of "B" will be worth 86%. The
letter grade will be rejected if the assignment does not have a defined
letter grading scheme. For more fine-grained control of scores, pass in
points or percentage rather than the letter grade.
"pass/complete/fail/incomplete":: A string value of "pass" or "complete"
will give a score of 100%. "fail" or "incomplete" will give a score of
0.
Note that assignments with grading_type of "pass_fail" can only be
assigned a score of 0 or assignment.points_possible, nothing in between. If
a posted_grade in the "points" or "percentage" format is sent, the grade
will only be accepted if the grade equals one of those two values.
"""
if submission_posted_grade is not None:
data["submission[posted_grade]"] = submission_posted_grade
# OPTIONAL - submission[excuse]
"""
Sets the "excused" status of an assignment.
"""
if submission_excuse is not None:
data["submission[excuse]"] = submission_excuse
# OPTIONAL - submission[late_policy_status]
"""
Sets the late policy status to either "late", "missing", "none", or null.
"""
if submission_late_policy_status is not None:
data["submission[late_policy_status]"] = submission_late_policy_status
# OPTIONAL - submission[seconds_late_override]
"""
Sets the seconds late if late policy status is "late"
"""
if submission_seconds_late_override is not None:
data["submission[seconds_late_override]"] = submission_seconds_late_override
# OPTIONAL - rubric_assessment
"""
Assign a rubric assessment to this assignment submission. The
sub-parameters here depend on the rubric for the assignment. The general
format is, for each row in the rubric:
The points awarded for this row.
rubric_assessment[criterion_id][points]
The rating id for the row.
rubric_assessment[criterion_id][rating_id]
Comments to add for this row.
rubric_assessment[criterion_id][comments]
For example, if the assignment rubric is (in JSON format):
!!!javascript
[
{
'id': 'crit1',
'points': 10,
'description': 'Criterion 1',
'ratings':
[
{ 'id': 'rat1', 'description': 'Good', 'points': 10 },
{ 'id': 'rat2', 'description': 'Poor', 'points': 3 }
]
},
{
'id': 'crit2',
'points': 5,
'description': 'Criterion 2',
'ratings':
[
{ 'id': 'rat1', 'description': 'Exemplary', 'points': 5 },
{ 'id': 'rat2', 'description': 'Complete', 'points': 5 },
{ 'id': 'rat3', 'description': 'Incomplete', 'points': 0 }
]
}
]
Then a possible set of values for rubric_assessment would be:
rubric_assessment[crit1][points]=3&rubric_assessment[crit1][rating_id]=rat1&rubric_assessment[crit2][points]=5&rubric_assessment[crit2][rating_id]=rat2&rubric_assessment[crit2][comments]=Well%20Done.
"""
if rubric_assessment is not None:
data["rubric_assessment"] = rubric_assessment
self.logger.debug("PUT /api/v1/sections/{section_id}/assignments/{assignment_id}/submissions/{user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/sections/{section_id}/assignments/{assignment_id}/submissions/{user_id}".format(**path), data=data, params=params, no_data=True)
def list_gradeable_students(self, assignment_id, course_id):
"""
List gradeable students.
A paginated list of students eligible to submit the assignment. The caller must have permission to view grades.
If anonymous grading is enabled for the current assignment and the allow_new_anonymous_id parameter is passed,
the returned data will not include any values identifying the student, but will instead include an
assignment-specific anonymous ID for each student.
Section-limited instructors will only see students in their own sections.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""
ID
"""
path["course_id"] = course_id
# REQUIRED - PATH - assignment_id
"""
ID
"""
path["assignment_id"] = assignment_id
self.logger.debug("GET /api/v1/courses/{course_id}/assignments/{assignment_id}/gradeable_students with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments/{assignment_id}/gradeable_students".format(**path), data=data, params=params, all_pages=True)
def list_multiple_assignments_gradeable_students(self, course_id, assignment_ids=None):
"""
List multiple assignments gradeable students.
A paginated list of students eligible to submit a list of assignments. The caller must have
permission to view grades for
<gh_stars>1-10
''' A base point spread function interface
'''
import numpy as np
from scipy import interpolate
from scipy.special import j1
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1.axes_rgb import make_rgb_axes
from prysm.conf import config
from prysm.mathops import pi, fft2, ifft2, fftshift, ifftshift, floor
from prysm.fttools import pad2d, forward_ft_unit
from prysm.coordinates import uniform_cart_to_polar, resample_2d_complex
from prysm.util import pupil_sample_to_psf_sample, correct_gamma, share_fig_ax
class PSF(object):
''' Point Spread Function representations.
Properties:
slice_x: 1D slice through center of PSF along X axis. Returns (x,y) data.
slice_y: 1D slice through center of PSF along y axis. Returns (x,y) data.
Instance Methods:
encircled_energy: Computes the encircled energy along the specified
azimuth. If azimuth is none, returns the azimuthal average.
Returned value is the average of both the negative and positive
sides of the PSF along the specified azimuth.
plot2d: Makes a 2D plot of the PSF. Returns (fig, axis).
plot_slice_xy: Makes a 1D plot with x and y slices through the center
of the PSF. Returns (fig, axis).
plot_encircled_energy: Makes a 1D plot of the encircled energy at the
specified azimuth. Returns (fig, axis).
conv: convolves this PSF with another. Returns a new PSF object that is
sampled at the same points as this PSF.
Private Instance Methods:
_renorm: renormalizes the PSF to unit peak intensity.
Static Methods:
from_pupil: given a pupil and a focal length, returns a PSF.
Notes:
Subclasses must implement an analytic_ft method with signature
a_ft(unit_x, unit_y).
'''
def __init__(self, data, sample_spacing):
''' Creates a PSF object.
Args:
data (`numpy.ndarray`): intensity data for the PSF.
sample_spacing (`float`): center-to-center spacing of samples,
expressed in microns.
Returns:
`PSF`: a new PSF instance.
'''
self.data = data
self.sample_spacing = sample_spacing
self.samples_x, self.samples_y = data.shape
self.center_x = self.samples_x // 2
self.center_y = self.samples_y // 2
# compute ordinate axis
ext_x = self.sample_spacing * self.samples_x / 2
ext_y = self.sample_spacing * self.samples_y / 2
self.unit_x = np.linspace(-ext_x, ext_x - sample_spacing,
self.samples_x, dtype=config.precision)
self.unit_y = np.linspace(-ext_y, ext_y - sample_spacing,
self.samples_y, dtype=config.precision)
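# Example (illustrative): with sample_spacing=0.5 (microns) and a 256x256
# data array, ext_x = 0.5 * 256 / 2 = 64, so unit_x runs from -64 to +63.5 um
# in 0.5 um steps and the PSF center sits at index 128.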
# quick-access slices ------------------------------------------------------
@property
def slice_x(self):
''' Retrieves a slice through the x axis of the PSF.
'''
return self.unit_x, self.data[self.center_x]
@property
def slice_y(self):
''' Retrieves a slice through the y axis of the PSF.
'''
return self.unit_y, self.data[:, self.center_y]
def encircled_energy(self, azimuth=None):
''' Returns the encircled energy at the requested azimuth. If azimuth
is None, returns the azimuthal average.
Args:
azimuth (`float`): azimuth to retrieve data along, in degrees.
Returns:
np.ndarray, np.ndarray. Unit, encircled energy.
'''
# interp_dat is shaped with axis0=phi, axis1=rho
rho, phi, interp_dat = uniform_cart_to_polar(self.unit_x, self.unit_y, self.data)
if azimuth is None:
# take average of all azimuths as input data
dat = interp_dat.mean(axis=0)
else:
index = np.searchsorted(phi, np.radians(azimuth))
dat = interp_dat[index, :]
enc_eng = np.cumsum(dat, dtype=config.precision)
return self.unit_x[self.center_x:], enc_eng / enc_eng[-1]
# quick-access slices ------------------------------------------------------
# plotting -----------------------------------------------------------------
def plot2d(self, axlim=25, power=1, interp_method='lanczos',
pix_grid=None, fig=None, ax=None,
show_axlabels=True, show_colorbar=True):
''' Creates a 2D plot of the PSF.
Args:
axlim (`float`): limits of axis, symmetric.
xlim=(-axlim,axlim), ylim=(-axlim, axlim).
power (`float`): power to stretch the data by for plotting.
interp_method (`string`): method used to interpolate the image between
samples of the PSF.
pix_grid (`float`): if not None, overlays gridlines with spacing equal
to pix_grid. Intended to show the collection into camera pixels
while still in the oversampled domain.
fig (pyplot.figure): figure to plot in.
ax (pyplot.axis): axis to plot in.
show_axlabels (`bool`): whether or not to show the axis labels.
show_colorbar (`bool`): whether or not to show the colorbar.
Returns:
pyplot.fig, pyplot.axis. Figure and axis containing the plot.
'''
fcn = correct_gamma(self.data ** power)
label_str = 'Normalized Intensity [a.u.]'
lims = (0, 1)
left, right = self.unit_x[0], self.unit_x[-1]
bottom, top = self.unit_y[0], self.unit_y[-1]
fig, ax = share_fig_ax(fig, ax)
im = ax.imshow(fcn,
extent=[left, right, bottom, top],
origin='lower',
cmap='Greys_r',
interpolation=interp_method,
clim=lims)
if show_colorbar:
cb = fig.colorbar(im, label=label_str, ax=ax, fraction=0.046)
cb.outline.set_edgecolor('k')
cb.outline.set_linewidth(0.5)
if show_axlabels:
ax.set(xlabel=r'Image Plane $x$ [$\mu m$]',
ylabel=r'Image Plane $y$ [$\mu m$]')
ax.set(xlim=(-axlim, axlim),
ylim=(-axlim, axlim))
if pix_grid is not None:
# if pixel grid is desired, add it
mult = floor(axlim / pix_grid)
gmin, gmax = -mult * pix_grid, mult * pix_grid
pts = np.arange(gmin, gmax, pix_grid)
ax.set_yticks(pts, minor=True)
ax.set_xticks(pts, minor=True)
ax.yaxis.grid(True, which='minor', color='white', alpha=0.25)
ax.xaxis.grid(True, which='minor', color='white', alpha=0.25)
return fig, ax
def plot_slice_xy(self, log=False, axlim=20, fig=None, ax=None):
''' Makes a 1D plot of X and Y slices through the PSF.
Args:
log (`bool`): if true, plot in log scale. if false, plot in linear
scale.
axlim (`float`): limits of axis, will plot [-axlim, axlim].
fig (pyplot.figure): figure to plot in.
ax (pyplot.axis): axis to plot in.
Returns:
pyplot.fig, pyplot.axis. Figure and axis containing the plot.
'''
ux, x = self.slice_x
uy, y = self.slice_y
if log:
fcn_x = 20 * np.log10(1e-100 + x)
fcn_y = 20 * np.log10(1e-100 + y)
label_str = 'Normalized Intensity [dB]'
lims = (-120, 0)
else:
fcn_x = x
fcn_y = y
label_str = 'Normalized Intensity [a.u.]'
lims = (0, 1)
fig, ax = share_fig_ax(fig, ax)
ax.plot(ux, fcn_x, label='Slice X', lw=3)
ax.plot(uy, fcn_y, label='Slice Y', lw=3)
ax.set(xlabel=r'Image Plane X [$\mu m$]',
ylabel=label_str,
xlim=(-axlim, axlim),
ylim=lims)
plt.legend(loc='upper right')
return fig, ax
def plot_encircled_energy(self, azimuth=None, axlim=20, fig=None, ax=None):
''' Makes a 1D plot of the encircled energy at the given azimuth.
Args:
azimuth (`float`): azimuth to plot at, in degrees.
axlim (`float`): limits of axis, will plot [0, axlim].
fig (pyplot.figure): figure to plot in.
ax (pyplot.axis): axis to plot in.
Returns:
pyplot.fig, pyplot.axis. Figure and axis containing the plot
'''
unit, data = self.encircled_energy(azimuth)
fig, ax = share_fig_ax(fig, ax)
ax.plot(unit, data, lw=3)
ax.set(xlabel=r'Image Plane Distance [$\mu m$]',
ylabel=r'Encircled Energy [Rel 1.0]',
xlim=(0, axlim))
return fig, ax
# plotting -----------------------------------------------------------------
# helpers ------------------------------------------------------------------
def conv(self, psf2):
        ''' Convolves this PSF with another.
        Args:
            psf2 (`PSF`): PSF to convolve with this one.
        Returns:
            PSF: A new `PSF` that is the convolution of these two PSFs.
        Notes:
            The output PSF has sampling equal to whichever PSF has the lower
            Nyquist frequency.
        '''
try:
psf_ft = fftshift(fft2(self.data))
psf_unit_x = forward_ft_unit(self.sample_spacing, self.samples_x)
psf_unit_y = forward_ft_unit(self.sample_spacing, self.samples_y)
psf2_ft = psf2.analytic_ft(psf_unit_x, psf_unit_y)
psf3 = PSF(data=abs(ifft2(psf_ft * psf2_ft)),
sample_spacing=self.sample_spacing)
return psf3._renorm()
except AttributeError: # no analytic FT on the PSF/subclass
print('No analytic FT, falling back to numerical approach.')
return convpsf(self, psf2)
def _renorm(self, to='peak'):
''' Renormalizes the PSF to unit peak intensity.
Args:
to (`string`): renormalization target. Either "peak" or "total",
produces a PSF of unit peak or total intensity.
Returns:
`PSF`: a renormalized PSF instance.
'''
if to.lower() == 'peak':
self.data /= self.data.max()
elif to.lower() == 'total':
ttl = self.data.sum()
self.data /= ttl
return self
# helpers ------------------------------------------------------------------
@staticmethod
def from_pupil(pupil, efl, padding=1):
        ''' Uses scalar diffraction propagation to generate a PSF from a pupil.
Args:
pupil (prysm.Pupil): Pupil, with OPD data and wavefunction.
efl (float): effective focal length of the optical system.
padding (number): number of pupil widths to pad each side of the
pupil with during computation.
Returns:
PSF. A new PSF instance.
'''
        # pad the pupil; with the default padding=1, one pupil width is added
        # on each side for a total width of 3 pupil widths
psf_samples = (pupil.samples * padding) * 2 + pupil.samples
sample_spacing = pupil_sample_to_psf_sample(pupil_sample=pupil.sample_spacing * 1000,
num_samples=psf_samples,
wavelength=pupil.wavelength,
efl=efl)
padded_wavefront = pad2d(pupil.fcn, padding)
impulse_response = ifftshift(fft2(fftshift(padded_wavefront)))
psf = abs(impulse_response)**2
return PSF(psf / np.max(psf), sample_spacing)
class MultispectralPSF(PSF):
''' A PSF which includes multiple wavelength components.
'''
def __init__(self, psfs, weights=None):
''' Creates a new :class:`MultispectralPSF` instance.
Args:
psfs (iterable): iterable of PSFs.
weights (iterable): iterable of weights associated with each PSF.
Returns:
MultispectralPSF. A new `MultispectralPSF`.
'''
if weights is None:
weights = [1] * len(psfs)
# find the most densely sampled PSF
min_spacing = 1e99
ref_idx = None
ref_unit_x = None
ref_unit_y = None
ref_samples_x = None
ref_samples_y = None
for idx, psf in enumerate(psfs):
if psf.sample_spacing < min_spacing:
min_spacing = psf.sample_spacing
ref_idx = idx
ref_unit_x = psf.unit_x
ref_unit_y = psf.unit_y
ref_samples_x = psf.samples_x
ref_samples_y = psf.samples_y
merge_data = np.zeros((ref_samples_x, ref_samples_y, len(psfs)))
for idx, psf in enumerate(psfs):
# don't do anything to our reference PSF
if | |
from __future__ import division
from collections import Counter, defaultdict
import os
from random import shuffle
import tensorflow as tf
import pickle
import os.path
import numpy as np
import collections
import math
import numpy
from pathlib import Path
SUCCESS_HEADER = 'S_'
class NotTrainedError(Exception):
pass
class NotFitToCorpusError(Exception):
pass
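# Illustrative usage sketch (not part of the original source); `corpus` is
# assumed to be an iterable of text strings and the hyperparameter values are
# examples only:
#
#   model = GloVeModel(embedding_size=100, context_size=10)
#   model.fit_to_corpus(corpus, in_type='corpus')
#   model.train(num_epochs=50)
#   vector = model.embedding_for('word')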
class GloVeModel():
EMBEDDINGS = "embeddings"
M_ID = 0
SAVE_DIR_NAME = "model"
SAVE_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), SAVE_DIR_NAME)
E_DIR_NAME = "embeddings"
TF_CP_DIR_NAME = "tf_checkpoints"
OLD_MODEL_BASEDIR = os.path.join(SAVE_DIR, SAVE_DIR_NAME + str(M_ID))
NEW_MODEL_BASEDIR = os.path.join(SAVE_DIR, SAVE_DIR_NAME + str(M_ID + 1))
TF_MODEL_NAME = "tf_model.ckpt"
has_TF_Model = False
def __init__(self, embedding_size, context_size, max_vocab_size=100000, min_occurrences=1,
scaling_factor=3 / 4, cooccurrence_cap=100, batch_size=50, learning_rate=0.05):
self.embedding_size = embedding_size
if isinstance(context_size, tuple):
self.left_context, self.right_context = context_size
elif isinstance(context_size, int):
self.left_context = self.right_context = context_size
else:
raise ValueError("`context_size` should be an int or a tuple of two ints")
self.max_vocab_size = max_vocab_size
self.min_occurrences = min_occurrences
self.scaling_factor = scaling_factor
self.cooccurrence_cap = cooccurrence_cap
self.batch_size = batch_size
self.learning_rate = learning_rate
self.find_latest_train_iter()
self.create_or_load_model()
def find_latest_train_iter(self):
folder_cont = os.listdir(self.SAVE_DIR)
        if len(folder_cont) != 0:
            self.M_ID = self.__get_latest_file(self.SAVE_DIR_NAME, folder_cont)
self.OLD_MODEL_BASEDIR = os.path.join(self.SAVE_DIR, self.SAVE_DIR_NAME + str(self.M_ID))
self.NEW_MODEL_BASEDIR = os.path.join(self.SAVE_DIR, self.SAVE_DIR_NAME + str(self.M_ID + 1))
    def __get_latest_file(self, base_filename, folder_contents):
        latest = 0
        for file in folder_contents:
            cur_id = int(file.replace(base_filename, ""))
            if cur_id > latest:
                latest = cur_id
        return latest
def create_or_load_model(self):
# self.__word_to_id = self.load_obj(self.WORD_TO_ID)
self.__embeddings = self.load_obj(self.EMBEDDINGS)
self.__words = [word for word, _ in self.__embeddings.items()] if (self.__embeddings is not None) else None
self.__new_words = None
self.__existing_words_count = len(self.__words) if (self.__words is not None) else 0
self.__cooccurrence_matrix = None
self.__word_counts = Counter()
def fit_to_corpus(self, data, in_type="corpus"):
self.__fit_to_corpus(data, in_type, self.max_vocab_size, self.min_occurrences,
self.left_context, self.right_context)
self.__graph = tf.Graph()
self.__create_graph(self.__graph)
def __fit_to_corpus(self, data, in_type, vocab_size, min_occurrences, left_size, right_size):
if in_type.lower() == 'corpus':
cooccurrence_counts = self.__build_coocur_mat_from_corpus(data, left_size, right_size)
self.__new_words = [word for word, count in self.__word_counts.most_common(vocab_size)
if (count >= min_occurrences and (
self.__words is None or word not in self.__embeddings.keys()))]
elif in_type.lower() == 'sparkcrawl':
cooccurrence_counts = self.__convert_csv_to_coocur_dict(data)
else:
raise ValueError
if len(cooccurrence_counts) == 0:
raise ValueError("No coccurrences in corpus. Did you try to reuse a generator?")
        if self.__words is not None:
            self.__words = self.__words + list(self.__new_words)
        else:
            self.__words = list(self.__new_words)
        # TODO: check if word id is in the cooccurrence matrix
self.__cooccurrence_matrix = {
(self.__words.index(words[0]), self.__words.index(words[1])): count
for words, count in cooccurrence_counts.items()
if words[0] in self.__words and words[1] in self.__words}
def __convert_csv_to_coocur_dict(self, res_folder):
        import csv
        import platform
cooccurrence_counts = defaultdict(float)
new_words = []
res_folder_gen = [label_folder for label_folder in os.listdir(res_folder) if label_folder[:2] != SUCCESS_HEADER]
for label_folder in res_folder_gen:
csv_gen = [csv_fname for csv_fname in os.listdir(os.path.join(res_folder, label_folder)) if csv_fname[-3:] == 'csv']
for csv_fname in csv_gen:
                if any(platform.win32_ver()):
                    import win32api  # only available on Windows
                    csv_file = win32api.GetShortPathName(os.path.join(
                        win32api.GetShortPathName(res_folder), label_folder, csv_fname))
else:
csv_file = os.path.join(res_folder, label_folder, csv_fname)
reader = csv.DictReader(open(csv_file), fieldnames=['tgt_word', 'ctx_word', 'coor_val'])
for row in reader:
target_word = row['tgt_word']
context_word = row['ctx_word']
print(row['tgt_word'])
if (self.__embeddings is None or target_word not in self.__embeddings.keys()) and target_word not in new_words:
new_words.append(target_word)
if (self.__embeddings is None or context_word not in self.__embeddings.keys()) and context_word not in new_words:
new_words.append(context_word)
                    # csv values are read back as strings, so cast to float
                    cooccurrence_counts[(target_word, context_word)] = float(row['coor_val'])
self.__new_words = new_words
return cooccurrence_counts
def __build_coocur_mat_from_corpus(self, corpus, left_size, right_size):
cooccurrence_counts = defaultdict(float)
for formerRegion in corpus:
region = formerRegion.split()
self.__word_counts.update(region)
for l_context, word, r_context in _context_windows(region, left_size, right_size):
for i, context_word in enumerate(l_context[::-1]):
cooccurrence_counts[(word, context_word)] += 1 / (i + 1)
for i, context_word in enumerate(r_context):
cooccurrence_counts[(word, context_word)] += 1 / (i + 1)
return cooccurrence_counts
def save_obj(self, obj, name):
new_save_dir = os.path.join(self.NEW_MODEL_BASEDIR, self.E_DIR_NAME)
os.makedirs(new_save_dir)
with open(os.path.join((new_save_dir), name) + '.pkl' , 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(self, name):
try:
with open(os.path.join(self.OLD_MODEL_BASEDIR, self.E_DIR_NAME , name) + '.pkl', 'rb') as f:
return pickle.load(f)
except FileNotFoundError:
return None
def restore_vars(self, saver, sess, chkpt_dir):
tf.global_variables_initializer().run()
checkpoint_dir = chkpt_dir
        if not os.path.exists(checkpoint_dir):
            return False
path = tf.train.get_checkpoint_state(checkpoint_dir)
print(checkpoint_dir, "path = ", path)
if path is None:
return False
else:
saver.restore(sess, path.model_checkpoint_path)
return True
def tf_checkpoints_available(self, checkpoint_dir):
path = tf.train.get_checkpoint_state(checkpoint_dir)
if path is None:
return False
else:
self.has_TF_Model = True
return True
def __recreate_graph(self, __recreated_graph, old_focal_embeddings, old_context_embeddings, old_focal_biases,
old_context_biases):
with __recreated_graph.as_default(), __recreated_graph.device(_device_for_node):
count_max = tf.constant([self.cooccurrence_cap], dtype=tf.float32,
name='max_cooccurrence_count')
scaling_factor = tf.constant([self.scaling_factor], dtype=tf.float32,
name="scaling_factor")
self.__focal_input = tf.placeholder(tf.int32, shape=[self.batch_size],
name="focal_words")
self.__context_input = tf.placeholder(tf.int32, shape=[self.batch_size],
name="context_words")
self.__cooccurrence_count = tf.placeholder(tf.float32, shape=[self.batch_size],
name="cooccurrence_count")
self.focal_embeddings = tf.Variable(np.concatenate((old_focal_embeddings, np.random.uniform(-1, 1, (
self.new_vocab_size, self.embedding_size)).astype(np.float32)), axis=0),
name="focal_embeddings")
self.context_embeddings = tf.Variable(np.concatenate((old_context_embeddings, np.random.uniform(-1, 1, (
self.new_vocab_size, self.embedding_size)).astype(np.float32)), axis=0),
name="context_embeddings")
self.focal_biases = tf.Variable(
np.concatenate((old_focal_biases, np.random.uniform(-1, 1, self.new_vocab_size).astype(np.float32)),
axis=0),
name='focal_biases')
self.context_biases = tf.Variable(
np.concatenate((old_context_biases, np.random.uniform(-1, 1, self.new_vocab_size).astype(np.float32)),
axis=0),
name="context_biases")
focal_embedding = tf.nn.embedding_lookup([self.focal_embeddings], self.__focal_input)
context_embedding = tf.nn.embedding_lookup([self.context_embeddings], self.__context_input)
focal_bias = tf.nn.embedding_lookup([self.focal_biases], self.__focal_input)
context_bias = tf.nn.embedding_lookup([self.context_biases], self.__context_input)
weighting_factor = tf.minimum(
1.0,
tf.pow(
tf.div(self.__cooccurrence_count, count_max),
scaling_factor))
embedding_product = tf.reduce_sum(tf.multiply(focal_embedding, context_embedding), 1)
log_cooccurrences = tf.log(tf.to_float(self.__cooccurrence_count))
distance_expr = tf.square(tf.add_n([
embedding_product,
focal_bias,
context_bias,
tf.negative(log_cooccurrences)]))
single_losses = tf.multiply(weighting_factor, distance_expr)
self.__total_loss = tf.reduce_sum(single_losses)
tf.summary.scalar("GloVe_loss", self.__total_loss)
self.__optimizer = tf.train.AdagradOptimizer(self.learning_rate).minimize(
self.__total_loss)
self.__summary = tf.summary.merge_all()
self.__combined_embeddings = tf.add(self.focal_embeddings, self.context_embeddings,
name="combined_embeddings")
self.saver = tf.train.Saver()
def __create_graph(self, graph):
with graph.as_default(), graph.device(_device_for_node):
count_max = tf.constant([self.cooccurrence_cap], dtype=tf.float32,
name='max_cooccurrence_count')
scaling_factor = tf.constant([self.scaling_factor], dtype=tf.float32,
name="scaling_factor")
self.__focal_input = tf.placeholder(tf.int32, shape=[self.batch_size],
name="focal_words")
self.__context_input = tf.placeholder(tf.int32, shape=[self.batch_size],
name="context_words")
self.__cooccurrence_count = tf.placeholder(tf.float32, shape=[self.batch_size],
name="cooccurrence_count")
# TODO CHECK IF OTHER TENSORFLOW VARIABLES ARE ACTUALLY TRAINED
if self.tf_checkpoints_available(os.path.join(self.OLD_MODEL_BASEDIR, self.TF_CP_DIR_NAME)):
self.focal_embeddings = tf.Variable(
tf.random_uniform([self.vocab_size, self.embedding_size], 1.0, -1.0),
name="focal_embeddings")
self.context_embeddings = tf.Variable(
tf.random_uniform([self.vocab_size, self.embedding_size], 1.0, -1.0),
name="context_embeddings")
self.focal_biases = tf.Variable(tf.random_uniform([self.vocab_size], 1.0, -1.0),
name='focal_biases')
self.context_biases = tf.Variable(tf.random_uniform([self.vocab_size], 1.0, -1.0),
name="context_biases")
else:
self.focal_embeddings = tf.Variable(
tf.random_uniform([self.new_vocab_size, self.embedding_size], 1.0, -1.0),
name="focal_embeddings")
self.context_embeddings = tf.Variable(
tf.random_uniform([self.new_vocab_size, self.embedding_size], 1.0, -1.0),
name="context_embeddings")
self.focal_biases = tf.Variable(tf.random_uniform([self.new_vocab_size], 1.0, -1.0),
name='focal_biases')
self.context_biases = tf.Variable(tf.random_uniform([self.new_vocab_size], 1.0, -1.0),
name="context_biases")
focal_embedding = tf.nn.embedding_lookup([self.focal_embeddings], self.__focal_input)
context_embedding = tf.nn.embedding_lookup([self.context_embeddings], self.__context_input)
focal_bias = tf.nn.embedding_lookup([self.focal_biases], self.__focal_input)
context_bias = tf.nn.embedding_lookup([self.context_biases], self.__context_input)
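            # GloVe weighting function f(x) = min(1, (x / x_max) ** alpha),
            # feeding the weighted least-squares loss
            #   J = sum_ij f(X_ij) * (w_i . w~_j + b_i + b~_j - log X_ij) ** 2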
weighting_factor = tf.minimum(
1.0,
tf.pow(
tf.div(self.__cooccurrence_count, count_max),
scaling_factor))
embedding_product = tf.reduce_sum(tf.multiply(focal_embedding, context_embedding), 1)
log_cooccurrences = tf.log(tf.to_float(self.__cooccurrence_count))
distance_expr = tf.square(tf.add_n([
embedding_product,
focal_bias,
context_bias,
tf.negative(log_cooccurrences)]))
single_losses = tf.multiply(weighting_factor, distance_expr)
self.__total_loss = tf.reduce_sum(single_losses)
tf.summary.scalar("GloVe_loss", self.__total_loss)
self.__optimizer = tf.train.AdagradOptimizer(self.learning_rate).minimize(
self.__total_loss)
self.__summary = tf.summary.merge_all()
self.__combined_embeddings = tf.add(self.focal_embeddings, self.context_embeddings,
name="combined_embeddings")
self.saver = tf.train.Saver()
def train(self, num_epochs, log_dir=None, summary_batch_interval=1000,
tsne_epoch_interval=None):
should_write_summaries = log_dir is not None and summary_batch_interval
should_generate_tsne = log_dir is not None and tsne_epoch_interval
batches = self.__prepare_batches()
total_steps = 0
with tf.Session(graph=self.__graph) as session:
if should_write_summaries:
print("Writing TensorBoard summaries to {}".format(log_dir))
summary_writer = tf.summary.FileWriter(log_dir, graph=session.graph)
else:
summary_writer = None
self.restore_vars(self.saver, session, os.path.join(self.OLD_MODEL_BASEDIR, self.TF_CP_DIR_NAME))
if self.has_TF_Model:
__recreated_graph = tf.Graph()
old_focal_embeddings = self.focal_embeddings.eval()
old_context_embeddings = self.context_embeddings.eval()
old_focal_biases = self.focal_biases.eval()
old_context_biases = self.context_biases.eval()
self.__recreate_graph(__recreated_graph, old_focal_embeddings, old_context_embeddings, old_focal_biases,
old_context_biases)
with tf.Session(graph=__recreated_graph) as innersess:
tf.global_variables_initializer().run()
self.__inner_train(innersess, num_epochs, batches, summary_batch_interval,
should_write_summaries, total_steps, should_generate_tsne,
summary_writer, tsne_epoch_interval, log_dir)
else:
self.__inner_train(session, num_epochs, batches, summary_batch_interval,
should_write_summaries, total_steps, should_generate_tsne,
summary_writer, tsne_epoch_interval, log_dir)
self.__existing_words_count = len(self.__words)
self.save_obj(self.__embeddings, self.EMBEDDINGS)
def __inner_train(self, session, num_epochs, batches, summary_batch_interval,
should_write_summaries, total_steps, should_generate_tsne,
summary_writer, tsne_epoch_interval, log_dir):
for epoch in range(num_epochs):
shuffle(batches)
for batch_index, batch in enumerate(batches):
i_s, j_s, counts = batch
if len(counts) != self.batch_size:
continue
feed_dict = {
self.__focal_input: i_s,
self.__context_input: j_s,
self.__cooccurrence_count: counts}
session.run([self.__optimizer], feed_dict=feed_dict)
if should_write_summaries and (total_steps + 1) % summary_batch_interval == 0:
summary_str = session.run(self.__summary, feed_dict=feed_dict)
summary_writer.add_summary(summary_str, total_steps)
total_steps += 1
if should_generate_tsne and (epoch + 1) % tsne_epoch_interval == 0:
current_embeddings = self.__combined_embeddings.eval()
output_path = os.path.join(log_dir, "epoch{:03d}.png".format(epoch + 1))
self.generate_tsne(output_path, embeddings=current_embeddings)
self.__embeddings = self.__combined_embeddings.eval()
self.__embeddings = collections.OrderedDict(
[(self.__words[i], embedding) for i, embedding in enumerate(self.__embeddings)])
if should_write_summaries:
summary_writer.close()
self.save_tf_model(session)
def save_tf_model(self,session):
new_save_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), self.SAVE_DIR_NAME, self.SAVE_DIR_NAME + str(self.M_ID + 1) , self.TF_CP_DIR_NAME)
os.makedirs(new_save_dir)
save_path = self.saver.save(session, os.path.join(new_save_dir, self.TF_MODEL_NAME))
print("Model saved in file: %s" % save_path)
def copy_embeddings(self, loaded_embeddings):
self.__embeddings = loaded_embeddings
self.__recreate_graph()
def embedding_for(self, word_str_or_id):
if isinstance(word_str_or_id, str):
return self.__embeddings[word_str_or_id]
def __prepare_batches(self):
if self.__cooccurrence_matrix is None:
raise NotFitToCorpusError(
"Need to fit model to corpus before preparing training batches.")
cooccurrences = [(word_ids[0], word_ids[1], count)
for word_ids, count in self.__cooccurrence_matrix.items()]
i_indices, j_indices, counts = zip(*cooccurrences)
return list(_batchify(self.batch_size, i_indices, j_indices, counts))
@property
def vocab_size(self):
return self.__existing_words_count
@property
def new_vocab_size(self):
return len(self.__new_words)
@property
def words(self):
if self.__words is None:
raise NotFitToCorpusError("Need to fit model to corpus before accessing words.")
return self.__words
@property
def embeddings(self):
if self.__embeddings is None:
raise NotTrainedError("Need to train model before accessing embeddings")
return self.__embeddings
def generate_tsne(self, path="glove/model/model", size=(100, 100), word_count=1000, embeddings=None):
if embeddings is None:
embeddings = self.embeddings
from sklearn.manifold import TSNE
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
low_dim_embs = | |
399 2 1 398 400
1 400 0 0
1 401 2 1 402 403
1 402 2 1 401 403
1 403 0 0
1 404 2 1 405 406
1 405 2 1 404 406
1 406 0 0
1 407 2 1 408 409
1 408 2 1 407 409
1 409 0 0
1 410 2 1 411 412
1 411 2 1 410 412
1 412 0 0
1 413 2 1 414 415
1 414 2 1 413 415
1 415 0 0
1 416 2 1 417 418
1 417 2 1 416 418
1 418 0 0
1 419 2 1 420 421
1 420 2 1 419 421
1 421 0 0
1 422 2 1 423 424
1 423 2 1 422 424
1 424 0 0
1 425 2 1 426 427
1 426 2 1 425 427
1 427 0 0
1 428 2 1 429 430
1 429 2 1 428 430
1 430 0 0
1 431 2 1 432 433
1 432 2 1 431 433
1 433 0 0
1 434 2 1 435 436
1 435 2 1 434 436
1 436 0 0
1 437 2 1 438 439
1 438 2 1 437 439
1 439 0 0
1 440 2 1 441 442
1 441 2 1 440 442
1 442 0 0
1 443 2 1 444 445
1 444 2 1 443 445
1 445 0 0
1 446 2 1 447 448
1 447 2 1 446 448
1 448 0 0
1 449 2 1 450 451
1 450 2 1 449 451
1 451 0 0
1 452 2 1 453 454
1 453 2 1 452 454
1 454 0 0
1 455 2 1 456 457
1 456 2 1 455 457
1 457 0 0
1 458 2 1 459 460
1 459 2 1 458 460
1 460 0 0
1 461 2 1 462 463
1 462 2 1 461 463
1 463 0 0
1 464 2 1 465 466
1 465 2 1 464 466
1 466 0 0
1 467 2 1 468 469
1 468 2 1 467 469
1 469 0 0
1 470 2 1 471 472
1 471 2 1 470 472
1 472 0 0
1 473 2 1 474 475
1 474 2 1 473 475
1 475 0 0
1 476 2 1 477 478
1 477 2 1 476 478
1 478 0 0
1 479 2 1 480 481
1 480 2 1 479 481
1 481 0 0
1 482 2 1 483 484
1 483 2 1 482 484
1 484 0 0
1 485 2 1 486 487
1 486 2 1 485 487
1 487 0 0
1 488 2 1 489 490
1 489 2 1 488 490
1 490 0 0
1 491 2 1 492 493
1 492 2 1 491 493
1 493 0 0
1 494 2 1 495 496
1 495 2 1 494 496
1 496 0 0
1 497 2 1 498 499
1 498 2 1 497 499
1 499 0 0
1 500 2 1 501 502
1 501 2 1 500 502
1 502 0 0
1 503 2 1 504 505
1 504 2 1 503 505
1 505 0 0
1 506 2 1 507 508
1 507 2 1 506 508
1 508 0 0
1 509 2 1 510 511
1 510 2 1 509 511
1 511 0 0
1 512 2 1 513 514
1 513 2 1 512 514
1 514 0 0
1 515 2 1 516 517
1 516 2 1 515 517
1 517 0 0
1 518 2 1 519 520
1 519 2 1 518 520
1 520 0 0
1 521 2 1 522 523
1 522 2 1 521 523
1 523 0 0
1 524 2 1 525 526
1 525 2 1 524 526
1 526 0 0
1 527 2 1 528 529
1 528 2 1 527 529
1 529 0 0
1 530 2 1 531 532
1 531 2 1 530 532
1 532 0 0
1 533 2 1 534 535
1 534 2 1 533 535
1 535 0 0
1 536 2 1 537 538
1 537 2 1 536 538
1 538 0 0
1 539 2 1 540 541
1 540 2 1 539 541
1 541 0 0
1 542 2 1 543 544
1 543 2 1 542 544
1 544 0 0
1 545 2 1 546 547
1 546 2 1 545 547
1 547 0 0
1 548 2 1 549 550
1 549 2 1 548 550
1 550 0 0
1 551 2 1 552 553
1 552 2 1 551 553
1 553 0 0
1 554 2 1 555 556
1 555 2 1 554 556
1 556 0 0
1 557 2 1 558 559
1 558 2 1 557 559
1 559 0 0
1 560 2 1 561 562
1 561 2 1 560 562
1 562 0 0
1 563 2 1 564 565
1 564 2 1 563 565
1 565 0 0
1 566 2 1 567 568
1 567 2 1 566 568
1 568 0 0
1 569 2 1 570 571
1 570 2 1 569 571
1 571 0 0
1 572 2 1 573 574
1 573 2 1 572 574
1 574 0 0
1 575 2 1 576 577
1 576 2 1 575 577
1 577 0 0
1 578 2 1 579 580
1 579 2 1 578 580
1 580 0 0
1 581 2 1 582 583
1 582 2 1 581 583
1 583 0 0
1 584 2 1 585 586
1 585 2 1 584 586
1 586 0 0
1 587 2 1 588 589
1 588 2 1 587 589
1 589 0 0
1 590 2 1 591 592
1 591 2 1 590 592
1 592 0 0
1 593 2 1 594 595
1 594 2 1 593 595
1 595 0 0
1 596 2 1 597 598
1 597 2 1 596 598
1 598 0 0
1 599 2 1 600 601
1 600 2 1 599 601
1 601 0 0
1 602 2 1 603 604
1 603 2 1 602 604
1 604 0 0
1 605 2 1 606 607
1 606 2 1 605 607
1 607 0 0
1 608 2 1 609 610
1 609 2 1 608 610
1 610 0 0
1 611 2 1 612 613
1 612 2 1 611 613
1 613 0 0
1 614 2 1 615 616
1 615 2 1 614 616
1 616 0 0
1 617 2 1 618 619
1 618 2 1 617 619
1 619 0 0
1 620 2 1 621 622
1 621 2 1 620 622
1 622 0 0
1 623 2 1 624 625
1 624 2 1 623 625
1 625 0 0
1 626 2 1 627 628
1 627 2 1 626 628
1 628 0 0
1 629 2 1 630 631
1 630 2 1 629 631
1 631 0 0
1 632 2 1 633 634
1 633 2 1 632 634
1 634 0 0
1 635 2 1 636 637
1 636 2 1 635 637
1 637 0 0
1 638 2 1 639 640
1 639 2 1 638 640
1 640 0 0
1 641 2 1 642 643
1 642 2 1 641 643
1 643 0 0
1 644 2 1 645 646
1 645 2 1 644 646
1 646 0 0
1 647 2 1 648 649
1 648 2 1 647 649
1 649 0 0
1 650 2 1 651 652
1 651 2 1 650 652
1 652 0 0
1 653 2 1 654 655
1 654 2 1 653 655
1 655 0 0
1 656 2 1 657 658
1 657 2 1 656 658
1 658 0 0
1 659 2 1 660 661
1 660 2 1 659 661
1 661 0 0
1 662 2 1 663 664
1 663 2 1 662 664
1 664 0 0
1 665 2 1 666 667
1 666 2 1 665 667
1 667 0 0
1 668 2 1 669 670
1 669 2 1 668 670
1 670 0 0
1 671 2 1 672 673
1 672 2 1 671 673
1 673 0 0
1 674 2 1 675 676
1 675 2 1 674 676
1 676 0 0
1 677 2 1 678 679
1 678 2 1 677 679
1 679 0 0
1 680 2 1 681 682
1 681 2 1 680 682
1 682 0 0
1 683 2 1 684 685
1 684 2 1 683 685
1 685 0 0
1 686 2 1 687 688
1 687 2 1 686 688
1 688 0 0
1 689 2 1 690 691
1 690 2 1 689 691
1 691 0 0
1 692 2 1 693 694
1 693 2 1 692 694
1 694 | |
61
1 741 1 0 89
1 741 1 0 117
1 741 1 0 145
1 742 1 0 23
1 742 1 0 40
1 742 1 0 65
1 742 1 0 93
1 742 1 0 121
1 742 1 0 149
1 743 1 0 27
1 743 1 0 44
1 743 1 0 69
1 743 1 0 97
1 743 1 0 125
1 743 1 0 153
1 744 1 0 30
1 744 1 0 48
1 744 1 0 73
1 744 1 0 101
1 744 1 0 129
1 744 1 0 157
1 745 1 0 33
1 745 1 0 52
1 745 1 0 77
1 745 1 0 105
1 745 1 0 133
1 745 1 0 161
1 746 1 0 745
1 747 1 0 740
1 747 1 0 741
1 747 1 0 742
1 747 1 0 743
1 747 1 0 744
1 748 2 1 747 746
1 749 1 0 33
1 750 1 0 749
1 751 2 0 748 750
1 752 1 0 24
1 752 1 0 36
1 752 1 0 58
1 752 1 0 86
1 752 1 0 114
1 752 1 0 142
1 753 1 0 22
1 753 1 0 37
1 753 1 0 61
1 753 1 0 89
1 753 1 0 117
1 753 1 0 145
1 754 1 0 23
1 754 1 0 40
1 754 1 0 65
1 754 1 0 93
1 754 1 0 121
1 754 1 0 149
1 755 1 0 27
1 755 1 0 44
1 755 1 0 69
1 755 1 0 97
1 755 1 0 125
1 755 1 0 153
1 756 1 0 30
1 756 1 0 48
1 756 1 0 73
1 756 1 0 101
1 756 1 0 129
1 756 1 0 157
1 757 1 0 33
1 757 1 0 52
1 757 1 0 77
1 757 1 0 105
1 757 1 0 133
1 757 1 0 161
1 758 1 0 757
1 759 1 0 752
1 759 1 0 753
1 759 1 0 754
1 759 1 0 755
1 759 1 0 756
1 760 2 1 759 758
1 761 1 0 52
1 762 1 0 33
1 763 2 1 762 761
1 764 2 0 760 763
1 765 1 0 24
1 765 1 0 36
1 765 1 0 58
1 765 1 0 86
1 765 1 0 114
1 765 1 0 142
1 766 1 0 22
1 766 1 0 37
1 766 1 0 61
1 766 1 0 89
1 766 1 0 117
1 766 1 0 145
1 767 1 0 23
1 767 1 0 40
1 767 1 0 65
1 767 1 0 93
1 767 1 0 121
1 767 1 0 149
1 768 1 0 27
1 768 1 0 44
1 768 1 0 69
1 768 1 0 97
1 768 1 0 125
1 768 1 0 153
1 769 1 0 30
1 769 1 0 48
1 769 1 0 73
1 769 1 0 101
1 769 1 0 129
1 769 1 0 157
1 770 1 0 33
1 770 1 0 52
1 770 1 0 77
1 770 1 0 105
1 770 1 0 133
1 770 1 0 161
1 771 1 0 770
1 772 1 0 765
1 772 1 0 766
1 772 1 0 767
1 772 1 0 768
1 772 1 0 769
1 773 2 1 772 771
1 774 1 0 77
1 775 1 0 33
1 775 1 0 52
1 776 2 1 775 774
1 777 2 0 773 776
1 778 1 0 24
1 778 1 0 36
1 778 1 0 58
1 778 1 0 86
1 778 1 0 114
1 778 1 0 142
1 779 1 0 22
1 779 1 0 37
1 779 1 0 61
1 779 1 0 89
1 779 1 0 117
1 779 1 0 145
1 780 1 0 23
1 780 1 0 40
1 780 1 0 65
1 780 1 0 93
1 780 1 0 121
1 780 1 0 149
1 781 1 0 27
1 781 1 0 44
1 781 1 0 69
1 781 1 0 97
1 781 1 0 125
1 781 1 0 153
1 782 1 0 30
1 782 1 0 48
1 782 1 0 73
1 782 1 0 101
1 782 1 0 129
1 782 1 0 157
1 783 1 0 33
1 783 1 0 52
1 783 1 0 77
1 783 1 0 105
1 783 1 0 133
1 783 1 0 161
1 784 1 0 783
1 785 1 0 778
1 785 1 0 779
1 785 1 0 780
1 785 1 0 781
1 785 1 0 782
1 786 2 1 785 784
1 787 1 0 105
1 788 1 0 33
1 788 1 0 52
1 788 1 0 77
1 789 2 1 788 787
1 790 2 0 786 789
1 791 1 0 24
1 791 1 0 36
1 791 1 0 58
1 791 1 0 86
1 791 1 0 114
1 791 1 0 142
1 792 1 0 22
1 792 1 0 37
1 792 1 0 61
1 792 1 0 89
1 792 1 0 117
1 792 1 0 145
1 793 1 0 23
1 793 1 0 40
1 793 1 0 65
1 793 1 0 93
1 793 1 0 121
1 793 1 0 149
1 794 1 0 27
1 794 1 0 44
1 794 1 0 69
1 794 1 0 97
1 794 1 0 125
1 794 1 0 153
1 795 1 0 30
1 795 1 0 48
1 795 1 0 73
1 795 1 0 101
1 795 1 0 129
1 795 1 0 157
1 796 1 0 33
1 796 1 0 52
1 796 1 0 77
1 796 1 0 105
1 796 1 0 133
1 796 1 0 161
1 797 1 0 796
1 798 1 0 791
1 798 1 0 792
1 798 1 0 793
1 798 1 0 794
1 798 1 0 795
1 799 2 1 798 797
1 800 1 0 133
1 801 1 0 33
1 801 1 0 52
1 801 1 0 77
1 801 1 0 105
1 802 2 1 801 800
1 803 2 0 799 802
1 804 1 0 24
1 804 1 0 36
1 804 1 0 58
1 804 1 0 86
1 804 1 0 114
1 804 1 0 142
1 805 1 0 22
1 805 1 0 37
1 805 1 0 61
1 805 1 0 89
1 805 1 0 117
1 805 1 0 145
1 806 1 0 23
1 806 1 0 40
1 806 1 0 65
1 806 1 0 93
1 806 1 0 121
1 806 1 0 149
1 807 1 0 27
1 807 1 0 44
1 807 1 0 69
1 807 1 0 97
1 807 1 0 125
1 807 1 0 153
1 808 1 0 30
1 808 1 0 48
1 808 1 0 73
1 808 1 0 101
1 808 1 0 129
1 808 1 0 157
1 809 1 0 33
1 809 1 0 52
1 809 1 0 77
1 809 1 0 105
1 809 1 0 133
1 809 1 0 161
1 810 1 0 809
1 811 1 0 804
1 811 1 0 805
1 811 1 0 806
1 811 1 0 807
1 811 1 0 808
1 812 2 1 811 810
1 813 1 0 161
1 814 1 0 33
1 814 1 0 52
1 814 1 0 77
1 814 1 0 105
1 814 1 0 133
1 815 2 1 814 813
1 816 2 0 812 815
1 817 1 0 24
1 817 1 0 36
1 817 1 0 58
1 817 1 0 86
1 817 1 0 114
1 817 1 0 142
1 818 1 0 22
1 818 1 0 37
1 818 1 0 61
1 818 1 0 89
1 818 1 0 117
1 818 1 0 145
1 819 1 0 23
1 819 1 0 40
1 819 1 0 65
1 819 1 0 93
1 819 1 0 121
1 819 1 0 149
1 820 1 0 27
1 820 1 0 44
1 820 1 0 69
1 820 1 0 97
1 820 1 0 125
1 820 1 0 153
1 821 1 0 30
1 821 1 0 48
1 821 1 0 73
1 821 1 0 101
1 821 1 0 129
1 821 1 0 157
1 822 1 0 33
1 822 1 0 52
1 822 1 0 77
1 822 1 | |
<filename>simulator_control/simulator_util.py
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The utility class for simulator."""
import json
import logging
import os
import pwd
import re
import shutil
import subprocess
import time
from xctestrunner.shared import ios_constants
from xctestrunner.shared import ios_errors
from xctestrunner.shared import plist_util
from xctestrunner.shared import xcode_info_util
from xctestrunner.simulator_control import simtype_profile
_SIMULATOR_STATES_MAPPING = {
0: ios_constants.SimState.CREATING,
1: ios_constants.SimState.SHUTDOWN,
3: ios_constants.SimState.BOOTED
}
_PREFIX_RUNTIME_ID = 'com.apple.CoreSimulator.SimRuntime.'
_SIM_OPERATION_MAX_ATTEMPTS = 3
_SIMCTL_MAX_ATTEMPTS = 2
_SIMULATOR_CREATING_TO_SHUTDOWN_TIMEOUT_SEC = 10
_SIMULATOR_SHUTDOWN_TIMEOUT_SEC = 30
_SIM_ERROR_RETRY_INTERVAL_SEC = 2
_SIM_CHECK_STATE_INTERVAL_SEC = 0.5
_PATTERN_APP_CRASH_ON_SIM = (
r'com\.apple\.CoreSimulator\.SimDevice\.[A-Z0-9\-]+(.+) '
r'\(UIKitApplication:%s(.+)\): Service exited '
'(due to (signal|Terminated|Killed|Abort trap)|with abnormal code)')
_PATTERN_XCTEST_PROCESS_CRASH_ON_SIM = (
r'com\.apple\.CoreSimulator\.SimDevice\.[A-Z0-9\-]+(.+) '
r'\((.+)xctest\[[0-9]+\]\): Service exited '
'(due to (signal|Terminated|Killed|Abort trap)|with abnormal code)')
_PATTERN_CORESIMULATOR_CRASH = (
r'com\.apple\.CoreSimulator\.SimDevice\.[A-Z0-9\-]+(.+) '
r'\(com\.apple\.CoreSimulator(.+)\): Service exited due to ')
class Simulator(object):
"""The object for simulator in MacOS."""
def __init__(self, simulator_id):
"""Constructor of Simulator object.
Args:
simulator_id: string, the identity of the simulator.
"""
self._simulator_id = simulator_id
self._simulator_root_dir = None
self._simulator_log_root_dir = None
self._device_plist_object = None
@property
def simulator_id(self):
if not self._simulator_id:
raise ios_errors.SimError(
'The simulator has not been created or has been deleted.')
return self._simulator_id
@property
def simulator_system_log_path(self):
return os.path.join(self.simulator_log_root_dir, 'system.log')
@property
def simulator_root_dir(self):
"""Gets the simulator's root directory."""
if not self._simulator_root_dir:
home_dir = pwd.getpwuid(os.geteuid()).pw_dir
self._simulator_root_dir = os.path.join(
'%s/Library/Developer/CoreSimulator/Devices/%s' %
(home_dir, self.simulator_id))
return self._simulator_root_dir
@property
def simulator_log_root_dir(self):
"""Gets the root directory of the simulator's logs."""
if not self._simulator_log_root_dir:
home_dir = pwd.getpwuid(os.geteuid()).pw_dir
self._simulator_log_root_dir = os.path.join(
'%s/Library/Logs/CoreSimulator/%s' % (home_dir, self.simulator_id))
return self._simulator_log_root_dir
@property
def device_plist_object(self):
"""Gets the plist_util.Plist object of device.plist of the simulator.
Returns:
a plist_util.Plist object of device.plist of the simulator or None when
the simulator does not exist or is being created.
"""
if not self._device_plist_object:
device_plist_path = os.path.join(self.simulator_root_dir, 'device.plist')
if not os.path.exists(device_plist_path):
return None
self._device_plist_object = plist_util.Plist(device_plist_path)
return self._device_plist_object
def Shutdown(self):
"""Shuts down the simulator."""
sim_state = self.GetSimulatorState()
if sim_state == ios_constants.SimState.SHUTDOWN:
logging.info('Simulator %s has already shut down.', self.simulator_id)
return
if sim_state == ios_constants.SimState.CREATING:
raise ios_errors.SimError(
'Can not shut down the simulator in state CREATING.')
logging.info('Shutting down simulator %s.', self.simulator_id)
try:
RunSimctlCommand(['xcrun', 'simctl', 'shutdown', self.simulator_id])
except ios_errors.SimError as e:
if 'Unable to shutdown device in current state: Shutdown' in str(e):
logging.info('Simulator %s has already shut down.', self.simulator_id)
return
raise ios_errors.SimError('Failed to shutdown simulator %s: %s' %
(self.simulator_id, str(e)))
self.WaitUntilStateShutdown()
logging.info('Shut down simulator %s.', self.simulator_id)
def Delete(self):
"""Deletes the simulator asynchronously.
    The simulator state should be SHUTDOWN when deleting it. Otherwise, this
    method raises an exception.
Raises:
ios_errors.SimError: The simulator's state is not SHUTDOWN.
"""
    # In Xcode 9+, simctl can delete a booted simulator. Prior to Xcode 9,
    # we have to shut down the simulator first before deleting it.
if xcode_info_util.GetXcodeVersionNumber() < 900:
sim_state = self.GetSimulatorState()
if sim_state != ios_constants.SimState.SHUTDOWN:
raise ios_errors.SimError(
'Can only delete the simulator with state SHUTDOWN. The current '
'state of simulator %s is %s.' % (self._simulator_id, sim_state))
logging.info('Deleting simulator %s asynchronously.', self.simulator_id)
subprocess.Popen(['xcrun', 'simctl', 'delete', self.simulator_id],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=os.setpgrp)
# The delete command won't delete the simulator log directory.
if os.path.exists(self.simulator_log_root_dir):
shutil.rmtree(self.simulator_log_root_dir, ignore_errors=True)
self._simulator_id = None
def FetchLogToFile(self, output_file_path, start_time=None, end_time=None):
"""Gets simulator log via running `log` tool on simulator.
Args:
output_file_path: string, the path of the stdout file.
      start_time: datetime, the start time of the simulator log.
      end_time: datetime, the end time of the simulator log.
"""
command = [
'xcrun', 'simctl', 'spawn', self._simulator_id, 'log', 'show',
'--style', 'syslog'
]
if start_time:
command.extend(('--start', start_time.strftime('%Y-%m-%d %H:%M:%S')))
if end_time:
command.extend(('--end', end_time.strftime('%Y-%m-%d %H:%M:%S')))
with open(output_file_path, 'w') as stdout_file:
try:
subprocess.Popen(command, stdout=stdout_file, stderr=subprocess.STDOUT)
except ios_errors.SimError as e:
raise ios_errors.SimError('Failed to get log on simulator %s: %s' %
(self.simulator_id, str(e)))
def GetAppDocumentsPath(self, app_bundle_id):
"""Gets the path of the app's Documents directory."""
if xcode_info_util.GetXcodeVersionNumber() >= 830:
try:
app_data_container = RunSimctlCommand([
'xcrun', 'simctl', 'get_app_container', self._simulator_id,
app_bundle_id, 'data'
])
return os.path.join(app_data_container, 'Documents')
except ios_errors.SimError as e:
raise ios_errors.SimError(
'Failed to get data container of the app %s in simulator %s: %s' %
(app_bundle_id, self._simulator_id, str(e)))
apps_dir = os.path.join(self.simulator_root_dir,
'data/Containers/Data/Application')
for sub_dir_name in os.listdir(apps_dir):
container_manager_plist = plist_util.Plist(
os.path.join(apps_dir, sub_dir_name,
'.com.apple.mobile_container_manager.metadata.plist'))
current_app_bundle_id = container_manager_plist.GetPlistField(
'MCMMetadataIdentifier')
if current_app_bundle_id == app_bundle_id:
return os.path.join(apps_dir, sub_dir_name, 'Documents')
raise ios_errors.SimError(
'Failed to get Documents directory of the app %s in simulator %s' %
(app_bundle_id, self._simulator_id))
def IsAppInstalled(self, app_bundle_id):
"""Checks if the simulator has installed the app with given bundle id."""
try:
RunSimctlCommand([
'xcrun', 'simctl', 'get_app_container', self._simulator_id,
app_bundle_id
])
return True
except ios_errors.SimError:
return False
def WaitUntilStateShutdown(self, timeout_sec=_SIMULATOR_SHUTDOWN_TIMEOUT_SEC):
"""Waits until the simulator state becomes SHUTDOWN.
Args:
timeout_sec: int, timeout of waiting simulator state for becoming SHUTDOWN
in seconds.
Raises:
ios_errors.SimError: when it is timeout to wait the simulator state
becomes SHUTDOWN.
"""
start_time = time.time()
while start_time + timeout_sec >= time.time():
if self.GetSimulatorState() == ios_constants.SimState.SHUTDOWN:
return
time.sleep(_SIM_CHECK_STATE_INTERVAL_SEC)
raise ios_errors.SimError('Timeout to wait for simulator shutdown in %ss.' %
timeout_sec)
def GetSimulatorState(self):
"""Gets the state of the simulator in real time.
Returns:
shared.ios_constants.SimState, the state of the simulator.
Raises:
ios_errors.SimError: The state can not be recognized.
"""
if self.device_plist_object is None:
return ios_constants.SimState.CREATING
state_num = self.device_plist_object.GetPlistField('state')
if state_num not in _SIMULATOR_STATES_MAPPING.keys():
logging.warning('The state %s of simulator %s can not be recognized.',
state_num, self.simulator_id)
return ios_constants.SimState.UNKNOWN
return _SIMULATOR_STATES_MAPPING[state_num]
def CreateNewSimulator(device_type=None, os_version=None, name_prefix=None):
"""Creates a new simulator according to arguments.
If neither device_type nor os_version is given, will use the latest iOS
version and latest iPhone type.
If os_version is given but device_type is not, will use latest iPhone type
according to the OS version limitation. E.g., if the given os_version is 9.3,
the latest simulator type is iPhone 6s Plus. Because the min OS version of
iPhone 7 is 10.0.
If device_type is given but os_version is not, will use the min value
between max OS version of the simulator type and current latest OS version.
E.g., if the given device_type is iPhone 5 and latest OS version is 10.3,
will use 10.2. Because the max OS version of iPhone 5 is 10.2.
Args:
device_type: string, device type of the new simulator. The value corresponds
to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad
Air, etc.
os_version: string, OS version of the new simulator. The format is
{major}.{minor}, such as 9.3, 10.2.
name_prefix: string, name prefix of the new simulator. By default, it is
"New".
Returns:
a tuple with four items:
string, id of the new simulator.
string, simulator device type of the new simulator.
string, OS version of the new simulator.
string, name of the new simulator.
Raises:
ios_errors.SimError: when failed to create new simulator.
ios_errors.IllegalArgumentError: when the given argument is invalid.
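  Example (illustrative values; available device types and OS versions depend
  on the local Xcode installation):
    sim_id, sim_type, sim_os, sim_name = CreateNewSimulator(
        device_type='iPhone 8', os_version='12.4')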
"""
if not device_type:
os_type = ios_constants.OS.IOS
else:
_ValidateSimulatorType(device_type)
os_type = GetOsType(device_type)
if not os_version:
os_version = GetLastSupportedSimOsVersion(os_type, device_type=device_type)
else:
supported_sim_os_versions = GetSupportedSimOsVersions(os_type)
if os_version not in supported_sim_os_versions:
raise ios_errors.IllegalArgumentError(
'The simulator os version %s is not supported. Supported simulator '
'os versions are %s.' % (os_version, supported_sim_os_versions))
if not device_type:
device_type = GetLastSupportedIphoneSimType(os_version)
else:
_ValidateSimulatorTypeWithOsVersion(device_type, os_version)
if not name_prefix:
name_prefix = 'New'
name = '%s-%s-%s' % (name_prefix, device_type, os_version)
# Example
# Runtime ID of iOS 10.2: com.apple.CoreSimulator.SimRuntime.iOS-10-2
runtime_id = _PREFIX_RUNTIME_ID + os_type + '-' + os_version.replace('.', '-')
logging.info('Creating a new simulator:\nName: %s\nOS: %s %s\nType: %s', name,
os_type, os_version, device_type)
for i in range(0, _SIM_OPERATION_MAX_ATTEMPTS):
try:
new_simulator_id = RunSimctlCommand(
['xcrun', 'simctl', 'create', name, device_type, runtime_id])
except ios_errors.SimError as e:
raise ios_errors.SimError('Failed to create simulator: %s' % str(e))
new_simulator_obj = Simulator(new_simulator_id)
# After creating a new simulator, its state is CREATING. When the
# simulator's state becomes SHUTDOWN, the simulator is created.
try:
new_simulator_obj.WaitUntilStateShutdown(
_SIMULATOR_CREATING_TO_SHUTDOWN_TIMEOUT_SEC)
logging.info('Created new simulator %s.', new_simulator_id)
return new_simulator_id, device_type, os_version, name
except ios_errors.SimError as error:
logging.debug('Failed to create simulator %s: %s.', new_simulator_id,
error)
logging.debug('Deleted half-created simulator %s.', new_simulator_id)
new_simulator_obj.Delete()
if i != _SIM_OPERATION_MAX_ATTEMPTS - | |
'left'
delete_old : bool
Whether to delete the old column (``X[left_on]``)
Default = True
Examples
--------
TODO
"""
def __init__(self, df, left_on, right_on, how='left', delete_old=True):
# Check types
if not isinstance(df, pd.DataFrame):
raise TypeError('df must be a pandas DataFrame')
if not isinstance(left_on, str):
raise TypeError('left_on must be a str')
if not isinstance(right_on, str):
raise TypeError('right_on must be a str')
if not isinstance(how, str):
raise TypeError('how must be a str')
if how not in ['left', 'right', 'outer', 'inner']:
raise TypeError('how must be left, right, outer, or inner')
if not isinstance(delete_old, bool):
raise TypeError('delete_old must be a bool')
# Store parameters
self.df = df
self.left_on = left_on
self.right_on = right_on
self.how = how
self.delete_old = delete_old
def fit(self, X, y):
"""Nothing needs to be done here"""
return self
def transform(self, X, y=None):
"""Perform the join transformation.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
Xo = X.copy()
index_name = 'index' if Xo.index.name is None else Xo.index.name
Xo = (Xo.reset_index()
.merge(self.df, left_on=self.left_on,
right_on=self.right_on, how=self.how)
.set_index(index_name))
if self.delete_old:
if self.right_on in Xo:
del Xo[self.right_on]
if self.left_on in Xo:
del Xo[self.left_on]
return Xo
def fit_transform(self, X, y=None):
"""Fit and transform the data.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
return self.fit(X, y).transform(X, y)
class JoinColumns(BaseEstimator, TransformerMixin):
"""Join multiple columns.
Parameters
----------
cols : list of str
Columns to join
name : str
Name for the new column
sep : str
Separator string to use.
Default = ','
delete_old : bool
Whether to delete the columns merged to make the new columns.
Default = True
Examples
--------
    A minimal sketch (assumes pandas is imported as ``pd``; column names are
    illustrative)::
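
        df = pd.DataFrame({'first': ['ada', 'alan'],
                           'last': ['lovelace', 'turing']})
        jc = JoinColumns(cols=['first', 'last'], name='name', sep=' ')
        jc.fit_transform(df)
        # -> DataFrame with a single 'name' column:
        #    'ada lovelace', 'alan turing'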
"""
def __init__(self, cols, name, sep=',', delete_old=True):
# Check types
if not isinstance(cols, (str, list)):
raise TypeError('cols must be a str or list of str')
if not isinstance(name, str):
raise TypeError('name must be a str')
if not isinstance(sep, str):
raise TypeError('sep must be a str')
if not isinstance(delete_old, bool):
raise TypeError('delete_old must be a bool')
# Store parameters
if isinstance(cols, str):
self.cols = [cols]
else:
self.cols = cols
self.name = name
self.sep = sep
self.delete_old = delete_old
def fit(self, X, y):
"""Nothing needs to be done here"""
return self
def transform(self, X, y=None):
"""Join the columns
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
Xo = X.copy()
data = [Xo[c].tolist() for c in self.cols]
Xo[self.name] = [self.sep.join([e[i] for e in data
if isinstance(e[i], str)
and len(e[i])>0])
for i in range(X.shape[0])]
if self.delete_old:
for col in self.cols:
del Xo[col]
return Xo
def fit_transform(self, X, y=None):
"""Fit and transform the data.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
return self.fit(X, y).transform(X, y)
class LambdaTransformer(BaseEstimator, TransformerMixin):
"""Transform column(s) with lambda functions
Parameters
----------
transforms : dict
Dictionary of transforms to perform on each column. Keys should be
column names, and values should be lambda functions.
Examples
--------
    A minimal sketch (assumes pandas as ``pd`` and numpy as ``np``; the
    column name is illustrative)::
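
        df = pd.DataFrame({'price': [10.0, 100.0, 1000.0]})
        lt = LambdaTransformer({'price': lambda v: np.log10(v)})
        lt.fit_transform(df)
        # -> 'price' column becomes [1.0, 2.0, 3.0]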
"""
def __init__(self, transforms):
# Check types
if not isinstance(transforms, dict):
raise TypeError('transforms must be a dict')
if not all(isinstance(e, str) for e in transforms.keys()):
raise TypeError('transforms keys must be str')
if not all(callable(e) for e in transforms.values()):
raise TypeError('transforms values must be callable')
# Store parameters
self.transforms = transforms
def fit(self, X, y):
"""Nothing needs to be done here"""
return self
def transform(self, X, y=None):
"""Perform the join transformation.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
Xo = X.copy()
for col, transform in self.transforms.items():
Xo[col] = Xo[col].apply(transform)
return Xo
def fit_transform(self, X, y=None):
"""Fit and transform the data.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
return self.fit(X, y).transform(X, y)
class LambdaFeatures(BaseEstimator, TransformerMixin):
"""Create new features.
Parameters
----------
features : dict
Dictionary of features to create. Keys should contain names for the
new columns, and values should be functions. The function should take
one argument (the X dataframe), and return a series containing
the new feature.
Examples
--------
    A minimal sketch (assumes pandas as ``pd``; column names are
    illustrative)::
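
        df = pd.DataFrame({'price': [2.0, 3.0], 'quantity': [5, 4]})
        lf = LambdaFeatures({'total': lambda X: X['price'] * X['quantity']})
        lf.fit_transform(df)
        # -> adds a 'total' column with values [10.0, 12.0]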
"""
def __init__(self, features):
# Check types
if not isinstance(features, dict):
raise TypeError('features must be a dict')
for col, feat in features.items():
if not isinstance(col, str):
raise TypeError('features keys must be str')
if not callable(feat):
raise TypeError('features values must be callable')
# Store parameters
self.features = features
def fit(self, X, y):
"""Nothing needs to be done here"""
return self
def transform(self, X, y=None):
"""Create the new features.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
Xo = X.copy()
for col, feat in self.features.items():
Xo[col] = feat(Xo)
return Xo
def fit_transform(self, X, y=None):
"""Fit and transform the data.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
return self.fit(X, y).transform(X, y)
def null_encode(X, y=None, cols=None, suffix='_isnull', dtype='uint8',
delete_old=False):
"""Null encode columns in a DataFrame.
For each column with null values, adds a column containing indicators
as to whether each sample in original column is null.
Parameters
----------
cols : list of str
Columns to null encode. Default is to null encode all columns in
the DataFrame which contain null values.
suffix : str
Suffix to append to original column names to create null indicator
column names
dtype : str
Datatype to use for encoded columns.
Default = 'uint8'
delete_old : bool
Whether to delete the old column which was encoded
Default = False
Returns
-------
pandas DataFrame
Null encoded DataFrame
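
    Examples
    --------
    A minimal sketch (assumes pandas as ``pd``; the exact encoded columns come
    from ``NullEncoder``, which is defined elsewhere in this module)::

        df = pd.DataFrame({'a': [1.0, None, 3.0]})
        null_encode(df)
        # -> adds an 'a_isnull' indicator column (1 where 'a' is null)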
"""
ne = NullEncoder(cols=cols, suffix=suffix, dtype=dtype,
delete_old=delete_old)
return ne.fit_transform(X, y)
def label_encode(X, y=None, cols=None):
"""Label encode columns in a DataFrame.
Replaces categorical column(s) with integer labels for each unique
category in original column.
Parameters
----------
cols : list of str
Columns to label encode. Default is to label encode all categorical
columns in the DataFrame.
Returns
-------
pandas DataFrame
Label encoded DataFrame
"""
le = LabelEncoder(cols=cols)
return le.fit_transform(X, y)
def one_hot_encode(X, y=None, cols=None, reduce_df=False, dtype='uint8'):
"""One-hot encode columns in a DataFrame.
Replaces categorical column(s) with binary columns for each unique value
in original column.
Parameters
----------
cols : list of str
Columns to one-hot encode. Default is to one-hot encode all
categorical columns in the DataFrame.
reduce_df : bool
Whether to use reduced degrees of freedom for encoding (that is,
add N-1 one-hot columns for a column with N categories). E.g. for
a column with categories A, B, and C: When reduce_df is True,
A=[1, 0], B=[0, 1], and C=[0, 0]. When reduce_df is False,
A=[1, 0, 0], B=[0, 1, 0], and C=[0, 0, 1].
Default = False
dtype : str
Datatype to use for encoded columns. Default = 'uint8'
Returns
-------
pandas DataFrame
One-hot encoded DataFrame
"""
ohe = OneHotEncoder(cols=cols, reduce_df=reduce_df, dtype=dtype)
return ohe.fit_transform(X, y)
def target_encode(X, y=None, cols=None, dtype='float64'):
"""Target encode columns in a DataFrame.
Replaces category values in categorical column(s) with the mean target
(dependent variable) value for each category.
Parameters
----------
cols | |
<reponame>mikedh/kinematic
"""
kinematic.py
--------------
PROTOTYPE for support for rigid body kinematics. Not sure if
supporting this is something that is feasible or desirable for trimesh,
this is just exploring what it might look like. If this improves, it
could feasibly live as `trimesh.kinematic`.
Challenge: create a data structure which can (mostly) hold and
cross-convert GLTF Skeletons, OpenRave XML Robots, and URDF robots.
Uses sympy to produce numpy-lambdas for forward kinematics, which once computed
are quite fast (for Python anyway) to execute.
"""
import trimesh
import abc
import sympy as sp
import numpy as np
import networkx as nx
ABC = trimesh.util.ABC
# for debugging
from trimesh.exchange.threedxml import print_element as pp # NOQA
try:
import lxml.etree as etree
except BaseException as E:
etree = trimesh.exceptions.ExceptionModule(E)
class KinematicChain(object):
"""
A mechanism which consists of geometry (`Link` objects) connected
by variable transforms (`Joint` objects).
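
    Example (a sketch; the `Joint` and `Link` objects are assumed to have
    been constructed elsewhere, and the names are illustrative):
        chain = KinematicChain(joints={'j0': joint},
                               links={'base': base, 'arm': arm},
                               base_link='base')
        fk = chain.forward_kinematics_lambda()
        arm_pose = fk['arm'](0.25)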
"""
def __init__(self,
joints,
links,
base_link='base'):
"""
Create a kinematic chain.
Parameters
--------------
joints : dict
Joint name to `Joint` objects
links : (m,) Link object
Link name to `Link` objects
base_link : str
Name of base link
"""
# save passed joints and links
self.joints = joints
self.links = links
# which link is the first
self.base_link = base_link
@property
def base_frame(self):
# TODO : figure something out here
return self.base_link
@property
def parameters(self):
"""
What are the variables that define the state of the chain.
Returns
---------
parameters : (n,) sympy.Symbol
Ordered parameters
"""
return [i.parameter for i in self.joints.values()]
@property
def limits(self):
limits = np.sort([j.limits for j in
self.joints.values()], axis=1)
return limits
def graph(self):
"""
Get a directed graph where joints are edges between links.
Returns
----------
graph : networkx.DiGraph
Graph containing connectivity information
"""
graph = nx.DiGraph()
for name, joint in self.joints.items():
graph.add_edge(*joint.connects, joint=name)
return graph
def scene(self):
"""
Get a scene containing the geometry for every link.
Returns
-----------
scene : trimesh.Scene
Scene with link geometry
"""
geometry = {}
for name, link in self.links.items():
geometry.update(link.geometry)
base_frame = self.base_frame
graph = trimesh.scene.transforms.SceneGraph()
graph.from_edgelist([(base_frame, geom_name,
{'geometry': geom_name})
for geom_name in geometry.keys()])
graph.update(frame_from=graph.base_frame, frame_to=base_frame)
scene = trimesh.Scene(geometry, graph=graph)
return scene
def show(self):
"""
Open a pyglet window showing all geometry.
"""
self.scene().show()
def paths(self):
"""
Find the route from the base body to every link.
Returns
---------
joint_paths : dict
Keys are link names, values are a list of joint objects
"""
base = self.base_link
graph = self.graph()
paths = {}
for b in self.links.values():
try:
paths[b.name] = shortest(graph, base, b.name)
except BaseException as E:
print('exception:', E)
joint_paths = {}
for body, path in paths.items():
joint_paths[body] = [graph.get_edge_data(a, b)['joint']
for a, b in zip(path[:-1], path[1:])]
return joint_paths
def forward_kinematics(self):
"""
Get the symbolic sympy forward kinematics.
Returns
-----------
symbolic : dict
Keyed by body to a sympy matrix
"""
def product(L):
if len(L) == 0:
return sp.Matrix.eye(4)
cum = L[0]
for i in L[1:]:
cum *= i
return cum
# routes to base link
paths = self.paths()
# symbolic matrices
matrices = {name: j.matrix for name, j in self.joints.items()}
#
combined = {k: product([matrices[i] for i in path])
for k, path in paths.items()}
return combined
def forward_kinematics_lambda(self):
"""
Get a numpy-lambda for evaluating forward kinematics relatively
quickly.
Returns
-----------
lambdas : dict
Link name to function which takes float values
corresponding to self.parameters.
"""
# a symbolic equation for every link
combined = self.forward_kinematics()
return {k: sp.lambdify(self.parameters, c)
for k, c in combined.items()}
def shortest(graph, a, b):
"""
Try to find a shortest path between two nodes.
Parameters
-------------
graph : networkx.DiGraph
Graph with nodes
a : str
Source node
b : str
Destination node
Returns
----------
path : (n,) str
Path between `a` and `b`
"""
try:
s = nx.shortest_path(graph, a, b)
return s
except BaseException:
# try traversing the DiGraph backwards
s = nx.shortest_path(graph, b, a)
return s[::-1]
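# A small hedged illustration of `shortest` (node names are made up): when no
# directed path exists from source to destination, the graph is traversed the
# other way around and the reversed result is returned.
def _example_shortest():
    g = nx.DiGraph()
    g.add_edge('base', 'arm', joint='shoulder')
    g.add_edge('arm', 'hand', joint='wrist')
    assert shortest(g, 'base', 'hand') == ['base', 'arm', 'hand']
    # no directed path hand -> base, so the backwards path is reversed
    assert shortest(g, 'hand', 'base') == ['hand', 'arm', 'base']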
class Joint(ABC):
"""
The base class for `Joint` objects, or connections
between `Link` objects which contain geometry.
"""
@abc.abstractmethod
def matrix(self):
"""
        The symbolic homogeneous transformation matrix between
`self.connects[0]` and `self.connects[1]`.
Returns
-----------
matrix : sympy.Matrix
Transform with `self.parameter` as a variable
"""
raise NotImplementedError('call a subclass!')
@property
def connects(self):
"""
        The names of the two links this joint connects.
Returns
-------------
connects : (2,) list
The name of two `Link` objects
"""
return self._connects
@connects.setter
def connects(self, values):
if values is None or len(values) != 2:
raise ValueError('`connects` must be two link names!')
self._connects = values
@property
def limits(self):
"""
The
"""
if hasattr(self, '_limits'):
return self._limits
return [-np.inf, np.inf]
@limits.setter
def limits(self, values):
if values is not None:
self._limits = values
class RotaryJoint(Joint):
def __init__(self,
name,
axis,
connects,
initial=None,
limits=None,
anchor=None):
"""
Create a rotary joint between two links.
Parameters
-------------
name : str
The name of this joint.
axis : (3,) float
The unit vector this joint revolves around.
connects : (2,) str
The name of the two `Link` objects this joint connects
initial : None or (4, 4) float
Initial transformation.
limits : None or (2,) float
The limits of this joint in radians.
anchor : None or (3,) float
The point in space anchoring this joint,
also known as the origin of the axis line
"""
# the unit vector axis
self.axis = np.array(axis, dtype=np.float64).reshape(3)
# the point around which to rotate
if anchor is None:
self.anchor = np.zeros(3)
else:
self.anchor = np.array(anchor, dtype=np.float64)
# the name of the joint
self.name = name
# which links is this a joint between?
self.connects = connects
# the value to symbolically represent joint position
self.parameter = sp.Symbol(name)
self.initial = initial
self.limits = limits
@property
def matrix(self):
# inherit the docstring from the base class
# self.parameter is a `sympy.Symbol` so the returned
# transformation matrix will also be symbolic
matrix = trimesh.transformations.rotation_matrix(
angle=self.parameter,
direction=self.axis,
point=self.anchor)
if self.initial is not None:
matrix = matrix * sp.Matrix(self.initial)
return matrix
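# A hedged usage sketch (the joint and link names here are made up): the
# `matrix` property stays symbolic in `self.parameter` until a numeric angle
# is substituted. This relies on `rotation_matrix` accepting a symbolic
# angle, exactly as the class above assumes.
def _example_rotary_joint():
    elbow = RotaryJoint(name='elbow',
                        axis=[0, 0, 1],
                        connects=['upper_arm', 'forearm'],
                        limits=[-np.pi, np.pi])
    # wrap in a sympy Matrix so `subs` works regardless of the return type
    symbolic = sp.Matrix(elbow.matrix)
    # substitute a numeric angle to get a plain (4, 4) float transform
    return np.array(symbolic.subs(elbow.parameter, np.pi / 2),
                    dtype=np.float64)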
class LinearJoint(Joint):
def __init__(self, name, axis, connects, limits=None):
"""
Create a linear (also known as prismatic) joint between
two `Link` objects.
Parameters
-------------
name : str
The name of the joint
axis : (3,) float
The vector along which the joint translates
connects : (2,) list
Which links does the joint connect
limits : None or (2,) float
What are the limits of the joint
"""
self.parameter = sp.Symbol(name)
self.connects = connects
self.limits = limits
if axis is None or len(axis) != 3:
raise ValueError('axis must be (3,) float!')
# save axis as a unit vector
self.axis = np.array(axis, dtype=np.float64)
self.axis /= np.linalg.norm(self.axis)
@property
def matrix(self):
"""
Get a parametrized transformation for this joint.
Returns
-----------
matrix : (4, 4) sp.Matrix
Transform parameterized by self.parameter
"""
# start with an identity matrix
translation = sp.Matrix.eye(4)
# self.axis is a unit vector
translation[:3, 3] = self.axis * self.parameter
return translation
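# Another hedged sketch (names are made up): a LinearJoint yields an identity
# matrix whose translation column is `axis * parameter`, so substituting a
# value for the parameter slides the link along the axis.
def _example_linear_joint():
    slide = LinearJoint(name='slide',
                        axis=[1, 0, 0],
                        connects=['base', 'carriage'])
    # symbolic (4, 4) transform with `slide` in the X translation entry
    return slide.matrix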
class Link(object):
def __init__(self, name, geometry):
"""
`Link` objects store geometry.
Parameters
------------
name : str
The name of the Link object
geometry : dict
Any geometry that this link contains
"""
self.name = name
self.geometry = geometry
def show(self, **kwargs):
trimesh.Scene(self.geometry).show(**kwargs)
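# A hedged end-to-end sketch (all names below are made up, and it assumes the
# symbolic joint matrices behave as the docstrings above describe): assemble
# a two-link chain with one rotary joint, then evaluate its forward
# kinematics numerically.
def _example_chain():
    links = {'base': Link(name='base',
                          geometry={'base_box': trimesh.creation.box()}),
             'arm': Link(name='arm',
                         geometry={'arm_box': trimesh.creation.box()})}
    joints = {'shoulder': RotaryJoint(name='shoulder',
                                      axis=[0, 0, 1],
                                      connects=['base', 'arm'],
                                      limits=[-np.pi, np.pi])}
    chain = KinematicChain(joints=joints, links=links, base_link='base')
    # numeric lambdas keyed by link name; call with one float per entry
    # in chain.parameters
    lambdas = chain.forward_kinematics_lambda()
    return lambdas['arm'](0.5)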
def _parse_file(file_obj, ext):
"""
Load an XML file from a file path or ZIP archive.
Parameters
----------
file_obj : str
Path to an XML file or ZIP archive
ext : str
Desired extension of XML-like file
Returns
-----------
    tree : lxml.etree.ElementTree
      Parsed XML document
"""
# make sure extension is in the format '.extension'
ext = '.' + ext.lower().strip().lstrip('.')
# load our file into an etree and a resolver
if trimesh.util.is_string(file_obj):
if file_obj.lower().endswith(ext):
# path was passed to actual XML file so resolver can use that path
resolver = trimesh.visual.resolvers.FilePathResolver(file_obj)
tree = etree.parse(file_obj)
elif file_obj.lower().endswith('.zip'):
# load the ZIP archive
with open(file_obj, 'rb') as f:
archive = trimesh.util.decompress(f, 'zip')
# find the first key in the archive that matches our extension
            # this will be screwy if there are multiple XML files
key = next(k for k in archive.keys()
if k.lower().endswith(ext))
# load the XML file into an etree
tree = etree.parse(archive[key])
# create a resolver from the archive
resolver = trimesh.visual.resolvers.ZipResolver(archive)
else:
            raise ValueError(f'must be {ext} or ZIP: {file_obj}')
<filename>build/lib/tracc/tracc.py
import tracc
import pandas as pd
import numpy as np
class costs:
def __init__(self,
travelcosts_df,
columns = None
):
"""
Inputs data and prunes columns if desired
"""
if columns is not None:
self.data = travelcosts_df[columns]
else:
self.data = travelcosts_df
def intrazonal(self,
cost_column,
origin_column,
destination_column,
method = "constant",
value = 0,
polygon_file = None,
polygon_id = None
):
"""
        Computes and updates intrazonal travel costs in a travel cost matrix. The output will include a travel cost from every origin or destination location in the matrix to itself.
Parameters
----------
cost_column : column name for travel costs
origin_column : column name for origin IDs
        destination_column : column name for destination IDs
method : "constant" applies a single @value to all intrazonal travel costs. "radius" applies a cost which is proportional to the radius of a circle with the same area as its input polygon
value : parameters for the method
polygon_file : file path to an input spatial polygon (e.g. geojson) if needed (it is for method = "radius")
polygon_id : ID field for the polygon_file needed for joining to the cost matrix
"""
# making sure ID columns are strings for a merge later on
self.data[origin_column] = self.data[origin_column].astype(str)
self.data[destination_column] = self.data[destination_column].astype(str)
# getting set of unique locations in the dataset
locations = list(self.data[origin_column].unique()) + list(self.data[destination_column].unique())
locations = list(set(locations))
if method == "constant":
new_times = [value] * len(locations)
df = pd.DataFrame(
list(zip(locations, locations, new_times)),
columns =[origin_column, destination_column, cost_column + "_i"])
elif method == "radius":
from tracc.spatial import radius
            # compute based on the equivalent radius of each polygon
df = radius(polygon_file,polygon_id)
df[origin_column] = df[polygon_id]
df[destination_column] = df[polygon_id]
del df[polygon_id]
df[cost_column + "_i"] = value * df["radius"]
del df["radius"]
else:
raise Exception("Method can only be 'constant' or 'radius'")
df[origin_column] = df[origin_column].astype(str)
df[destination_column] = df[destination_column].astype(str)
# join in the newly created intrazonal travel times
self.data = pd.merge(self.data, df, how='outer', left_on=[origin_column, destination_column], right_on = [origin_column, destination_column])
# replace the older intrazonal travel times
self.data[cost_column] = np.where((self.data[cost_column + "_i"] >= 0),self.data[cost_column + "_i"],self.data[cost_column])
del self.data[cost_column + "_i"]
def fill_missing_costs(
self,
cost_column,
origin_column,
destination_column,
spatial_file_path,
spatial_file_id,
where = "origin",
weight_type = "Queen"
):
"""
        Completes an OD matrix by filling in locations that were missing from the original matrix, based on a neighbourhood spatial weights matrix. For example, if an origin zone has no travel costs, its travel costs to each destination are presumed to be the average of those of its neighbouring zones.
"""
from tracc.spatial import area
# get list of zones which are missing from the input costs table
dfz = area(spatial_file_path, spatial_file_id)
dfz[spatial_file_id] = dfz[spatial_file_id].astype(str)
self.data[origin_column] = self.data[origin_column].astype(str)
li1 = list(self.data[origin_column].unique())
li2 = list(dfz[spatial_file_id].unique())
missing = [x for x in li2 if x not in li1]
del li1,li2
if len(missing) == 0:
return None
if where == "origin":
# get neighbours for each missing zone
from tracc.spatial import get_neighbours
            neighbours = get_neighbours(spatial_file_path, weight_type, spatial_file_id)
new_times = []
# for each zone, compute average travel times to other zones based on neighbours
for location in missing:
locneigh = neighbours[location]
temp = self.data[self.data[origin_column].isin(locneigh)]
temp = pd.DataFrame(temp.groupby([destination_column], as_index=False)[cost_column].mean())
temp[origin_column] = location
new_times.append(temp)
# combine the outputs, and concat to the input times
new_times = pd.concat(new_times)
self.data = pd.concat([self.data, new_times])
elif where == "destination":
# get neighbours for each missing zone
from tracc.spatial import get_neighbours
            neighbours = get_neighbours(spatial_file_path, weight_type, spatial_file_id)
new_times = []
# for each zone, compute average travel times from other zones based on neighbours
for location in missing:
locneigh = neighbours[location]
temp = self.data[self.data[destination_column].isin(locneigh)]
temp = pd.DataFrame(temp.groupby([origin_column], as_index=False)[cost_column].mean())
temp[destination_column] = location
new_times.append(temp)
# combine the outputs, and concat to the input times
new_times = pd.concat(new_times)
self.data = pd.concat([self.data, new_times])
else:
raise Exception("Input paramater @where should either be 'origin' or 'destination'")
def generalized_cost(
self,
columns,
coefficients,
exponents = None,
prune_output = True,
output_cost_name = "GC"
):
"""
        Computes a generalized cost for each row: the last element of
        @coefficients is used as an intercept, and every other coefficient
        multiplies its matching column in @columns, optionally raised to
        the matching exponent.
        """
        # TODO: add a column check warning, and make the intercept 0 if none is provided
# set all exponents as 1 if none are inputted
if exponents is None:
exponents = [1] * len(columns)
        # compute the generalized cost: start from the intercept term,
        # which is the last element of the coefficients list
        self.data[output_cost_name] = coefficients[len(coefficients) - 1]
i = 0
while i < len(columns):
self.data[output_cost_name] = self.data[output_cost_name] + coefficients[i] * self.data[columns[i]] ** exponents[i]
i += 1
# delete initital cost columns if desired
if prune_output is True:
for col in list(set(columns)):
del self.data[col]
def impedence_calc(
self,
cost_column,
impedence_func,
impedence_func_params,
prune_output = False,
output_col_name = "fCij"
):
"""
        Measures impedence for each row given an input travel cost column and the selected impedence function and its parameters
        # TODO: add more impedence function options
"""
if impedence_func == "cumulative":
self.data[output_col_name] = self.data[cost_column].apply(tracc.decay.cumulative,args = (impedence_func_params,))
elif impedence_func == "linear":
self.data[output_col_name] = self.data[cost_column].apply(tracc.decay.linear,args = (impedence_func_params,))
elif impedence_func == "exponential":
self.data[output_col_name] = self.data[cost_column].apply(tracc.decay.exponential,args = (impedence_func_params,))
else:
raise Exception("Please select an appropriate decay function")
if prune_output is True:
del self.data[cost_column]
def impedence_combine(self,
columns,
how = "product",
output_col_name = "fCij",
prune_output = True
):
"""
        Combines multiple impedence values into a single impedence value. This is similar to generalized cost.
        For example, if we have an impedence value for transit travel time and we also want to remove any trips based on a fare criterion, it can be applied in this way.
"""
if how == "product":
self.data[output_col_name] = 1
i = 0
while i < len(columns):
self.data[output_col_name] = self.data[output_col_name] * self.data[columns[i]]
i += 1
elif how == "sum":
self.data[output_col_name] = 0
i = 0
while i < len(columns):
self.data[output_col_name] = self.data[output_col_name] + self.data[columns[i]]
i += 1
else:
raise Exception('the input @how must be one of "product" or "sum"')
def max_impedence(self,
columns,
imp_col_name = "fCij"
):
"""
Reduces the cost table to only include rows with the maximum impedence value for the set of input columns.
        For example, if there are 3 transit trips from i to j, each with a different generalized cost resulting from a different route choice, this function will keep the row with the greatest impedence value (i.e. the lowest generalized cost).
"""
self.data = self.data.groupby(columns)[imp_col_name].max().reset_index()
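# A hedged usage sketch (the column names and parameter values below are made
# up, and the exact form of impedence_func_params depends on tracc.decay): a
# typical flow fills intrazonal costs, converts travel time into an impedence
# value, then keeps the best impedence for each origin-destination pair.
def _example_costs_workflow(travel_times_df):
    tc = costs(travel_times_df, columns=["o_id", "d_id", "minutes"])
    tc.intrazonal(cost_column="minutes",
                  origin_column="o_id",
                  destination_column="d_id",
                  method="constant",
                  value=2.5)
    tc.impedence_calc(cost_column="minutes",
                      impedence_func="cumulative",
                      impedence_func_params=30,
                      output_col_name="fCij")
    tc.max_impedence(columns=["o_id", "d_id"], imp_col_name="fCij")
    return tc.data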
class supply:
def __init__(self,
supply_df,
columns = None
):
"""
        Initializing can optionally prune the dataset to the given @columns
"""
if columns is not None:
self.data = supply_df[columns]
else:
self.data = supply_df
def weight(self,
columns,
weights,
weight_col_name = "Oj",
prune_output = True
):
"""
        Creates a value based on a weighted linear combination of other values. Can be used to weight destinations by their desirability.
Parameters
----------------
columns : columns in which to input into the weights function
        weights : linear multipliers, the same length as @columns
weight_col_name : output column name
prune_output : if True, delete all input columns used in the weight function
"""
if len(columns) != len(weights):
raise Exception("Please make sure columns and weights are lists of the same length")
if len(columns) < 2:
raise Exception("Can only weight opportunities if 2 or more are inputted")
if sum(weights) < 0.999 or sum(weights) > 1.001:
print("WARNING: the inputted weights do not sum to 1.")
self.data[weight_col_name] = 0
i = 0
while i < len(columns):
self.data[weight_col_name] = self.data[weight_col_name] + weights[i] * self.data[columns[i]]
i += 1
if prune_output is True:
for col in list(set(columns)):
del self.data[col]
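# A hedged sketch of weighting destinations (column names and weights are
# made up): two job categories are blended into a single opportunity column
# "Oj"; the input columns are dropped because prune_output defaults to True.
def _example_supply_weight(jobs_df):
    s = supply(jobs_df, columns=["d_id", "jobs_retail", "jobs_office"])
    s.weight(columns=["jobs_retail", "jobs_office"],
             weights=[0.4, 0.6],
             weight_col_name="Oj")
    return s.data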
class demand:
def __init__(self,
demand_df,
columns = None
):
"""
        Initializing can optionally prune the dataset to the given @columns
"""
if columns is not None:
self.data = demand_df[columns]
else:
self.data = demand_df
def weight(self,
columns,
weights,
weight_col_name = "Pi",
prune_output = True
):
"""
        Creates a value based on a weighted linear combination of other values. Can be used to weight population groups by their propensity to travel to certain activity types.
Parameters
----------------
columns : columns in which to input into the weights function
        weights : linear multipliers, the same length as @columns
weight_col_name : output column name
        prune_output : if True, delete all input columns used in the weight function