| hexsha (stringlengths 40–40) | size (int64 1–1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3–239) | max_stars_repo_name (stringlengths 5–130) | max_stars_repo_head_hexsha (stringlengths 40–78) | max_stars_repo_licenses (listlengths 1–10) | max_stars_count (int64 1–191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24–24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24–24 ⌀) | max_issues_repo_path (stringlengths 3–239) | max_issues_repo_name (stringlengths 5–130) | max_issues_repo_head_hexsha (stringlengths 40–78) | max_issues_repo_licenses (listlengths 1–10) | max_issues_count (int64 1–67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24–24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24–24 ⌀) | max_forks_repo_path (stringlengths 3–239) | max_forks_repo_name (stringlengths 5–130) | max_forks_repo_head_hexsha (stringlengths 40–78) | max_forks_repo_licenses (listlengths 1–10) | max_forks_count (int64 1–105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24–24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24–24 ⌀) | content (stringlengths 1–1.03M) | avg_line_length (float64 1–958k) | max_line_length (int64 1–1.03M) | alphanum_fraction (float64 0–1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 4a06e6e1a73c3036047979830e8ab7be408183aa | 13,453 | py | Python | implementation 20 x 15.py | dhavaldarbar/SIT723_ProjectA- | 52a0494fc41774020ae8e1a9ab69496478ae4242 | ["0BSD"] | null | null | null | implementation 20 x 15.py | dhavaldarbar/SIT723_ProjectA- | 52a0494fc41774020ae8e1a9ab69496478ae4242 | ["0BSD"] | null | null | null | implementation 20 x 15.py | dhavaldarbar/SIT723_ProjectA- | 52a0494fc41774020ae8e1a9ab69496478ae4242 | ["0BSD"] | null | null | null |
import numpy as np
import random
import sys
import time
# Reference data: Fisher and Thompson 20x5 instance, alternate name (mt20).
# Note: this commented block describes a 20-job x 5-machine instance; the
# `power` and `machine` tables below encode the 20 x 15 instance that this
# script actually solves.
# 20 5
# 0 29 1 9 2 49 3 62 4 44
# 0 43 1 75 3 69 2 46 4 72
# 1 91 0 39 2 90 4 12 3 45
# 1 81 0 71 4 9 2 85 3 22
# 2 14 1 22 0 26 3 21 4 72
# 2 84 1 52 4 48 0 47 3 6
# 1 46 0 61 2 32 3 32 4 30
# 2 31 1 46 0 32 3 19 4 36
# 0 76 3 76 2 85 1 40 4 26
# 1 85 2 61 0 64 3 47 4 90
# 1 78 3 36 0 11 4 56 2 21
# 2 90 0 11 1 28 3 46 4 30
# 0 85 2 74 1 10 3 89 4 33
# 2 95 0 99 1 52 3 98 4 43
# 0 6 1 61 4 69 2 49 3 53
# 1 2 0 95 3 72 4 65 2 25
# 0 37 2 13 1 21 3 89 4 55
# 0 86 1 74 4 88 2 48 3 79
# 1 69 2 51 0 11 3 89 4 74
# 0 13 1 7 2 76 3 52 4 45
power = {
0: {0:92, 1:49, 2:93, 3:48, 4:1, 5:52, 6:57, 7:16, 8:6, 9:6, 10:19, 11:96, 12:27, 13:76, 14:60},
1: {0:4, 1:96, 2:52, 3:87, 4:94, 5:83, 6:9, 7:85, 8:47, 9:63, 10:31, 11:26, 12:46, 13:49, 14:48},
2: {0:34, 1:34, 2:37, 3:82, 4:25, 5:43, 6:11, 7:71, 8:55, 9:34, 10:77, 11:20, 12:89, 13:23, 14:32},
3: {0:49, 1:12, 2:52, 3:76, 4:64, 5:51, 6:84, 7:42, 8:5, 9:45, 10:20, 11:93, 12:48, 13:75, 14:100},
4: {0:35, 1:1, 2:15, 3:49, 4:78, 5:80, 6:99, 7:88, 8:24, 9:20, 10:100, 11:28, 12:71, 13:1, 14:7},
5: {0:69, 1:24, 2:21, 3:3, 4:28, 5:8, 6:42, 7:33, 8:40, 9:50, 10:8, 11:5, 12:13, 13:42, 14:73},
6: {0:83, 1:15, 2:62, 3:27, 4:5, 5:65, 6:100, 7:65, 8:82, 9:89, 10:81, 11:92, 12:38, 13:47, 14:96},
7: {0:98, 1:24, 2:75, 3:57, 4:93, 5:74, 6:10, 7:44, 8:59, 9:51, 10:82, 11:65, 12:8, 13:12, 14:24},
8: {0:55, 1:44, 2:47, 3:75, 4:81, 5:30, 6:42, 7:100, 8:81, 9:29, 10:31, 11:47, 12:34, 13:77, 14:92},
9: {0:18, 1:42, 2:37, 3:1, 4:67, 5:20, 6:91, 7:21, 8:57, 9:100, 10:100, 11:59, 12:77, 13:21, 14:98},
10: {0:42, 1:16, 2:19, 3:70, 4:7, 5:74, 6:7, 7:50, 8:74, 9:46, 10:88, 11:71, 12:42, 13:34, 14:60},
11: {0:12, 1:45, 2:7, 3:15, 4:22, 5:31, 6:70, 7:88, 8:46, 9:44, 10:45, 11:87, 12:5, 13:99, 14:70},
12: {0:51, 1:39, 2:50, 3:39, 4:23, 5:28, 6:49, 7:5, 8:17, 9:40, 10:30, 11:62, 12:65, 13:84, 14:12},
13: {0:92, 1:67, 2:85, 3:88, 4:18, 5:13, 6:70, 7:69, 8:20, 9:52, 10:42, 11:82, 12:19, 13:21, 14:5},
14: {0:34, 1:60, 2:52, 3:70, 4:51, 5:2, 6:43, 7:75, 8:45, 9:53, 10:96, 11:1, 12:44, 13:66, 14:19},
15: {0:31, 1:44, 2:84, 3:16, 4:10, 5:4, 6:48, 7:67, 8:11, 9:21, 10:78, 11:42, 12:44, 13:37, 14:35},
16: {0:20, 1:40, 2:37, 3:68, 4:42, 5:11, 6:6, 7:44, 8:43, 9:17, 10:3, 11:77, 12:100, 13:82, 14:5},
17: {0:14, 1:5, 2:40, 3:70, 4:63, 5:59, 6:42, 7:74, 8:32, 9:50, 10:21, 11:29, 12:83, 13:64, 14:45},
18: {0:70, 1:28, 2:79, 3:25, 4:98, 5:24, 6:54, 7:65, 8:93, 9:74, 10:22, 11:73, 12:75, 13:69, 14:9},
19: {0:100, 1:46, 2:69, 3:41, 4:3, 5:18, 6:41, 7:94, 8:97, 9:30, 10:96, 11:7, 12:86, 13:83, 14:90}
}
machine = {
0: {0:3, 1:1, 2:2, 3:6, 4:0, 5:4, 6:5, 7:8, 8:12, 9:13, 10:11, 11:9, 12:7, 13:14, 14:10},
1: {0:5, 1:3, 2:6, 3:1, 4:2, 5:4, 6:0, 7:11, 8:10, 9:8, 10:9, 11:13, 12:12, 13:7, 14:14},
2: {0:1, 1:6, 2:4, 3:2, 4:0, 5:5, 6:3, 7:9, 8:14, 9:7, 10:11, 11:12, 12:8, 13:10, 14:13},
3: {0:3, 1:5, 2:6, 3:2, 4:0, 5:1, 6:4, 7:10, 8:12, 9:7, 10:8, 11:11, 12:14, 13:13, 14:9},
4: {0:2, 1:1, 2:3, 3:6, 4:5, 5:4, 6:0, 7:9, 8:7, 9:11, 10:10, 11:8, 12:14, 13:13, 14:12},
5: {0:3, 1:6, 2:5, 3:4, 4:1, 5:2, 6:0, 7:10, 8:11, 9:9, 10:8, 11:13, 12:12, 13:7, 14:14},
6: {0:0, 1:4, 2:2, 3:6, 4:5, 5:1, 6:3, 7:14, 8:10, 9:7, 10:13, 11:9, 12:8, 13:11, 14:12},
7: {0:6, 1:4, 2:2, 3:0, 4:1, 5:3, 6:5, 7:7, 8:13, 9:11, 10:12, 11:14, 12:10, 13:8, 14:9},
8: {0:4, 1:0, 2:3, 3:5, 4:2, 5:6, 6:1, 7:10, 8:8, 9:7, 10:13, 11:9, 12:11, 13:12, 14:14},
9: {0:2, 1:5, 2:0, 3:4, 4:3, 5:6, 6:1, 7:8, 8:14, 9:12, 10:10, 11:11, 12:13, 13:9, 14:7},
10: {0:3, 1:1, 2:4, 3:6, 4:2, 5:0, 6:5, 7:12, 8:9, 9:8, 10:14, 11:13, 12:10, 13:7, 14:11},
11: {0:6, 1:4, 2:2, 3:0, 4:1, 5:3, 6:5, 7:13, 8:9, 9:8, 10:14, 11:12, 12:11, 13:7, 14:10},
12: {0:4, 1:5, 2:0, 3:2, 4:3, 5:6, 6:1, 7:13, 8:12, 9:14, 10:10, 11:11, 12:8, 13:7, 14:9},
13: {0:6, 1:0, 2:5, 3:1, 4:3, 5:4, 6:2, 7:7, 8:14, 9:13, 10:8, 11:11, 12:10, 13:12, 14:9},
14: {0:4, 1:0, 2:1, 3:5, 4:2, 5:6, 6:3, 7:10, 8:11, 9:8, 10:12, 11:13, 12:14, 13:7, 14:9},
15: {0:6, 1:1, 2:0, 3:3, 4:4, 5:2, 6:5, 7:13, 8:14, 9:12, 10:8, 11:7, 12:11, 13:9, 14:10},
16: {0:1, 1:4, 2:3, 3:2, 4:6, 5:0, 6:5, 7:10, 8:11, 9:12, 10:14, 11:7, 12:13, 13:9, 14:8},
17: {0:5, 1:0, 2:3, 3:1, 4:4, 5:2, 6:6, 7:9, 8:13, 9:7, 10:10, 11:14, 12:12, 13:11, 14:8},
18: {0:6, 1:0, 2:3, 3:4, 4:5, 5:2, 6:1, 7:12, 8:13, 9:10, 10:7, 11:9, 12:11, 13:8, 14:14},
19: {0:5, 1:2, 2:4, 3:3, 4:1, 5:6, 6:0, 7:8, 8:11, 9:12, 10:14, 11:7, 12:9, 13:13, 14:10}
}
# used_machine ={}
debug=False
# def prepare_used_machine_dict(j, i, m, tuple):
# input1 = 215013424301243205154530
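# `solution` decodes an operation-based job sequence into a schedule and
# returns its makespan: the k-th occurrence of job j in `li` denotes job j's
# k-th operation. Each operation (j, i) finishes at
#   max(finish of (j, i-1), finish of the last operation on machine[j][i])
#   + power[j][i]
# and the makespan is the largest finish time over all operations.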
def solution(li):
    job_data = {}      # job -> index of its current operation
    result_data = {}   # (job, op) -> finish time
    used_machine = {}  # machine -> list of (job, op) already scheduled on it
    for j in li:
        # Advance job j to its next operation.
        if j in job_data:
            job_data[j] = job_data[j] + 1
        else:
            job_data[j] = 0
        i = job_data[j]
        m = machine[j][i]
        key = (j, i)
        if i > 0:
            if m in used_machine:
                # Wait for both the machine and the job's previous operation.
                my_list = used_machine[m]
                prev_key = (j, i - 1)
                result_data[key] = max(result_data[my_list[-1]], result_data[prev_key]) + power[j][i]
            else:
                # Machine is free; only the job's previous operation matters.
                prev_key = (j, i - 1)
                result_data[key] = result_data[prev_key] + power[j][i]
        else:
            if m in used_machine:
                # First operation of the job; wait for the machine only.
                my_list = used_machine[m]
                result_data[key] = result_data[my_list[-1]] + power[j][i]
            else:
                result_data[key] = power[j][i]
        if m in used_machine:
            used_machine[m].append(key)
        else:
            used_machine[m] = [key]
    # The makespan is the latest finish time over all operations.
    return max(result_data.values())
def generateRandomSequence():
indexes = np.array([i for i in range(0,6)])
# print(indexes)
random.shuffle(indexes)
print("Initial Solution",indexes)
return indexes.tolist()
def swapTwoJobs(seq,pos1,pos2):
seq[pos1], seq[pos2] = seq[pos2], seq[pos1]
return seq
def simulated_annealing(old_seq,Ti = 10000,Tf = 1 ,alpha = 0.80):
#Number of jobs given
n = len(old_seq)
    s = time.time()  # start timer
# neh=NEH()
# #Initialize the primary seq
# # old_seq,schedules,old_makeSpan, _ = self.palmer_heuristic()
# # old_seq, schedules, old_makeSpan, _ = n.nehAlgo(self.data, self.nb_machines, self.nb_jobs)
# old_seq, old_makeSpan = neh.nehAlgo(
# self.data, self.nb_machines, self.nb_jobs)
# print('Initial Sequence of NEH =>', old_seq)
# print('Initial Sequence of NEH makespan =>', old_makeSpan)
# old_seq=self.generateRandomSequence()
# old_makeSpan = commonFunction.makespan(old_seq, self.data, self.nb_machines)[
# self.nb_machines - 1][len(old_seq)]
# old_seq = [3,4,2,4,2,3,0,1,2,4,3,2,0,5,1,5,4,5,3,0,1,2,3,4]
old_makeSpan = solution(old_seq)
if debug:
print('Initial Sequence =>', old_seq)
print('Initial Sequence makespan =>', old_makeSpan)
bestSolution = old_makeSpan
bestSequence = old_seq
new_seq = []
delta_mk1 = 0
#Initialize the temperature
if debug:
print("Initial Temperature =>",Ti)
print("Final Temperature =>",Tf)
print("Alpha",alpha)
    T = Ti
# of iterations
temp_cycle = 0
count =0
count2 = 0
while T >= Tf :
count = count+1
for i1 in range(30):
count2 +=1
new_seq = old_seq.copy()
# Insertion method
# job = new_seq.pop(randint(0,n-1))
# new_seq.insert(randint(0,n-1),job) # Swap and insertion for new sequence
#Swap Method
u,v=random.randint(0, n-1), random.randint(0, n-1)
job=u
new_seq=swapTwoJobs(new_seq,u,v)
# print("new_seq ",new_seq)
# Call solution
new_make_span = solution(new_seq)
# new_make_span = commonFunction.makespan(new_seq, self.data, self.nb_machines)[
# self.nb_machines - 1][len(new_seq)]
# new_make_span = self._get_makespan(new_seq,self.data)
if debug:
print('Job :',job)
print('New Sequence :', new_seq)
print('New Sequence make span:', new_make_span)
delta_mk1 = new_make_span - old_makeSpan
# print("Delta ",delta_mk1)
# if delta_mk1 <= 0:
# old_seq = new_seq
# old_makeSpan = new_make_span
            r = (old_seq == new_seq)
            if debug:
                print('Check Sequence Change', r)
            if not r:
if new_make_span < old_makeSpan:
if debug:
print("MakeSpan Swap Sequence", "new_make_span =>",new_make_span," old_makeSpan=>",old_makeSpan)
old_seq = new_seq
old_makeSpan = new_make_span
else :
delta_mk1 = new_make_span - old_makeSpan
Aprob = np.exp((-1*(delta_mk1)/T))
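                    # Metropolis acceptance: a worse move of size delta is kept
                    # with probability exp(-delta/T). For example, delta = 40 at
                    # T = 1000 gives exp(-0.04) ~= 0.96 (almost always accepted),
                    # while the same delta at T = 10 gives exp(-4) ~= 0.018
                    # (rarely accepted), so the search gradually becomes greedy.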
                    p = np.random.uniform(0.1, 0.9)
                    if debug:
                        print("Probability", p)
                        print("Delta Change ", delta_mk1)
                        print("Aprob => ", Aprob)
                        print(p < Aprob)
                    if p < Aprob:
old_seq = new_seq
old_makeSpan = new_make_span
                        if debug:
                            print("Probability Swap Sequence")
else :
#The solution is discarded
if debug:
print("Discard Iteration")
print('Old Sequence :',old_seq)
pass
T = T * alpha
if debug:
print("New Temperature=>",T)
temp_cycle += 1
if bestSolution > old_makeSpan:
bestSolution = old_makeSpan
bestSequence = old_seq
if debug:
print("Best Solution Swap")
print('New Swap Sequence :', bestSequence)
print('New Sequence make span:', bestSolution)
print("Iteration Count:", count)
    e = time.time()  # stop timer
if debug:
print("Best Sequence",bestSequence)
print("Best MakeSpan", bestSolution)
print("Iteration Count:", count)
#Result Sequence
# seq=bestSequence
# old_makeSpan=bestSolution
print(bestSolution-old_makeSpan)
d = float((bestSolution-old_makeSpan)/old_makeSpan)
return bestSequence, bestSolution, e-s, d
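# With the default parameters (Ti=10000, Tf=1, alpha=0.8) the geometric
# schedule T <- 0.8*T runs for ceil(ln(10000)/ln(1.25)) = 42 temperature
# steps, i.e. about 42 * 30 = 1,260 candidate evaluations per call.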
# old_seq = [3,4,2,4,2,3,0,1,2,4,3,2,0,5,1,5,4,5,3,0,1,2,3,4] ,0,4,3,2,1,3,0,4,2,3,2,1,0,3,1,0,2,4,3,0,1,2,3,0,4,2,3,1,4,1
new_seq = [16,17,0,19,10,7,4,1,16,7,4,15,5,16,14,
7,0,2,19,5,7,1,4,16,7,18,15,5,2,19,
16,3,7,4,18,15,5,16,10,18,19,7,2,3,18,
12,15,17,19,7,18,4,6,7,12,18,19,1,15,3,
7,12,18,4,5,14,19,12,14,18,17,18,10,7,12,
2,17,15,13,1,7,6,4,10,7,13,3,6,2,0,
7,4,13,10,0,6,1,7,13,8,5,6,8,3,13,
12,14,0,4,10,6,8,13,16,2,5,10,15,19,13,
3,14,6,8,4,13,1,8,6,13,17,13,18,5,17,
9,11,0,8,1,11,6,8,10,11,0,4,9,3,11,
8,15,6,5,11,15,8,18,10,12,19,0,15,2,11,
9,18,14,6,8,19,10,11,4,12,14,16,8,19,9,
16,11,1,12,10,16,15,8,11,6,16,8,18,12,5,
8,11,13,0,10,12,9,0,3,8,11,6,10,4,17,
9,1,14,6,0,19,17,14,2,5,6,17,0,16,4,
11,9,13,10,14,11,3,2,5, 16,18,9,11, 17,4,
1,1,5,9,10,3,15,13,16,17,2,3,18,12,15,5,
9,11,15,0,17,12,13,2,1,3,14,12,14,5,9,
15,17,2,1,19,0,12,3,13,0,9,19,2,17,14,
16,3,1,14,9,17,3,14,2,19,9,2,1,9]
n = len(new_seq)
for j in range(5):
new_seq, best_value, best_time, delta = simulated_annealing(new_seq)
print("Best Sequence",new_seq)
print("Best MakeSpan", best_value)
print("Time duration", best_time)
print("Delta ", delta)
for i in range(10):
u,v=random.randint(0, n-1), random.randint(0, n-1)
new_seq=swapTwoJobs(new_seq,u,v)
#print("New Random Seq ", new_seq)
# generateRandomSequence()
# generateRandomSequence()
# print(generateRandomSequence())
# print(solution(old_seq))
# solution(old_seq)
# solution(new_seq)
# solution(424230124320515453012343)
# solution(242301243205154530123434)
# solution(423012432051545301234342)
# print()
# pprint.pprint(used_machine)
| 41.779503 | 123 | 0.499145 |
| 4a06e997a7c2b97ac26e09bfcfa020bd92baeab4 | 44 | py | Python | social_app_django_multitenant/tests/__init__.py | CitooZz/Social-App-Django-Multitenant | df2b163263cf73bf5e85bbe97a4a98d173cc775c | ["MIT"] | null | null | null | social_app_django_multitenant/tests/__init__.py | CitooZz/Social-App-Django-Multitenant | df2b163263cf73bf5e85bbe97a4a98d173cc775c | ["MIT"] | 1 | 2018-01-09T12:00:41.000Z | 2018-01-09T12:00:41.000Z | social_app_django_multitenant/tests/__init__.py | AgilisHQ/social-app-django-multitenant | 2081884ecc09c8026d1e15e123e8c68a2595f73b | ["MIT"] | null | null | null |
from .test_multitenant_social_auth import *
| 22 | 43 | 0.863636 |
| 4a06ea0e9a21764295f806e8a97e0a5852ba8dc1 | 3,455 | py | Python | purity_fb/purity_fb_1dot6/models/bucket_post.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | ["Apache-2.0"] | 5 | 2017-09-08T20:47:22.000Z | 2021-06-29T02:11:05.000Z | purity_fb/purity_fb_1dot6/models/bucket_post.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | ["Apache-2.0"] | 16 | 2017-11-27T20:57:48.000Z | 2021-11-23T18:46:43.000Z | purity_fb/purity_fb_1dot6/models/bucket_post.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | ["Apache-2.0"] | 22 | 2017-10-13T15:33:05.000Z | 2021-11-08T19:56:21.000Z |
# coding: utf-8
"""
Pure Storage FlashBlade REST 1.6 Python SDK
Pure Storage FlashBlade REST 1.6 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.6
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class BucketPost(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
#BEGIN_CUSTOM
# IR-51527: Prevent Pytest from attempting to collect this class based on name.
__test__ = False
#END_CUSTOM
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account': 'Reference'
}
attribute_map = {
'account': 'account'
}
def __init__(self, account=None): # noqa: E501
"""BucketPost - a model defined in Swagger""" # noqa: E501
self._account = None
self.discriminator = None
if account is not None:
self.account = account
@property
def account(self):
"""Gets the account of this BucketPost. # noqa: E501
the account of the bucket # noqa: E501
:return: The account of this BucketPost. # noqa: E501
:rtype: Reference
"""
return self._account
@account.setter
def account(self, account):
"""Sets the account of this BucketPost.
the account of the bucket # noqa: E501
:param account: The account of this BucketPost. # noqa: E501
:type: Reference
"""
self._account = account
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(BucketPost, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BucketPost):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
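# A minimal usage sketch (assumes the swagger-generated `Reference` model from
# the same package; the attribute values are illustrative only):
#
#     post = BucketPost()
#     post.account = reference_to_account  # a Reference instance
#     print(post.to_dict())                # {'account': ...}
#     print(post)                          # pretty-printed via to_str()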
| 28.089431 | 204 | 0.571925 |
| 4a06ea9500770987fb29dd1e4154cd2680ee95e5 | 9,366 | py | Python | pandas/core/dtypes/base.py | henighan/pandas | 45d8d77f27cf0dbc8cefe932f8fb64f6982b9527 | ["BSD-3-Clause"] | 1 | 2020-11-11T05:28:41.000Z | 2020-11-11T05:28:41.000Z | pandas/core/dtypes/base.py | ivan-vasilev/pandas | 4071dde86e33434e1bee8304fa62074949f813cc | ["BSD-3-Clause"] | null | null | null | pandas/core/dtypes/base.py | ivan-vasilev/pandas | 4071dde86e33434e1bee8304fa62074949f813cc | ["BSD-3-Clause"] | null | null | null |
"""Extend pandas with custom array types"""
from typing import Any, List, Optional, Tuple, Type
import numpy as np
from pandas.errors import AbstractMethodError
from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries
class ExtensionDtype:
"""
A custom data type, to be paired with an ExtensionArray.
.. versionadded:: 0.23.0
See Also
--------
extensions.register_extension_dtype
extensions.ExtensionArray
Notes
-----
The interface includes the following abstract methods that must
be implemented by subclasses:
* type
* name
* construct_from_string
The following attributes influence the behavior of the dtype in
pandas operations
* _is_numeric
* _is_boolean
Optionally one can override construct_array_type for construction
with the name of this dtype via the Registry. See
:meth:`extensions.register_extension_dtype`.
* construct_array_type
The `na_value` class attribute can be used to set the default NA value
for this type. :attr:`numpy.nan` is used by default.
ExtensionDtypes are required to be hashable. The base class provides
a default implementation, which relies on the ``_metadata`` class
attribute. ``_metadata`` should be a tuple containing the strings
that define your data type. For example, with ``PeriodDtype`` that's
the ``freq`` attribute.
**If you have a parametrized dtype you should set the ``_metadata``
class property**.
Ideally, the attributes in ``_metadata`` will match the
parameters to your ``ExtensionDtype.__init__`` (if any). If any of
the attributes in ``_metadata`` don't implement the standard
``__eq__`` or ``__hash__``, the default implementations here will not
work.
.. versionchanged:: 0.24.0
Added ``_metadata``, ``__hash__``, and changed the default definition
of ``__eq__``.
For interaction with Apache Arrow (pyarrow), a ``__from_arrow__`` method
can be implemented: this method receives a pyarrow Array or ChunkedArray
as only argument and is expected to return the appropriate pandas
ExtensionArray for this dtype and the passed values::
class ExtensionDtype:
def __from_arrow__(
self, array: pyarrow.Array/ChunkedArray
) -> ExtensionArray:
...
This class does not inherit from 'abc.ABCMeta' for performance reasons.
Methods and properties required by the interface raise
``pandas.errors.AbstractMethodError`` and no ``register`` method is
provided for registering virtual subclasses.
"""
_metadata: Tuple[str, ...] = ()
def __str__(self) -> str:
return self.name
def __eq__(self, other: Any) -> bool:
"""
Check whether 'other' is equal to self.
By default, 'other' is considered equal if either
* it's a string matching 'self.name'.
        * it's an instance of this type and all of the
          attributes in ``self._metadata`` are equal between
`self` and `other`.
Parameters
----------
other : Any
Returns
-------
bool
"""
if isinstance(other, str):
try:
other = self.construct_from_string(other)
except TypeError:
return False
if isinstance(other, type(self)):
return all(
getattr(self, attr) == getattr(other, attr) for attr in self._metadata
)
return False
def __hash__(self) -> int:
return hash(tuple(getattr(self, attr) for attr in self._metadata))
def __ne__(self, other) -> bool:
return not self.__eq__(other)
@property
def na_value(self):
"""
Default NA value to use for this type.
This is used in e.g. ExtensionArray.take. This should be the
user-facing "boxed" version of the NA value, not the physical NA value
for storage. e.g. for JSONArray, this is an empty dictionary.
"""
return np.nan
@property
def type(self) -> Type:
"""
The scalar type for the array, e.g. ``int``
It's expected ``ExtensionArray[item]`` returns an instance
of ``ExtensionDtype.type`` for scalar ``item``, assuming
that value is valid (not NA). NA values do not need to be
instances of `type`.
"""
raise AbstractMethodError(self)
@property
def kind(self) -> str:
"""
A character code (one of 'biufcmMOSUV'), default 'O'
This should match the NumPy dtype used when the array is
converted to an ndarray, which is probably 'O' for object if
the extension type cannot be represented as a built-in NumPy
type.
See Also
--------
numpy.dtype.kind
"""
return "O"
@property
def name(self) -> str:
"""
A string identifying the data type.
Will be used for display in, e.g. ``Series.dtype``
"""
raise AbstractMethodError(self)
@property
def names(self) -> Optional[List[str]]:
"""
Ordered list of field names, or None if there are no fields.
This is for compatibility with NumPy arrays, and may be removed in the
future.
"""
return None
@classmethod
def construct_array_type(cls):
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
raise NotImplementedError
@classmethod
def construct_from_string(cls, string: str):
r"""
Construct this type from a string.
This is useful mainly for data types that accept parameters.
For example, a period dtype accepts a frequency parameter that
can be set as ``period[H]`` (where H means hourly frequency).
By default, in the abstract class, just the name of the type is
expected. But subclasses can overwrite this method to accept
parameters.
Parameters
----------
string : str
The name of the type, for example ``category``.
Returns
-------
ExtensionDtype
Instance of the dtype.
Raises
------
TypeError
If a class cannot be constructed from this 'string'.
Examples
--------
For extension dtypes with arguments the following may be an
adequate implementation.
>>> @classmethod
... def construct_from_string(cls, string):
... pattern = re.compile(r"^my_type\[(?P<arg_name>.+)\]$")
... match = pattern.match(string)
... if match:
... return cls(**match.groupdict())
... else:
        ...         raise TypeError(
        ...             f"Cannot construct a '{cls.__name__}' from '{string}'"
        ...         )
"""
if not isinstance(string, str):
raise TypeError(f"Expects a string, got {type(string).__name__}")
if string != cls.name:
raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
return cls()
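    # Illustrative sketch (not part of pandas): a minimal parametrized dtype
    # tying ``_metadata`` and ``construct_from_string`` together. ``unit`` is a
    # hypothetical parameter used only for this example.
    #
    #     class UnitDtype(ExtensionDtype):
    #         _metadata = ("unit",)
    #         def __init__(self, unit="m"):
    #             self.unit = unit
    #         @property
    #         def name(self) -> str:
    #             return f"unit[{self.unit}]"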
@classmethod
def is_dtype(cls, dtype) -> bool:
"""
Check if we match 'dtype'.
Parameters
----------
dtype : object
The object to check.
Returns
-------
is_dtype : bool
Notes
-----
The default implementation is True if
1. ``cls.construct_from_string(dtype)`` is an instance
of ``cls``.
2. ``dtype`` is an object and is an instance of ``cls``
3. ``dtype`` has a ``dtype`` attribute, and any of the above
conditions is true for ``dtype.dtype``.
"""
dtype = getattr(dtype, "dtype", dtype)
if isinstance(dtype, (ABCSeries, ABCIndexClass, ABCDataFrame, np.dtype)):
# https://github.com/pandas-dev/pandas/issues/22960
# avoid passing data to `construct_from_string`. This could
# cause a FutureWarning from numpy about failing elementwise
# comparison from, e.g., comparing DataFrame == 'category'.
return False
elif dtype is None:
return False
elif isinstance(dtype, cls):
return True
if isinstance(dtype, str):
try:
return cls.construct_from_string(dtype) is not None
except TypeError:
return False
return False
@property
def _is_numeric(self) -> bool:
"""
Whether columns with this dtype should be considered numeric.
By default ExtensionDtypes are assumed to be non-numeric.
They'll be excluded from operations that exclude non-numeric
columns, like (groupby) reductions, plotting, etc.
"""
return False
@property
def _is_boolean(self) -> bool:
"""
Whether this dtype should be considered boolean.
By default, ExtensionDtypes are assumed to be non-numeric.
Setting this to True will affect the behavior of several places,
e.g.
* is_bool
* boolean indexing
Returns
-------
bool
"""
return False
| 29.828025 | 86 | 0.595772 |
| 4a06eac655fb4463472edfa7578203ddda643b43 | 446 | py | Python | pandas/read_custom_data_format_4.py | tisnik/poc-schema-checks | 80499bd3a1de0b949e85e5f0fea9f945cafd012e | ["Apache-2.0"] | null | null | null | pandas/read_custom_data_format_4.py | tisnik/poc-schema-checks | 80499bd3a1de0b949e85e5f0fea9f945cafd012e | ["Apache-2.0"] | null | null | null | pandas/read_custom_data_format_4.py | tisnik/poc-schema-checks | 80499bd3a1de0b949e85e5f0fea9f945cafd012e | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
# vim: set fileencoding=utf-8
"""Reading data file with custom format. separator + skip rows specification."""
import pandas
df = pandas.read_csv("denni_kurz.txt", sep="|", skiprows=1)
df["kurz"] = pandas.to_numeric(df["kurz"].str.replace(',','.'), errors='coerce')
print("Data frame")
print("---------------------------")
print(df)
print()
print("Column types")
print("---------------------------")
print(df.dtypes)
| 22.3 | 80 | 0.598655 |
| 4a06eb1c09c8346cb90cba58ffd4aa8584c54dde | 1,726 | py | Python | conu/fixtures/__init__.py | lslebodn/conu | dee6fd958471f77d1c0511b031ea136dfaf8a77a | ["MIT"] | 95 | 2018-05-19T14:35:08.000Z | 2022-01-08T23:31:40.000Z | conu/fixtures/__init__.py | lslebodn/conu | dee6fd958471f77d1c0511b031ea136dfaf8a77a | ["MIT"] | 179 | 2017-09-12T11:14:30.000Z | 2018-04-26T05:36:13.000Z | conu/fixtures/__init__.py | lslebodn/conu | dee6fd958471f77d1c0511b031ea136dfaf8a77a | ["MIT"] | 16 | 2018-05-09T14:15:32.000Z | 2021-08-02T21:11:33.000Z |
# -*- coding: utf-8 -*-
#
# Copyright Contributors to the Conu project.
# SPDX-License-Identifier: MIT
#
"""
This submodule contains `pytest <https://docs.pytest.org/en/latest/>`_ fixtures
which can be utilized when writing tests for your containers while using conu
and pytest.
"""
import logging
from conu import DockerBackend, PodmanBackend
from conu.backend.buildah.backend import BuildahBackend
from conu.utils import run_cmd
import pytest
@pytest.fixture()
def docker_backend():
"""
    pytest fixture which mimics a context manager: it provides a new instance of DockerBackend and
    cleans up after it once it's used; sample usage:
::
def test_my_container(docker_backend):
image = docker_backend.ImageClass("fedora", tag="27")
:return: instance of DockerBackend
"""
with DockerBackend(logging_level=logging.DEBUG) as backend:
yield backend
backend._clean()
@pytest.fixture()
def podman_backend():
"""
    pytest fixture which mimics a context manager: it provides a new instance of PodmanBackend and
    cleans up after it once it's used; behaves the same as the docker_backend fixture
:return: instance of PodmanBackend
"""
with PodmanBackend(logging_level=logging.DEBUG) as backend:
yield backend
backend._clean()
@pytest.fixture()
def buildah_backend():
"""
    pytest fixture which mimics a context manager: it provides a new instance of BuildahBackend and
    cleans up after it once it's used; behaves the same as the docker_backend fixture
:return: instance of BuildahBackend
"""
with BuildahBackend(logging_level=logging.DEBUG) as backend:
yield backend
backend._clean()
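# Example usage in a test module (sketch; the image name and tag are
# illustrative):
#
#     def test_my_container_podman(podman_backend):
#         image = podman_backend.ImageClass("fedora", tag="31")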
| 27.396825 | 99 | 0.705678 |
| 4a06eb1d74c532878be72406f68886e0cbcd79af | 2,187 | py | Python | tutorials/filter_edges.py | ChristophReich1996/kornia | 35f955b46e8015da1cb9faa28c6943ec2b09cc2a | ["ECL-2.0", "Apache-2.0"] | null | null | null | tutorials/filter_edges.py | ChristophReich1996/kornia | 35f955b46e8015da1cb9faa28c6943ec2b09cc2a | ["ECL-2.0", "Apache-2.0"] | null | null | null | tutorials/filter_edges.py | ChristophReich1996/kornia | 35f955b46e8015da1cb9faa28c6943ec2b09cc2a | ["ECL-2.0", "Apache-2.0"] | null | null | null |
"""
Compute filter edges
====================
In this tutorial we are going to learn how to compute the first order and second
order derivatives of an image using `kornia.filters`.
"""
import cv2
import numpy as np
import torch
import torchvision
from matplotlib import pyplot as plt
import kornia
#############################
# We use OpenCV to load an image to memory represented in a numpy.ndarray
img_bgr: np.ndarray = cv2.imread('./data/doraemon.png', cv2.IMREAD_COLOR)
#############################
# Convert the numpy array to torch
x_bgr: torch.Tensor = kornia.image_to_tensor(img_bgr)
x_rgb: torch.Tensor = kornia.bgr_to_rgb(x_bgr)
#############################
# Create batch and normalize
x_rgb = x_rgb.expand(2, -1, -1, -1)  # 2xCxHxW
x_gray = kornia.rgb_to_grayscale(x_rgb.float() / 255.)
def imshow(input: torch.Tensor):
out: torch.Tensor = torchvision.utils.make_grid(input, nrow=2, padding=1)
out_np: np.ndarray = kornia.tensor_to_image(out)
plt.imshow(out_np)
plt.axis('off')
#############################
# Show original
imshow(x_rgb)
#################################
# Compute the 1st order derivatives
grads: torch.Tensor = kornia.spatial_gradient(x_gray, order=1) # BxCx2xHxW
grads_x = grads[:, :, 0]
grads_y = grads[:, :, 1]
#################################
# Show first derivatives in x
imshow(grads_x)
#################################
# Show first derivatives in y
imshow(grads_y)
#################################
# Sobel Edges
# Once we have the gradients in the two directions we can compute the Sobel edges.
# However, in kornia we already have it implemented.
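# For reference, the first-order Sobel kernel along x is
#   [[-1, 0, 1],
#    [-2, 0, 2],
#    [-1, 0, 1]]
# and the y kernel is its transpose; kornia.sobel combines both gradients
# into an edge-magnitude map.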
x_sobel: torch.Tensor = kornia.sobel(x_gray)
imshow(x_sobel)
#################################
# Compute the 2nd order derivatives
grads: torch.Tensor = kornia.spatial_gradient(x_gray, order=2) # BxCx2xHxW
grads_x = grads[:, :, 0]
grads_y = grads[:, :, 1]
#################################
# Show second derivatives in x
imshow(grads_x)
#################################
# Show second derivatives in y
imshow(grads_y)
#################################
# Compute Laplacian Edges
x_laplacian: torch.Tensor = kornia.laplacian(x_gray, kernel_size=5)
imshow(x_laplacian)
| 27 | 80 | 0.606767 |
| 4a06ebba588f0911fd38861a9a4519f417d71f96 | 2,391 | py | Python | data/water/CMEMS_ocean_pH/viz/generate.py | ilopezgp/human_impacts | b2758245edac0946080a647f1dbfd1098c0f0b27 | ["MIT"] | 4 | 2020-08-25T00:52:01.000Z | 2020-11-16T16:57:46.000Z | data/water/CMEMS_ocean_pH/viz/generate.py | ilopezgp/human_impacts | b2758245edac0946080a647f1dbfd1098c0f0b27 | ["MIT"] | 5 | 2020-10-30T21:22:55.000Z | 2021-12-30T02:07:02.000Z | data/water/CMEMS_ocean_pH/viz/generate.py | ilopezgp/human_impacts | b2758245edac0946080a647f1dbfd1098c0f0b27 | ["MIT"] | 2 | 2020-08-28T10:11:28.000Z | 2020-11-11T07:58:46.000Z |
#%%
import numpy as np
import pandas as pd
import altair as alt
import anthro.io
# Load the processed ocean pH data.
data = pd.read_csv('../processed/CMEMS_average_ocean_pH.csv')
data['year'] = pd.to_datetime(data['year'], format='%Y')
data['pH'] = round(data['pH'], 3)
#%%
# Generate a plot for global average surface pH
chart = alt.Chart(data).encode(
x=alt.X(field='year', type='temporal', timeUnit='year', title='year'),
y=alt.Y(field='pH', type='quantitative', title='ocean pH', scale=alt.Scale(domain=[8.05, 8.12])),
tooltip=[alt.Tooltip(field='year', type='temporal', title='year', format='%Y'),
alt.Tooltip(field='pH', type='nominal', title='pH')]
).properties(width='container', height=300)
# Add uncertainty bands
bands = chart.mark_area(color='dodgerblue', fillOpacity=0.4).encode(
x=alt.X(field='year', type='temporal', timeUnit='year', title='year'),
y=alt.Y('pH_uncert_low:Q', scale=alt.Scale(zero=False)),
y2='pH_uncert_high:Q'
).properties(width='container', height=300)
l = chart.mark_line(color='dodgerblue')
p = chart.mark_point(color='dodgerblue', filled=True)
layer = alt.layer(l, p, bands)
layer.save('surface_ocean_pH.json')
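# Design note: width='container' makes the Vega-Lite chart resize to its
# embedding element, and alt.layer stacks the line, points and uncertainty
# band into a single spec saved as JSON.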
# %%
# Load the processed ocean pH trend data.
data = pd.read_csv('../processed/CMEMS_trends_ocean_pH.csv')
data['year'] = pd.to_datetime(data['year'], format='%Y')
agg_data = pd.DataFrame()
agg_data['year'] = data[data['Measure type']=='[H+] percentage trend']['year'][1:]
agg_data['H+ percentage trend'] = data[data['Measure type']=='[H+] percentage trend']['Value'][1:]
pd.set_option("display.max_rows", None, "display.max_columns", None)
#%%
# Generate a plot for the [H+] percentage trend
chart = alt.Chart(agg_data).encode(
x=alt.X(field='year', type='temporal', timeUnit='year', title='year'),
y=alt.Y(field=r'H+ percentage trend', type='quantitative', title=r'[H+] percentage change'),
tooltip=[alt.Tooltip(field='year', type='temporal', title='year', format='%Y'),
alt.Tooltip(field=r'H+ percentage trend', type='nominal', title=r'H+ percentage trend')]
).properties(width='container', height=300)
l = chart.mark_line(color='dodgerblue')
p = chart.mark_point(color='dodgerblue', filled=True)
layer = alt.layer(l, p)
layer.save('percentage_change_H.json')
# %%
| 41.224138 | 109 | 0.648683 |
| 4a06ec29e854c19bb204261b3bf0453fdf347009 | 2,748 | py | Python | nonlinear/nonlinear.py | xi2pi/cardioLPN | 34759fea55f73312ccb8fb645ce2d04a0e2dddea | ["MIT"] | null | null | null | nonlinear/nonlinear.py | xi2pi/cardioLPN | 34759fea55f73312ccb8fb645ce2d04a0e2dddea | ["MIT"] | null | null | null | nonlinear/nonlinear.py | xi2pi/cardioLPN | 34759fea55f73312ccb8fb645ce2d04a0e2dddea | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 10:43:24 2019
@author: CatOnTour
"""
from sympy import *
from cardioLPN import *
from scipy import signal
import matplotlib.pyplot as plt
import numpy as np
def sympy_to_num_den(xpr, s=Symbol('s')):
num, den = simplify(xpr).as_numer_denom() # expressions
p_num_den = poly(num, s), poly(den, s) # polynomials
c_num_den = [expand(p).all_coeffs() for p in p_num_den] # coefficients
l_num, l_den = [lambdify((), c)() for c in c_num_den] # convert to floats
return l_num, l_den
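# Example: for xpr = 1/(s + 2) this returns ([1], [1, 2]), i.e. the numerator
# and denominator coefficients in the form expected by scipy.signal.lti.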
A_result = A_L(0.1)
Y_result = simplify(trans_A2Y(A_result))
num00, den00 = sympy_to_num_den(Y_result[0, 0])
cont_sys_00 = signal.lti(num00, den00)
num01, den01 = sympy_to_num_den(Y_result[0, 1])
cont_sys_01 = signal.lti(num01, den01)
I1 = cont_sys_00.output([2,2,2,2], [0,1,2,3])[1] + cont_sys_01.output([1,1,1,1], [0,1,2,3])[1]
plt.plot(I1)
plt.show()
'''variation of L'''
# step 1
A_result = A_L(0.1)
Y_result = simplify(trans_A2Y(A_result))
num00, den00 = sympy_to_num_den(Y_result[0, 0])
cont_sys_00 = signal.lti(num00, den00)
num01, den01 = sympy_to_num_den(Y_result[0, 1])
cont_sys_01 = signal.lti(num01, den01)
I1_1 = cont_sys_00.output([2,2,2,2], [0,1,2,3])[1] + cont_sys_01.output([1,1,1,1], [0,1,2,3])[1]
# step 2
A_result = A_L(0.2)
Y_result = simplify(trans_A2Y(A_result))
num00, den00 = sympy_to_num_den(Y_result[0, 0])
cont_sys_00 = signal.lti(num00, den00)
num01, den01 = sympy_to_num_den(Y_result[0, 1])
cont_sys_01 = signal.lti(num01, den01)
I1_2 = cont_sys_00.output([2,2,2,2], [3,4,5,6])[1] + cont_sys_01.output([1,1,1,1], [3,4,5,6])[1]
I1 = np.append(I1_1[:-1], I1_2 + I1_1[-1])
plt.plot(I1)
plt.show()
#num10, den10 = sympy_to_num_den(Y_result[1, 0])
#cont_sys_10 = signal.lti(num10, den10)
#
#num11, den11 = sympy_to_num_den(Y_result[1, 1])
#cont_sys_11 = signal.lti(num11, den11)
#
#
#I2 = cont_sys_10.output([2,2,2,2], [0,1,2,3])[1] + cont_sys_11.output([1,1,1,1], [0,1,2,3])[1]
#
#plt.plot(-I2)
#plt.show()
'''old'''
# inputs
#plt.plot(signal.unit_impulse(100))
#plt.show()
#
#plt.plot(np.append([0], np.linspace(0,100,101)),np.append([0], np.ones(101)))
#plt.show()
#A_result = simplify(A_RCR)
#
#num00, den00 = sympy_to_num_den(A_result[0, 0])
#cont_sys_00 = signal.lti(num00, den00)
#
#num01, den01 = sympy_to_num_den(A_result[0, 1])
#cont_sys_01 = signal.lti(num01, den01)
#
#
#u1 = cont_sys_00.step()[1] - cont_sys_01.impulse()[1]
#
#plt.plot(u1)
#plt.show()
#
#num10, den10 = sympy_to_num_den(A_result[1, 0])
#cont_sys_10 = signal.lti(num10, den10)
#
#num11, den11 = sympy_to_num_den(A_result[1, 1])
#cont_sys_11 = signal.lti(num11, den11)
#i2 = cont_sys_10.step()[1] - cont_sys_11.impulse()[1]
#
#plt.plot(i2)
#plt.show()
| 21.984 | 96 | 0.672489 |
| 4a06eca3e2640378ad00296de84e7f92b46b7da1 | 3,427 | py | Python | taxamatcher/_tm_main.py | mmtechslv/taxamatcher | 6a98def85723588a4517a9493db0a3c0fafac0db | ["Apache-2.0"] | 1 | 2019-03-11T20:14:59.000Z | 2019-03-11T20:14:59.000Z | taxamatcher/_tm_main.py | mmtechslv/taxamatcher | 6a98def85723588a4517a9493db0a3c0fafac0db | ["Apache-2.0"] | null | null | null | taxamatcher/_tm_main.py | mmtechslv/taxamatcher | 6a98def85723588a4517a9493db0a3c0fafac0db | ["Apache-2.0"] | 1 | 2019-03-11T20:17:22.000Z | 2019-03-11T20:17:22.000Z |
from . import _jadval as jadval
from . import _tm_constants as TMC
import csv
import os
def error_pass(code):
    """Return True when `code` does not represent an error, i.e. it is neither
    a known error-code string nor an exception class."""
    if type(code) == str:
        return code not in TMC.ERROR_CODES.keys()
    elif type(code) == type:
        return not issubclass(code, BaseException)
    return True
def error_codes():
return TMC.ERROR_CODES.keys()
def error_echo(code):
return TMC.ERROR_CODES[code] if code in TMC.ERROR_CODES.keys() else False
def check_avail_db_type(db_type):
return True if db_type in TMC.AVAIL_DATABASES else 'TPE-1'
def assert_greengenes_taxa_map(map_file):
check = os.path.isfile(map_file) and os.stat(map_file).st_size > 0
return True if check else 'TPE-3'
def assert_silva_taxa_map(map_file):
check = os.path.isfile(map_file) and os.stat(map_file).st_size > 0
return True if check else 'TPE-4'
def assert_lineages(lineage_file):
check = os.path.isfile(lineage_file) and os.stat(lineage_file).st_size > 0
return True if check else 'TPE-5'
def read_db(database_type,map_file):
error_code = False
if database_type == 'greengenes':
file_pass = assert_greengenes_taxa_map(map_file)
if error_pass(file_pass):
            return jadval.Greengenes(map_file)  # Loading Greengenes database
else:
error_code = file_pass
elif database_type == 'silva':
file_pass = assert_silva_taxa_map(map_file)
if error_pass(file_pass):
            return jadval.SILVA(map_file)  # Loading SILVA database
else:
error_code = file_pass
else:
error_code = 'TPE-2'
return error_code
def read_lineages(filename):
error_code = False
file_pass = assert_lineages(filename)
if error_pass(file_pass):
jadval_with_lineages = jadval.Taxa(filename)
removed_taxa = jadval_with_lineages.drop_otus_without_taxa()
return {'lineages':jadval_with_lineages,'removed':removed_taxa}
else:
error_code = file_pass
return error_code
def lineage_correlator(db_type,map_file,lineage_file):
error_code = False
db_avail = check_avail_db_type(db_type)
if error_pass(db_avail):
ref_taxa = read_db(db_type,map_file)
if error_pass(ref_taxa):
target_taxa = read_lineages(lineage_file)
if error_pass(target_taxa):
ids_without_taxa = list(target_taxa['removed'])
target_taxa = target_taxa['lineages']
correlations = target_taxa.load_database(ref_taxa)
return {'removed_taxa':ids_without_taxa,'correlations':correlations['corr_as_series'],'total_assignments':correlations['total']}
else:
error_code = target_taxa
else:
error_code = ref_taxa
else:
error_code = db_avail
return error_code
def csv_output(filename,ref_type,correlations):
corr_data = [[target_id,ref_id] for target_id, ref_id in correlations.items()]
corr_data.sort(key=lambda x:x[0])
headers = ['Target ID',ref_type+' ID']
try:
with open(filename, 'w') as out_file:
csv_writer = csv.writer(out_file)
csv_writer.writerow(headers)
csv_writer.writerows(corr_data)
except Exception as e:
return e
return
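# Sketch of end-to-end use (file names are illustrative):
#
#     result = lineage_correlator('greengenes', 'gg_taxonomy.map', 'lineages.csv')
#     if error_pass(result):
#         csv_output('correlations.csv', 'Greengenes', result['correlations'])
#     else:
#         print(error_echo(result))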
| 32.638095 | 144 | 0.662095 |
| 4a06ed9850c00a48b0321e595d279c60885c5b2f | 22,074 | py | Python | aosm2m/client/onem2m/http/OneM2MRequest.py | droberts-aetheros/sdk-python | bf582174419eb01da7f9cf73092cf977f4a4d2bc | ["MIT"] | null | null | null | aosm2m/client/onem2m/http/OneM2MRequest.py | droberts-aetheros/sdk-python | bf582174419eb01da7f9cf73092cf977f4a4d2bc | ["MIT"] | null | null | null | aosm2m/client/onem2m/http/OneM2MRequest.py | droberts-aetheros/sdk-python | bf582174419eb01da7f9cf73092cf977f4a4d2bc | ["MIT"] | null | null | null |
#!/usr/bin/env python
# Copyright (c) Aetheros, Inc. See COPYRIGHT
import requests, aiohttp, json, random, urllib
from ..OneM2MPrimitive import OneM2MPrimitive
from ..OneM2MOperation import OneM2MOperation
from ..http.OneM2MResponse import OneM2MResponse
from ..OneM2MResource import OneM2MResource, OneM2MResourceContent
from ...exceptions.BaseException import BaseException
from ..http.HttpHeader import HttpHeader
from typing import Dict, Mapping, MutableMapping, Any, List, Optional
import os, ssl
ssl._create_default_https_context = ssl._create_unverified_context
class OneM2MRequest(OneM2MPrimitive):
"""OneM2M request primitive to http mapping.
"""
# Result content args.
# @todo move these to their own class?
M2M_RCN_NOTHING = 0
M2M_RCN_UNSPECIFIED = 1
M2M_RCN_ATTRIBUTES = 1
M2M_RCN_HIERARCHICAL_ADDRESS = 2
M2M_RCN_HIERARCHICAL_ADDRESS_ATTRIBUTES = 3
M2M_RCN_ATTRIBUTES_CHILD_RESOURCES = 4
M2M_RCN_ATTRIBUTES_CHILD_RESOURCE_REFERENCES = 5
M2M_RCN_CHILD_RESOURCE_REFERENCES = 6
M2M_RCN_ORIGINAL_RESOURCE = 7
M2M_RCN_CHILD_RESOURCES = 8
M2M_RCN: List[int] = [
M2M_RCN_NOTHING,
M2M_RCN_UNSPECIFIED,
M2M_RCN_ATTRIBUTES,
M2M_RCN_HIERARCHICAL_ADDRESS,
M2M_RCN_HIERARCHICAL_ADDRESS_ATTRIBUTES,
M2M_RCN_ATTRIBUTES_CHILD_RESOURCES,
M2M_RCN_ATTRIBUTES_CHILD_RESOURCE_REFERENCES,
M2M_RCN_CHILD_RESOURCE_REFERENCES,
M2M_RCN_ORIGINAL_RESOURCE,
M2M_RCN_CHILD_RESOURCES,
]
# ./@todo move these to their own class?
# Required parameters for each onem2m operation. If a requested operation's params
# does not contain all of the corresponding parameters outline here, the function
# performing the operation will raise an RequiredRequestParameterMissingException.
REQUIRED_HTTP_PARAMS: Mapping[str, List[str]] = {
OneM2MOperation.Create: [
# OneM2MPrimitive.M2M_PARAM_OPERATION, # Implied by the function name.
OneM2MPrimitive.M2M_PARAM_TO, # Set in the constructor or the function.
OneM2MPrimitive.M2M_PARAM_FROM, # Set in the constructor?
OneM2MPrimitive.M2M_PARAM_REQUEST_IDENTIFIER, # Generated dynamically.
# OneM2MPrimitive.M2M_PARAM_RESOURCE_TYPE # Set in the constructor
],
OneM2MOperation.Retrieve: [],
OneM2MOperation.Update: [],
OneM2MOperation.Delete: [],
OneM2MOperation.Notify: [],
}
# Query string param shortnames.
# TS-0009-V2.6.1 Table 6.2.2.1-1
M2M_PARAM_RESPONSE_TYPE = 'rt'
M2M_PARAM_RESULT_PERSISTENCE = 'rp'
M2M_PARAM_RESULT_CONTENT = 'rcn'
M2M_PARAM_DELIVERY_AGGREGATION = 'da'
M2M_PARAM_CREATED_BEFORE = 'crb'
M2M_PARAM_CREATED_AFTER = 'cra'
M2M_PARAM_MODIFIED_SINCE = 'ms'
M2M_PARAM_UNMODIFIED_SINCE = 'us'
M2M_PARAM_STATE_TAG_SMALLER = 'sts'
M2M_PARAM_STATE_TAG_BIGGER = 'stb'
M2M_PARAM_EXPIRE_BEFORE = 'exb'
M2M_PARAM_EXPIRE_AFTER = 'exa'
M2M_PARAM_LABELS = 'lbl'
M2M_PARAM_RESOURCE_TYPE = 'ty'
M2M_PARAM_SIZE_ABOVE = 'sza'
M2M_PARAM_SIZE_BELOW = 'szb'
M2M_PARAM_CONTENT_TYPE = 'cty'
M2M_PARAM_LIMIT = 'lim'
M2M_PARAM_ATTRIBUTE = 'atr'
M2M_PARAM_FILTER_USAGE = 'fu'
M2M_PARAM_SEMANTICS_FILTER = 'smf'
M2M_PARAM_DISCOVERY_RESULT_TYPE = 'drt'
M2M_PARAM_ROLE_IDS = 'rids'
M2M_PARAM_TOKEN_IDS = 'tids'
M2M_PARAM_LOCAL_TOKEN_IDS = 'ltids'
M2M_PARAM_TOKEN_REQUEST_INDICATOR = 'tqi'
# Request params (query string).
QUERY_STRING_PARAMS: List[str] = [
M2M_PARAM_RESPONSE_TYPE,
M2M_PARAM_RESULT_PERSISTENCE,
M2M_PARAM_DELIVERY_AGGREGATION,
M2M_PARAM_CREATED_BEFORE,
M2M_PARAM_CREATED_AFTER,
M2M_PARAM_MODIFIED_SINCE,
M2M_PARAM_UNMODIFIED_SINCE,
M2M_PARAM_STATE_TAG_SMALLER,
M2M_PARAM_STATE_TAG_BIGGER,
M2M_PARAM_EXPIRE_BEFORE,
M2M_PARAM_EXPIRE_AFTER,
M2M_PARAM_LABELS,
M2M_PARAM_SIZE_ABOVE,
M2M_PARAM_SIZE_BELOW,
M2M_PARAM_CONTENT_TYPE,
M2M_PARAM_LIMIT,
M2M_PARAM_ATTRIBUTE,
M2M_PARAM_FILTER_USAGE,
M2M_PARAM_SEMANTICS_FILTER,
M2M_PARAM_DISCOVERY_RESULT_TYPE,
M2M_PARAM_ROLE_IDS,
M2M_PARAM_TOKEN_IDS,
M2M_PARAM_LOCAL_TOKEN_IDS,
M2M_PARAM_TOKEN_REQUEST_INDICATOR,
OneM2MPrimitive.M2M_PARAM_RESOURCE_NAME,
]
Parameters = MutableMapping[str, Any]
def __init__(self, to: str = None, params: Parameters = {}):
""" Constructor.
Args:
to: The cse host
params: The request params to convert to http headers.
"""
# Target host.
self.to = to
        # No param requirements can be enforced because we don't know yet which
        # operation will be requested. OneM2M operations are dictated by the member
        # function called on the instance, and param validation is performed there.
self.params = params
def _validate_required_params(self, operation: str, params: Parameters):
"""Validates the required parameters (HTTP mapped ones only) for a specified OneM2M operation (Create, Retrieve, ect).
Args:
op: The OneM2M operation to validate for.
params: The request params to validate.
Raises:
RequiredRequestParameterMissingException: If a required parameter is missing.
"""
# Get the request params keys.
request_params = tuple(params.keys())
        # Determine the operation. Operation and parameters can be specified in the
        # constructor and then overridden in the actual call to the request function
        # (create, retrieve, etc.)
request_operation = (
params[OneM2MPrimitive.M2M_PARAM_OPERATION]
if operation is None
else operation
)
# Get the required parameters for the specified operation.
required_params = self.REQUIRED_HTTP_PARAMS[request_operation]
# Raise an exception if a required param is missing in the request.
for param in required_params:
if param not in request_params:
raise RequiredRequestParameterMissingException(operation, param)
def _validate_query_string_param(self, name: str, value):
"""Validates query string parameters are valid.
Args:
name: The parameter name.
value: The value of the parameter.
Return:
Always True unless preempted by an exception.
Raises:
InvalidOneM2MRequestParameterException: If the parameter name is not valid.
InvalidOneM2MRequestParameterValueException: If the parameter value is invalid (0..1 || 0..N) @todo
"""
if name not in OneM2MRequest.QUERY_STRING_PARAMS:
raise InvalidOneM2MRequestParameterException(name)
return True
        # @todo add param value validation based on multiplicity
        # TS-0009-V2.6.1 Table 6.2.2.2-1 query string param values. Do validation on header values too (are there value constraints)?
def _map_params_to_headers(self, params: Parameters):
""" Converts a OneM2M request parameters to their corresponding HTTP headers.
If the submitted argument contains request parameters that DO NOT map to a
HTTP header, they will be silently ignored.
Args:
params: The OneM2M request parameters to convert to HTTP headers.
Returns:
A dict of HTTP headers and values.
"""
# Header dict to build and return.
header = {}
# Build the request headers.
for param, value in params.items():
# Perform any validation or transformation on request values here.
if param is OneM2MPrimitive.M2M_PARAM_OPERATION:
value = OneM2MPrimitive.OPS_TO_METHOD_MAPPING[
value
] # Transform onem2m operation to http method.
# Avoid key exception on params that map query strings. Map only existing values. Ignore any params not
# explicitly defined as having a http header mapping in the M2M_PARAM_TO_HTTP_HEADER_MAP dict.
if param in OneM2MPrimitive.M2M_PARAM_TO_HTTP_HEADER_MAP.keys():
header[OneM2MPrimitive.M2M_PARAM_TO_HTTP_HEADER_MAP[param]] = str(value)
return header
def _map_params_to_query_string(self, to: str, params: Parameters):
"""Maps OneM2M parameters to their query string representation and appends them to 'to'.
Args:
to: The 'to' param
params: A params dict
Returns:
The 'to' string with the query string arguments appended to it.
"""
# Strip query string in event its already been applied and append the query string indicator.
to = to.split('?')[0] + '?'
for param, value in params.items():
if param not in OneM2MPrimitive.M2M_PARAM_TO_HTTP_HEADER_MAP.keys() and param in OneM2MRequest.QUERY_STRING_PARAMS:
if to[-1] != '?':
to += '&'
to += '{}={}'.format(param, urllib.parse.quote(str(value)))
# No query string, strip the '? and return the just 'to'. Otherwise, return the modified to with query string.
return to[:-1] if to[-1] == '?' else to
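    # Example (illustrative values, assuming 'fu' and 'lim' are not
    # header-mapped): to='http://cse/~/in-cse' with params={'fu': 1, 'lim': 10}
    # yields 'http://cse/~/in-cse?fu=1&lim=10'.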
def _resolve_params(self, to: Optional[str], params: Optional[MutableMapping[str, Any]] = None):
"""Resolves 'to' and 'params' arguments according to a hierachy:
1) 'to' and 'param' arguments that are explicitly set here
override the 'to' and 'param' members set in the constuctor, but
are NOT PERSISTED to any following requests.
2) If 'to' is None, then 'params' is checked for 'to' and used. If no 'to'
is found in 'params' or 'params' are None, the member 'to' and 'params' set in the
constructor are used.
Args:
to: The 'to' param
param: The remaining params
Returns:
The resolved 'to' (string), params (dict).
Raises:
InvalidRequestParameterStructureException: If the params argument is not a dict.
"""
# Use the params set in the constructor.
if params is None:
params = self.params
assert params is not None
# Generate a random request id.
params[OneM2MPrimitive.M2M_PARAM_REQUEST_IDENTIFIER] = self._generate_rqi()
# Params must be expressed as a dict.
if isinstance(params, dict) is False:
raise InvalidRequestParameterStructureException(params)
# Use params 'to'.
if to is None:
# If params contains to...
if OneM2MPrimitive.M2M_PARAM_TO in params.keys():
# to param will be removed in _map_params_to_headers.
to = params[OneM2MPrimitive.M2M_PARAM_TO]
if to is None:
to = self.to
assert to is not None
if OneM2MPrimitive.M2M_PARAM_TO not in params.keys():
params[OneM2MPrimitive.M2M_PARAM_TO] = to
to = self._map_params_to_query_string(to, params)
return to, params
def _get_all_request_params(self):
"""Aggregates all of the query string and header request params.
Returns: A list of all param names.
"""
return sum(self.REQUIRED_HTTP_PARAMS.values(), self.QUERY_STRING_PARAMS)
def set_param(self, param, value=None):
"""Sets a request parameter.
Args:
param: The param name
value: The param value
Raises:
InvalidOneM2MRequestParameterException: If the params argument is not a dict.
"""
# Check if a dict of params was passed in.
if value is None and type(param) is dict:
for p, v in param.items():
                # Don't allow invalid request params, but don't throw an exception. Ignore and log.
if p in self._get_all_request_params():
self.params[p] = v
else:
# @todo do some logging
pass
else:
            # Don't allow invalid request params, but don't throw an exception. Ignore and log.
if param in self._get_all_request_params():
# @todo perform some validation...
self.params[param] = value
else:
# @todo do some logging
pass
def create(self, to: str, params: Parameters = None, content = None, insecure=False):
""" Synchronous OneM2M Create request.
Args:
to: Host (Overrides 'to' argument set in constructor.)
params: Dict of OneM2MParams (Overrides 'params' argument set in constructor.)
content: A OneM2MResource
Returns:
A OneM2MResponse object.
Raises:
            RequiredRequestParameterMissingException: If a required parameter is not included.
"""
# Process arguments.
to, params = self._resolve_params(to, params)
assert params is not None
        # If params is set to None, check if the instance was initialized with parameters.
        # Raises a RequiredRequestParameterMissingException.
self._validate_required_params(OneM2MOperation.Create, params)
# Convert OneM2M request params to headers for HTTP request.
headers = self._map_params_to_headers(params)
# Set the content type AND append the oneM2M resource type for the request.
# @todo move this to member with setter function.
headers[HttpHeader.CONTENT_TYPE] = OneM2MPrimitive.CONTENT_TYPE_JSON + ' ty='+str(params['ty'])
# Extract entity members as dict.
if isinstance(content, OneM2MResource):
# Wrap the entity in a container json object
# entity_name = content.__class__.__name__.lower()
            # @todo raise a ShortNameNotSet (OneM2MResource) exception.
entity_name = content.short_name
data = {entity_name: content.get_content()}
# Serialize dict. @todo data serialization must be dictated by content-type
json_data = json.dumps(data)
# HTTP POST implied by OneM2M Create Operation (function signature).
http_response = requests.post(to, headers = headers, data=json_data, verify=not insecure)
else:
# HTTP POST implied by OneM2M Create Operation (function signature).
http_response = requests.post(to, headers = headers, data=json.dumps(content), verify=not insecure)
# Return a OneM2MResponse instance.
return OneM2MResponse(http_response)
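    # Sketch of a create call (host, originator and resource values are
    # illustrative; 'ty' = 3 is the oneM2M container resource type):
    #
    #     req = OneM2MRequest('http://127.0.0.1:8080/PN_CSE', {
    #         OneM2MPrimitive.M2M_PARAM_FROM: 'C-admin',
    #         OneM2MPrimitive.M2M_PARAM_RESOURCE_TYPE: 3,
    #     })
    #     rsp = req.create(None, None, my_container_resource)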
def update(self, to=None, params=None, content=None):
""" Synchronous OneM2M update request.
Args:
to: Host (Overrides 'to' argument set in constructor.)
params: Dict of OneM2MParams (Overrides 'params' argument set in constructor.)
content: A OneM2MResource
Returns:
A OneM2MResponse object.
Raises:
            RequiredRequestParameterMissingException: If a required parameter is not included.
"""
# Process arguments.
to, params = self._resolve_params(to, params)
        # If params is set to None, check if the instance was initialized with parameters.
        # Raises a RequiredRequestParameterMissingException.
self._validate_required_params(OneM2MOperation.Create, params)
# Convert OneM2M request params to headers for HTTP request.
headers = self._map_params_to_headers(params)
# Set the content type for the request.
# @todo move this to member with setter function.
headers[HttpHeader.CONTENT_TYPE] = OneM2MPrimitive.CONTENT_TYPE_JSON + ' ty='+str(params['ty'])
# Extract entity members as dict.
if isinstance(content, OneM2MResource):
entity_name = content.short_name
data = {entity_name: content.get_content()}
# Serialize dict. @todo data serialization must be dictated by content-type
json_data = json.dumps(data)
# HTTP POST implied by OneM2M Create Operation (function signature).
http_response = requests.put(to, headers=headers, data=json_data)
# Return a OneM2MResponse instance.
return OneM2MResponse(http_response)
else:
raise Exception('Update must be an instance of OneM2MResource')
def retrieve(self, to=None, params=None):
""" Synchronous OneM2M Retrieve request.
Args:
to: Host (Overrides 'to' argument set in constructor.)
params: Dict of OneM2MParams (Overrides 'params' argument set in constructor.)
content: A OneM2MResource
Returns:
A OneM2MResponse object.
Raises:
            RequiredRequestParameterMissingException: If a required parameter is not included.
"""
# Process arguments.
to, params = self._resolve_params(to, params)
        # If params is set to None, check if the instance was initialized with parameters.
        # Raises a RequiredRequestParameterMissingException.
self._validate_required_params(OneM2MOperation.Retrieve, params)
# Convert OneM2M request params to headers for HTTP request.
headers = self._map_params_to_headers(params)
# Set the content type for the request.
# @todo move this to member with setter function.
headers[HttpHeader.CONTENT_TYPE] = OneM2MPrimitive.CONTENT_TYPE_JSON + ' ty='+str(params['ty'])
# HTTP GET implied by OneM2M retrieve Operation (function signature).
http_response = requests.get(to, headers=headers, verify=False)
# Return a OneM2MResponse instance.
return OneM2MResponse(http_response)
def delete(self, to=None, params=None):
""" Synchronous OneM2M Delete operation.
Args:
to: Host (Overrides 'to' argument set in constructor.)
param: Dict of OneM2MParams (Overrides 'params' argument set in constructor.)
Returns:
A OneM2MResponse object.
"""
# Process arguments.
to, params = self._resolve_params(to, params)
        # If params is set to None, check if the instance was initialized with parameters.
# Raises an exception @todo implement MissingRequiredParamException.
self._validate_required_params(OneM2MOperation.Delete, params)
# Build headers for HTTP request.
headers = self._map_params_to_headers(params)
# Set the content type for the request.
# @todo move this to member with setter function.
headers[HttpHeader.CONTENT_TYPE] = OneM2MPrimitive.CONTENT_TYPE_JSON + ' ty='+str(params['ty'])
# HTTP POST implied by OneM2M Create Operation (function signature).
http_response = requests.delete(to, headers=headers)
# Return a OneM2MResponse instance.
return OneM2MResponse(http_response)
def notify(self, to=None, params=None):
pass
async def create_async(self, to, params=None, content=None):
"""Asynchronous create (POST) OneM2M request.
Args:
to: Host (Overrides 'to' argument set in constructor.)
param: Dict of OneM2MParams (Overrides 'params' argument set in constructor.)
content: ...
Returns:
A OneM2MResponse object.
"""
# Ensure form of params.
        if params is not None and isinstance(params, dict) is False:
            raise Exception('params must be a dict')
        # If params is set to None, check if the instance was initialized with parameters.
        # Raises an exception @todo implement MissingRequiredParamException.
self._validate_required_params(OneM2MOperation.Create, params)
async with aiohttp.ClientSession() as session:
async with session.get(to) as resp:
# Map the client response to the OneM2MResponse.
return resp
def _generate_rqi(self):
"""Generate a random request id.
Returns:
            str: A random request id.
"""
return str(random.randrange(1000000, 999999999999))
def get_headers(self):
return self.headers
class InvalidOneM2MRequestParameterException(BaseException):
def __init__(self, param: str):
self.message = '{} is not a valid OneM2M request parameter.'.format(param)
class RequiredRequestParameterMissingException(BaseException):
def __init__(self, op: str, param: str):
"""Missing required parameter from a OneM2M request.
Args:
op: OneM2M operation
param: Missing param
"""
self.op = op
self.param = param
self.msg = 'The "{}" op requires the "{}" param be included in the request.'.format(op, param)
class InvalidRequestParameterStructureException(BaseException):
def __init__(self, obj):
"""Request parameter structure must be dict exception.
Args:
obj: The object passed as the request parameters.
"""
self.type = obj.__class__.__name__
        self.msg = 'Request "params" must be a dict. {} received.'.format(self.type)
class InvalidOneM2MOperationException(BaseException):
def __init__(self, msg: str):
self.message = msg
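# ----------------------------------------------------------------------
# Editorial usage sketch (not part of the original module). The client
# class that owns retrieve()/delete() is defined earlier in this file;
# its name and the exact parameter keys below are assumptions for
# illustration only, kept as a comment so nothing here executes:
#
#   client = OneM2MHttpClient(                      # hypothetical class name
#       to='http://cse.example.com:8080/cse-in',    # hypothetical host
#       params={'fr': 'C-client', 'rqi': '12345', 'ty': 2})
#   response = client.retrieve()        # HTTP GET under the hood
#   delete_response = client.delete()   # HTTP DELETE under the hood
# ----------------------------------------------------------------------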
| 39.27758
| 134
| 0.642657
|
4a06ed9b7f59f11b068e84e50fa5d8ba263ccd1c
| 19,270
|
py
|
Python
|
mindspore/ops/operations/comm_ops.py
|
dongkcs/mindspore
|
cd7df6dbf463ff3128e9181e9d0c779cecb81320
|
[
"Apache-2.0"
] | 2
|
2020-11-23T13:46:37.000Z
|
2020-12-20T02:02:38.000Z
|
mindspore/ops/operations/comm_ops.py
|
dongkcs/mindspore
|
cd7df6dbf463ff3128e9181e9d0c779cecb81320
|
[
"Apache-2.0"
] | null | null | null |
mindspore/ops/operations/comm_ops.py
|
dongkcs/mindspore
|
cd7df6dbf463ff3128e9181e9d0c779cecb81320
|
[
"Apache-2.0"
] | 1
|
2021-01-01T08:35:01.000Z
|
2021-01-01T08:35:01.000Z
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""comm_ops"""
from mindspore.common import Tensor
from ..._checkparam import Validator as validator
from ..._checkparam import Rel
from ...communication.management import get_rank, get_group_size, GlobalComm, _get_group
from ...common import dtype as mstype
from ..primitive import PrimitiveWithInfer, prim_attr_register
class ReduceOp:
"""
Operation options for reduce tensors.
There are four kinds of operation options, "SUM", "MAX", "MIN", and "PROD".
- SUM: Take the sum.
- MAX: Take the maximum.
- MIN: Take the minimum.
- PROD: Take the product.
"""
SUM = "sum"
MAX = "max"
MIN = "min"
PROD = "prod"
target_dtypes = (mstype.int8, mstype.int32, mstype.float16, mstype.float32)
class AllReduce(PrimitiveWithInfer):
"""
Reduces the tensor data across all devices in such a way that all devices will get the same final result.
Note:
The operation of AllReduce does not support "prod" currently.
The tensors must have the same shape and format in all processes of the collection.
Args:
op (str): Specifies an operation used for element-wise reductions,
like sum, max, and min. Default: ReduceOp.SUM.
group (str): The communication group to work on. Default: "hccl_world_group".
Raises:
        TypeError: If either the operation or the group is not a string,
                   or fusion is not an integer, or the input's dtype is bool.
        RuntimeError: If the operation is "prod".
Inputs:
- **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
Outputs:
Tensor, has the same shape of the input, i.e., :math:`(x_1, x_2, ..., x_R)`.
The contents depend on the specified operation.
Examples:
>>> from mindspore.communication import init
>>> from mindspore import Tensor
>>> from mindspore.ops.operations.comm_ops import ReduceOp
>>> import mindspore.nn as nn
>>> import mindspore.ops.operations as P
>>>
>>> init()
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.allreduce_sum = P.AllReduce(ReduceOp.SUM, group="nccl_world_group")
>>>
>>> def construct(self, x):
>>> return self.allreduce_sum(x)
>>>
>>> input_ = Tensor(np.ones([2, 8]).astype(np.float32))
>>> net = Net()
>>> output = net(input_)
"""
@prim_attr_register
def __init__(self, op=ReduceOp.SUM, group=GlobalComm.WORLD_COMM_GROUP):
if not isinstance(op, type(ReduceOp.SUM)):
raise TypeError("The operation of AllReduce should be str.")
if op == ReduceOp.PROD:
raise RuntimeError("The operation of AllReduce 'prod' is not supported yet.")
if not isinstance(_get_group(group), str):
raise TypeError("The group of AllReduce should be str.")
self.op = op
self.add_prim_attr('group', _get_group(group))
self.add_prim_attr('fusion', 0)
self.add_prim_attr('index', 0)
def infer_shape(self, x_shape):
return x_shape
def infer_dtype(self, x_dtype):
validator.check_tensor_type_same({'x': x_dtype}, target_dtypes, self.name)
return x_dtype
class AllGather(PrimitiveWithInfer):
"""
Gathers tensors from the specified communication group.
Note:
The tensors must have the same shape and format in all processes of the collection.
Args:
group (str): The communication group to work on. Default: "hccl_world_group".
Raises:
TypeError: If group is not a string.
        ValueError: If the local rank id of the calling process in the group
                    is not less than the group's rank size.
Inputs:
- **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
Outputs:
        Tensor. If the number of devices in the group is N,
        then the shape of the output is :math:`(N*x_1, x_2, ..., x_R)`.
Examples:
>>> import mindspore.ops.operations as P
>>> import mindspore.nn as nn
>>> from mindspore.communication import init
>>> from mindspore import Tensor
>>>
>>> init()
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.allgather = P.AllGather(group="nccl_world_group")
>>>
>>> def construct(self, x):
>>> return self.allgather(x)
>>>
>>> input_ = Tensor(np.ones([2, 8]).astype(np.float32))
>>> net = Net()
>>> output = net(input_)
"""
@prim_attr_register
def __init__(self, group=GlobalComm.WORLD_COMM_GROUP):
validator.check_value_type('group', _get_group(group), (str,), self.name)
self.rank = get_rank(_get_group(group))
self.rank_size = get_group_size(_get_group(group))
validator.check('rank', self.rank, 'rank_size', self.rank_size, Rel.LT, self.name)
self.add_prim_attr('rank_size', self.rank_size)
self.add_prim_attr('group', _get_group(group))
self.add_prim_attr('fusion', 0)
def infer_shape(self, x_shape):
validator.check_positive_int(len(x_shape), "x shape", self.name)
x_shape[0] = x_shape[0] * self.rank_size
return x_shape
def infer_dtype(self, x_dtype):
validator.check_tensor_type_same({'x': x_dtype}, target_dtypes, self.name)
return x_dtype
def __call__(self, tensor):
raise NotImplementedError
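# Editorial illustration (not in the MindSpore source): AllGather.infer_shape
# concatenates along the first axis, so with rank_size = 4 an input of shape
# (2, 8) on every device produces an output of shape (8, 8) on every device.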
class _HostAllGather(PrimitiveWithInfer):
"""
Gathers tensors from the specified communication group on host.
Note:
The tensors must have the same shape and format in all processes of the collection.
_HostAllGather is a host-side operator, it depends on OpenMPI and must use build option -M on
to enable it. Using mpirun command to run it:
mpirun -output-filename log -merge-stderr-to-stdout -np 3 python test_host_all_gather.py
Args:
        group (Union[tuple[int],list[int]]): The rank_ids of the communication group to work on.
Raises:
TypeError: If group is not a list nor tuple, or elements of group are not int.
ValueError: If group is not set, or rank_id from group not in [0, 7].
Inputs:
- **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
Outputs:
        Tensor. If the number of devices in the group is N,
        then the shape of the output is :math:`(N*x_1, x_2, ..., x_R)`.
"""
@prim_attr_register
def __init__(self, group=None):
if group is None:
raise ValueError(f"For '{self.name}' group must be set.")
validator.check_value_type('group', group, (tuple, list), self.name)
validator.check_int(len(group), 2, Rel.GE, "group size", self.name)
for r in group:
validator.check_int_range(r, 0, 7, Rel.INC_BOTH, "rank_id", self.name)
validator.check_value_type("rank_id", r, (int,), self.name)
self.group_size = len(group)
self.add_prim_attr('group', group)
def infer_shape(self, x_shape):
validator.check_positive_int(len(x_shape), "x shape", self.name)
x_shape[0] = x_shape[0] * self.group_size
return x_shape
def infer_dtype(self, x_dtype):
validator.check_tensor_type_same({'x': x_dtype}, target_dtypes, self.name)
return x_dtype
def __call__(self, tensor):
raise NotImplementedError
class ReduceScatter(PrimitiveWithInfer):
"""
Reduces and scatters tensors from the specified communication group.
Note:
The back propagation of the op is not supported yet. Stay tuned for more.
The tensors must have the same shape and format in all processes of the collection.
Args:
op (str): Specifies an operation used for element-wise reductions,
            like sum, max, and min. Default: ReduceOp.SUM.
group (str): The communication group to work on. Default: "hccl_world_group".
Raises:
TypeError: If any of operation and group is not a string.
ValueError: If the first dimension of the input cannot be divided by the rank size.
Examples:
>>> from mindspore import Tensor
>>> from mindspore.communication import init
>>> from mindspore.ops.operations.comm_ops import ReduceOp
>>> import mindspore.nn as nn
>>> import mindspore.ops.operations as P
>>>
>>> init()
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.reducescatter = P.ReduceScatter(ReduceOp.SUM, group="nccl_world_group")
>>>
>>> def construct(self, x):
>>> return self.reducescatter(x)
>>>
>>> input_ = Tensor(np.ones([8, 8]).astype(np.float32))
>>> net = Net()
>>> output = net(input_)
"""
@prim_attr_register
def __init__(self, op=ReduceOp.SUM, group=GlobalComm.WORLD_COMM_GROUP):
validator.check_value_type('op', op, (type(ReduceOp.SUM),), self.name)
validator.check_value_type('group', _get_group(group), (str,), self.name)
self.op = op
self.rank_size = get_group_size(_get_group(group))
self.add_prim_attr('rank_size', self.rank_size)
self.add_prim_attr('group', _get_group(group))
self.add_prim_attr('fusion', 0)
def infer_shape(self, x_shape):
if x_shape[0] % self.rank_size != 0:
raise ValueError(f"For '{self.name}' the first dimension of x should be divided by rank_size.")
x_shape[0] = int(x_shape[0]/self.rank_size)
return x_shape
def infer_dtype(self, x_dtype):
validator.check_tensor_type_same({'x': x_dtype}, target_dtypes, self.name)
return x_dtype
def __call__(self, tensor):
raise NotImplementedError
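# Editorial illustration (not in the MindSpore source): ReduceScatter is the
# inverse shape transform of AllGather above: with rank_size = 4, an input of
# shape (8, 8) is reduced element-wise across devices and each device keeps a
# (2, 8) slice of the result, per infer_shape.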
class _HostReduceScatter(PrimitiveWithInfer):
"""
Reduces and scatters tensors from the specified communication group on host.
Note:
The tensors must have the same shape and format in all processes of the collection.
_HostReduceScatter is a host-side operator, it depends on OpenMPI and must use build option
-M on to enable it. Using mpirun command to run it:
mpirun -output-filename log -merge-stderr-to-stdout -np 3 python test_host_reduce_scatter.py
Args:
op (str): Specifies an operation used for element-wise reductions,
            like sum, max, and min. Default: ReduceOp.SUM.
        group (Union[tuple[int],list[int]]): The rank_ids of the communication group to work on.
Raises:
TypeError: If op is not a string and group is not a list nor tuple,
or elements of group are not int.
        ValueError: If the first dimension of the input cannot be divided by the group size,
                    or group is not set, or rank_id not in [0, 7].
"""
@prim_attr_register
def __init__(self, op=ReduceOp.SUM, group=None):
if group is None:
raise ValueError(f"For '{self.name}' group must be set.")
validator.check_value_type('op', op, (type(ReduceOp.SUM),), self.name)
validator.check_value_type('group', group, (tuple, list), self.name)
validator.check_int(len(group), 2, Rel.GE, "group size", self.name)
for r in group:
validator.check_int_range(r, 0, 7, Rel.INC_BOTH, "rank_id", self.name)
validator.check_value_type("rank_id", r, (int,), self.name)
self.op = op
self.group_size = len(group)
self.add_prim_attr('group', group)
def infer_shape(self, x_shape):
if x_shape[0] % self.group_size != 0:
raise ValueError(f"For '{self.name}' the first dimension of x should be divided by group_size.")
x_shape[0] = int(x_shape[0]/self.group_size)
return x_shape
def infer_dtype(self, x_dtype):
validator.check_tensor_type_same({'x': x_dtype}, target_dtypes, self.name)
return x_dtype
def __call__(self, tensor):
raise NotImplementedError
class Broadcast(PrimitiveWithInfer):
"""
Broadcasts the tensor to the whole group.
Note:
The tensors must have the same shape and format in all processes of the collection.
Args:
root_rank (int): Source rank. Required in all processes except the one
that is sending the data.
group (str): The communication group to work on. Default: "hccl_world_group".
Inputs:
- **input_x** (Tensor) - The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
Outputs:
Tensor, has the same shape of the input, i.e., :math:`(x_1, x_2, ..., x_R)`.
The contents depend on the data of the `root_rank` device.
Raises:
        TypeError: If root_rank is not an integer or group is not a string.
Examples:
>>> from mindspore import Tensor
>>> from mindspore.communication import init
>>> import mindspore.nn as nn
>>> import mindspore.ops.operations as P
>>>
>>> init()
>>> class Net(nn.Cell):
>>> def __init__(self):
>>> super(Net, self).__init__()
>>> self.broadcast = P.Broadcast(1)
>>>
>>> def construct(self, x):
>>> return self.broadcast((x,))
>>>
>>> input_ = Tensor(np.ones([2, 8]).astype(np.float32))
>>> net = Net()
>>> output = net(input_)
"""
@prim_attr_register
def __init__(self, root_rank, group=GlobalComm.WORLD_COMM_GROUP):
validator.check_value_type('root_rank', root_rank, (int,), self.name)
validator.check_value_type('group', _get_group(group), (str,), self.name)
self.add_prim_attr('group', _get_group(group))
def infer_shape(self, x_shape):
return x_shape
def infer_dtype(self, x_dtype):
if not isinstance(x_dtype, tuple):
raise TypeError(f"{self.name}'s input should be a tuple!")
for _ele in x_dtype:
validator.check_tensor_type_same({'x': _ele}, target_dtypes, self.name)
return x_dtype
class _AlltoAll(PrimitiveWithInfer):
"""
AlltoAll is a collective operation.
AlltoAll sends data from the all processes to the all processes in the specified group. It has two phases:
    - The scatter phase: On each process, the operand is split into split_count blocks along
      split_dim, and the blocks are scattered to all processes, e.g., the ith block is sent to the ith process.
    - The gather phase: Each process concatenates the received blocks along concat_dim.
Note:
The tensors must have the same shape and format in all processes of the collection.
Args:
split_count (int): On each process, divide blocks into split_count number.
split_dim (int): On each process, split blocks along the split_dim.
        concat_dim (int): On each process, gather the received blocks along concat_dim.
group (str): The communication group to work on. Default: "hccl_world_group".
Raises:
TypeError: If group is not a string.
"""
@prim_attr_register
def __init__(self, split_count, split_dim, concat_dim, group=GlobalComm.WORLD_COMM_GROUP):
"""Initialize AlltoAll"""
validator.check_value_type('group', _get_group(group), (str,), self.name)
self.split_count = split_count
self.split_dim = split_dim
self.concat_dim = concat_dim
self.add_prim_attr('group', _get_group(group))
def infer_shape(self, x_shape):
x_shape[self.concat_dim] = x_shape[self.concat_dim] * self.split_count
x_shape[self.split_dim] = int(x_shape[self.split_dim] / self.split_count)
return x_shape
def infer_dtype(self, x_dtype):
validator.check_tensor_type_same({'x': x_dtype}, target_dtypes, self.name)
return x_dtype
def __call__(self, tensor):
return
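# Editorial illustration (not in the MindSpore source): with split_count=2,
# split_dim=0 and concat_dim=1 on 2 devices, _AlltoAll.infer_shape turns a
# (4, 6) input into a (2, 12) output: each device sends one (2, 6) half along
# dim 0 and concatenates the two halves it receives along dim 1.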
class _MirrorOperator(PrimitiveWithInfer):
"""
Auto parallel virtual operator. Do nothing in forward, do all reduce and mean in backward. It is only for
internal use of parallel modules and cannot be called by users.
Args:
group (str): The communication group to work on. Default: None.
dev_num (int): The device number of the group. Default: None.
mean_flag (bool): Whether use mean in backward. Default: None.
"""
@prim_attr_register
def __init__(self, group=None, dev_num=None, mean_flag=None):
self.group = group
self.dev_num = dev_num
self.mean_flag = mean_flag
def infer_shape(self, x_shape):
return x_shape
def infer_dtype(self, x_dtype):
return x_dtype
mirror = _MirrorOperator()
class _VirtualDiv(PrimitiveWithInfer):
"""
Auto parallel virtual operator. Do nothing in forward, do Div in backward.
Args:
divisor: float32
"""
@prim_attr_register
def __init__(self, divisor=None):
self.divisor = divisor
def infer_shape(self, x_shape):
return x_shape
def infer_dtype(self, x_dtype):
return x_dtype
virtual_div = _VirtualDiv()
class _VirtualDataset(PrimitiveWithInfer):
"""
Auto parallel virtual dataset operator.
It would insert Broadcast operator in forward computation and be deleted before backward computation.
"""
@prim_attr_register
def __init__(self):
"""init"""
def infer_shape(self, *args):
if len(args) == 1:
return args[0]
return args
def infer_dtype(self, *args):
if len(args) == 1:
return args[0]
return args
virtual_dataset = _VirtualDataset()
class _GetTensorSlice(PrimitiveWithInfer):
"""
Gets tensor slice by device matrix and tensor map.
Args:
dev_mat (tuple): The device matrix of the slice tensor.
tensor_map (tuple): The tensor map of the slice tensor.
"""
@prim_attr_register
def __init__(self):
"""Initialize ChunkTensor"""
def infer_value(self, x, dev_mat, tensor_map):
from mindspore.parallel._tensor import _load_tensor
validator.check_value_type("dev_mat", dev_mat, [tuple], self.name)
validator.check_value_type("tensor_map", tensor_map, [tuple], self.name)
return Tensor(_load_tensor(x, dev_mat, tensor_map))
| 36.153846
| 118
| 0.637364
|
4a06eda94cb7431545e8eecfe4f34d5183e3a6bb
| 4,795
|
py
|
Python
|
train.py
|
Daeil-Jung/Fundus_Process
|
db7585244036275405425c55771341ccfed22ddc
|
[
"MIT"
] | null | null | null |
train.py
|
Daeil-Jung/Fundus_Process
|
db7585244036275405425c55771341ccfed22ddc
|
[
"MIT"
] | 3
|
2021-06-08T21:57:31.000Z
|
2022-03-12T00:39:31.000Z
|
train.py
|
Daeil-Jung/Fundus_Process
|
db7585244036275405425c55771341ccfed22ddc
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os, datetime
import pandas as pd
tf.config.set_visible_devices([], 'GPU')
tf.random.set_seed(1234)
cur_dir = os.path.abspath(os.getcwd())
csv_data = pd.read_csv(os.path.join(cur_dir, "DBdata.csv"))
batch_size = 32
epochs = 20
IMG_HEIGHT = 448
IMG_WIDTH = 448
train_dir = "Fundus" + os.sep + "train"
test_dir = "Fundus" + os.sep + "test"
num_glaucoma_tr = len(os.listdir(os.path.join(train_dir, "Glaucoma")))
num_normal_tr = len(os.listdir(os.path.join(train_dir, "Normal")))
num_glaucoma_ts = len(os.listdir(os.path.join(test_dir, "Glaucoma")))
num_normal_ts = len(os.listdir(os.path.join(test_dir, "Normal")))
total_train = num_glaucoma_tr + num_normal_tr
total_ts = num_glaucoma_ts + num_normal_ts
train_image_generator = ImageDataGenerator(rescale=1./255, validation_split=0.2) # Generator for our training, validation data
test_image_generator = ImageDataGenerator(rescale=1./255) # Generator for our test data
train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
directory=train_dir,
shuffle=True,
target_size=(IMG_HEIGHT, IMG_WIDTH),
class_mode="categorical",
subset='training')
valid_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
directory=train_dir,
shuffle=True,
target_size=(IMG_HEIGHT, IMG_WIDTH),
class_mode="categorical",
subset='validation')
test_data_gen = test_image_generator.flow_from_directory(batch_size=batch_size,
directory=test_dir,
target_size=(IMG_HEIGHT, IMG_WIDTH),
class_mode='categorical')
sample_training_images, _ = next(train_data_gen)
model = Sequential([
Conv2D(16, 3, padding='same', activation='relu',
input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),
MaxPooling2D(),
Dropout(0.1),
Conv2D(32, 3, padding='same', activation='relu'),
Conv2D(32, 3, padding='same', activation='relu'),
MaxPooling2D(),
Dropout(0.1),
Conv2D(64, 3, padding='same', activation='relu'),
Conv2D(64, 3, padding='same', activation='relu'),
MaxPooling2D(),
Dropout(0.1),
Flatten(),
Dense(1024, activation='relu'),
Dense(2, activation="softmax"),
])
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy', 'binary_crossentropy'])
logdir = os.path.join("logs", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
tensorboard_callback = tf.keras.callbacks.TensorBoard(logdir, histogram_freq=1)
history = model.fit(
train_data_gen,
epochs=epochs,
validation_data=valid_data_gen,
callbacks=[tensorboard_callback],
)
predict_value = model.predict(test_data_gen)
predict_value = tf.argmax(predict_value, axis=1)
predict_value_df = pd.DataFrame(predict_value)
train_pred = model.predict(train_data_gen)
test_files_name = []
for filepath in test_data_gen.filepaths:
test_files_name.append(filepath.split(os.sep)[-1].split("_")[:2])
extracted_data = pd.DataFrame(test_files_name)
labels = pd.DataFrame(test_data_gen.labels)
target_col = []
for i in range(len(test_files_name)):
try:
target_col.append(csv_data[(csv_data["study_id"] == int(test_files_name[i][0][1:])) & (csv_data["OSOD"] == test_files_name[i][1]) & (csv_data["organization"] == "DKU")].values[0][0])
except:
target_col.append(0)
target_col = pd.DataFrame(target_col)
extracted_data = pd.concat([target_col, extracted_data, labels, predict_value_df], axis=1, sort=False)
extracted_data.columns = ["id", "study_id", "OSOD", "labels", "predict"]
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_rows', 500)
extracted_data.to_csv("model1.csv", index=False, index_label=True)
total_rows = extracted_data.shape[0]
correct_rows = extracted_data[extracted_data["labels"] == extracted_data["predict"]]
correct_rows = correct_rows.shape[0]
print(correct_rows / total_rows)
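# Editorial note: the printed value is the overall test accuracy, i.e. the
# fraction of rows whose "predict" column matches the ground-truth "labels"
# column in the sorted output.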
| 35.783582
| 190
| 0.625026
|
4a06eed8ea7c4f2eb813da4fa2e8ce97036ee63a
| 801
|
py
|
Python
|
yc242/1015-1.py
|
c-yan/yukicoder
|
cdbbd65402177225dd989df7fe01f67908484a69
|
[
"MIT"
] | null | null | null |
yc242/1015-1.py
|
c-yan/yukicoder
|
cdbbd65402177225dd989df7fe01f67908484a69
|
[
"MIT"
] | null | null | null |
yc242/1015-1.py
|
c-yan/yukicoder
|
cdbbd65402177225dd989df7fe01f67908484a69
|
[
"MIT"
] | null | null | null |
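# Editorial summary of the greedy below: the amounts A (each bumped by 1) are
# paid largest-first. 10000-yen bills (Z) go to amounts >= 10000; any bills
# left over each fully cover one remaining amount (the A = A[Z:] slice). The
# same pass repeats with 5000-yen bills (Y), and finally the 1000-yen bills
# (X) must cover whatever remains, rounding each amount up to the nearest 1000.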
N, X, Y, Z = map(int, input().split())
A = list(map(int, input().split()))
A = [a + 1 for a in A]
A.sort(reverse=True)
for i in range(len(A)):
if Z == 0:
break
if A[i] >= 10000:
t = A[i] // 10000
t = min(t, Z)
Z -= t
A[i] -= t * 10000
else:
break
A = [a for a in A if a > 0]
A.sort(reverse=True)
A = A[Z:]
for i in range(len(A)):
if Y == 0:
break
if A[i] >= 5000:
t = A[i] // 5000
t = min(t, Y)
Y -= t
A[i] -= t * 5000
else:
break
A = [a for a in A if a > 0]
A.sort(reverse=True)
A = A[Y:]
for i in range(len(A)):
t = (A[i] + 999) // 1000
t = min(t, X)
X -= t
A[i] -= t * 1000
A = [a for a in A if a > 0]
if len(A) == 0:
print('Yes')
else:
print('No')
| 17.413043
| 38
| 0.423221
|
4a06ef7f72d63d9ee990f2af7f67f5d90e2cec63
| 173
|
py
|
Python
|
supervised_learning/generalized_linear_model/LinearRegression.py
|
LauZyHou/sklearn-STS
|
8dd90a8fcf37094ea03f06fa10ce74dcf2d57dd3
|
[
"MIT"
] | null | null | null |
supervised_learning/generalized_linear_model/LinearRegression.py
|
LauZyHou/sklearn-STS
|
8dd90a8fcf37094ea03f06fa10ce74dcf2d57dd3
|
[
"MIT"
] | null | null | null |
supervised_learning/generalized_linear_model/LinearRegression.py
|
LauZyHou/sklearn-STS
|
8dd90a8fcf37094ea03f06fa10ce74dcf2d57dd3
|
[
"MIT"
] | null | null | null |
from sklearn import linear_model
if __name__ == '__main__':
reg = linear_model.LinearRegression()
reg.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
print(reg.coef_)
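    # Editorial note: with perfectly collinear features the least-squares fit
    # is not unique; scikit-learn's lstsq-based solver returns the minimum-norm
    # solution, so this typically prints [0.5 0.5].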
| 24.714286
| 48
| 0.612717
|
4a06efa65d5a87830863f07332d02877e3784f27
| 2,762
|
py
|
Python
|
docs/conf.py
|
scaleapi/nucleus-python-client
|
8293cd5e7b368faf12bc2952defef36f712a7ba2
|
[
"MIT"
] | 13
|
2020-12-02T01:07:12.000Z
|
2022-03-15T17:28:37.000Z
|
docs/conf.py
|
scaleapi/nucleus-python-client
|
8293cd5e7b368faf12bc2952defef36f712a7ba2
|
[
"MIT"
] | 78
|
2020-10-29T09:33:04.000Z
|
2022-03-28T20:35:45.000Z
|
docs/conf.py
|
scaleapi/nucleus-python-client
|
8293cd5e7b368faf12bc2952defef36f712a7ba2
|
[
"MIT"
] | 8
|
2020-11-13T06:23:38.000Z
|
2022-03-03T20:48:06.000Z
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("../../"))
# -- Project information -----------------------------------------------------
project = "Nucleus"
copyright = "2021, Scale"
author = "Scale"
# The full version, including alpha/beta/rc tags
from nucleus import __version__ # noqa: E402
release = "v" + str(__version__)
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"autoapi.extension",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_title = "Nucleus API Reference"
html_theme = "furo"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = [
"css/custom.css",
]
html_favicon = "favicon.ico"
html_logo = "nucleus-logo.svg"
html_theme_options = {
"logo_only": True,
"display_version": False,
}
# -- autogen configuration ---------------------------------------------------
autoapi_type = "python"
autoapi_dirs = ["../nucleus"]
autoapi_options = [
"members",
"no-undoc-members",
"inherited-members",
"show-module-summary",
"imported-members",
]
autoapi_template_dir = "_templates"
autoapi_root = "api"
autoapi_python_class_content = "both"
autoapi_member_order = "groupwise"
autodoc_typehints = "description"
autoapi_add_toctree_entry = False
napoleon_include_init_with_doc = True
| 29.698925
| 79
| 0.664736
|
4a06efc167720466718bb314ea6e4f051d38098e
| 2,941
|
py
|
Python
|
tests/links_tests/test_empirical_normalization.py
|
yuishihara/chainerrl
|
74901712a8ed8207b9d526d3f45b04bf22996b8d
|
[
"MIT"
] | 923
|
2017-06-01T08:27:42.000Z
|
2022-03-24T02:17:04.000Z
|
tests/links_tests/test_empirical_normalization.py
|
yuishihara/chainerrl
|
74901712a8ed8207b9d526d3f45b04bf22996b8d
|
[
"MIT"
] | 374
|
2017-06-02T02:07:50.000Z
|
2021-06-29T22:05:38.000Z
|
tests/links_tests/test_empirical_normalization.py
|
yuishihara/chainerrl
|
74901712a8ed8207b9d526d3f45b04bf22996b8d
|
[
"MIT"
] | 253
|
2017-06-04T10:31:50.000Z
|
2022-03-19T15:20:51.000Z
|
import unittest
import chainer
from chainer import testing
import numpy as np
from chainerrl.links import empirical_normalization
class TestEmpiricalNormalization(unittest.TestCase):
def test_small_cpu(self):
self._test_small(gpu=-1)
@testing.attr.gpu
def test_small_gpu(self):
self._test_small(gpu=0)
def _test_small(self, gpu):
en = empirical_normalization.EmpiricalNormalization(10)
if gpu >= 0:
chainer.cuda.get_device_from_id(gpu).use()
en.to_gpu()
xp = en.xp
xs = []
for t in range(10):
x = xp.random.normal(loc=4, scale=2, size=(t + 3, 10))
en(x)
xs.extend(list(x))
xs = xp.stack(xs)
true_mean = xp.mean(xs, axis=0)
true_std = xp.std(xs, axis=0)
xp.testing.assert_allclose(en.mean, true_mean, rtol=1e-4)
xp.testing.assert_allclose(en.std, true_std, rtol=1e-4)
@testing.attr.slow
def test_large(self):
en = empirical_normalization.EmpiricalNormalization(10)
for _ in range(10000):
x = np.random.normal(loc=4, scale=2, size=(7, 10))
en(x)
x = 2 * np.random.normal(loc=4, scale=2, size=(1, 10))
enx = en(x, update=False)
np.testing.assert_allclose(en.mean, 4, rtol=1e-1)
np.testing.assert_allclose(en.std, 2, rtol=1e-1)
# Compare with the ground-truth normalization
np.testing.assert_allclose((x - 4) / 2, enx, rtol=1e-1)
# Test inverse
np.testing.assert_allclose(x, en.inverse(enx), rtol=1e-4)
def test_batch_axis(self):
shape = (2, 3, 4)
for batch_axis in range(3):
en = empirical_normalization.EmpiricalNormalization(
shape=shape[:batch_axis] + shape[batch_axis + 1:],
batch_axis=batch_axis,
)
for _ in range(10):
x = np.random.rand(*shape)
en(x)
def test_until(self):
en = empirical_normalization.EmpiricalNormalization(7, until=20)
last_mean = None
last_std = None
for t in range(15):
en(np.random.rand(2, 7) + t)
if 1 <= t < 10:
self.assertFalse(np.allclose(en.mean, last_mean, rtol=1e-4))
self.assertFalse(np.allclose(en.std, last_std, rtol=1e-4))
elif t >= 10:
np.testing.assert_allclose(en.mean, last_mean, rtol=1e-4)
np.testing.assert_allclose(en.std, last_std, rtol=1e-4)
last_mean = en.mean
last_std = en.std
def test_mixed_inputs(self):
en = empirical_normalization.EmpiricalNormalization(7)
for t in range(5):
y = en(np.random.rand(t + 1, 7))
self.assertIsInstance(y, np.ndarray)
y = en(chainer.Variable(np.random.rand(t + 1, 7)))
self.assertIsInstance(y, chainer.Variable)
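# Editorial sketch of the behaviour under test (not chainerrl code): the link
# keeps a running mean/std over all rows seen so far, normalizes inputs as
# (x - mean) / std, stops updating after `until` samples, and `inverse`
# recovers x as y * std + mean.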
| 32.677778
| 76
| 0.582115
|
4a06f0cc98b9d053f4f8cff95bc5d00ab98c2e38
| 6,714
|
py
|
Python
|
96-well-plate-randomiser.py
|
olipayne/96-Well-Plate-Randomiser
|
8265ebf9962bb338744cd43be989dc18f0727273
|
[
"MIT"
] | 3
|
2022-01-28T08:05:35.000Z
|
2022-02-04T11:19:39.000Z
|
96-well-plate-randomiser.py
|
olipayne/96-Well-Plate-Randomiser
|
8265ebf9962bb338744cd43be989dc18f0727273
|
[
"MIT"
] | 1
|
2022-01-14T15:47:20.000Z
|
2022-01-27T21:40:30.000Z
|
96-well-plate-randomiser.py
|
olipayne/96-Well-Plate-Randomiser
|
8265ebf9962bb338744cd43be989dc18f0727273
|
[
"MIT"
] | null | null | null |
# Please read the README.md file for more information.
################################################################################
# Filename to load the 96 well plate data from *THIS WILL NOT BE MODIFIED*
csv_input = "input.csv"
# Filename to save the data from input.csv with the additional column of randomised coordinates
csv_output = "output.csv"
# Filename to save the sorted data to a CSV file
csv_sorted = "sorted.csv"
# Set to True if the CSV has a header row, False if not
csv_has_header = True
# Column number of the plate ID in the CSV (starting at 0)
csv_plate_id_column = 5
# Column number of the well coordinates in the CSV (starting at 0)
csv_well_column = 6
# Control (plate, well) tuples which should not be moved (if empty, all wells will be moved)
control_wells = [("78500", "A1"), ("BB143_plate07", "A12"), ("BB143_plate15", "H12")]
# Number of plates (for sanity checking)
number_of_plates = 3
# How many times should the randomisation be performed? Doesn't really make a difference, but it's here if you want to.
number_of_randomisations = 100
# If the value in this column ID is empty, then consider the row as an empty row and skip it
empty_column_id = 0
# Rename plates in order of appearance in the CSV, if empty then no renaming will be done
rename_plates = ["final_1", "final_2", "final_3"]
################################################################################
import csv
import random
# Well Plate Layout
letters = ["A", "B", "C", "D", "E", "F", "G", "H"]
numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
# Get a list of all plates in the CSV
plates = []
with open(csv_input, "r") as csv_file:
csv_reader = csv.reader(csv_file)
if csv_has_header:
next(csv_reader)
for row in csv_reader:
if row[csv_plate_id_column] not in plates:
plates.append(str(row[csv_plate_id_column]))
print("Found " + str(len(plates)) + " plates in the CSV.")
# Check that the number of plates to be renamed matches the number of plates found in the CSV, if 'rename_plates' is not empty
if len(rename_plates) > 0 and len(rename_plates) != len(plates):
print(
"ERROR: The number of plates to be renamed does not match the number of plates found in the CSV!"
)
exit()
# If there are not exactly the correct number of unique plates found, we have a problem.
if len(plates) != number_of_plates:
print(
"ERROR: Unexpected number of plates found in CSV. Expected "
+ str(number_of_plates)
+ " but found "
+ str(len(plates))
+ "."
)
exit()
# If we are renaming plates, then we need to override the new plate names
if len(rename_plates) > 0:
generate_plates = rename_plates
# Let's print which plates we are renaming
print("Renaming plates:")
for i in range(len(plates)):
print(plates[i] + " -> " + rename_plates[i])
else:
generate_plates = plates
print("No renaming required.")
# Build a list of tuples of all possible coordinates for each plate
wells = []
for plate in generate_plates:
for number in numbers:
for letter in letters:
wells.append((str(plate), letter + str(number)))
# Remove any control wells from the wells list, taking into account the rename_plates override
for control_plate, control_well in control_wells:
if len(rename_plates) > 0:
control_plate = rename_plates[plates.index(control_plate)]
wells.remove((control_plate, control_well))
with open(csv_input, "r") as csv_file:
with open(csv_output, "w") as csv_output_file:
csv_reader = csv.reader(csv_file)
csv_writer = csv.writer(csv_output_file)
# Count how many columns are in the CSV
column_count = 0
for row in csv_reader:
column_count = len(row)
# Rewind the file
csv_file.seek(0)
# If the CSV has a header, add two columns to the end and add it to the output CSV
if csv_has_header:
csv_writer.writerow(next(csv_reader) + ["output_plate", "output_well"])
# Count the number of rows in the CSV
row_count = 0
for row in csv_reader:
row_count += 1
# Reset the CSV file to the start, line 1 if there is a header
csv_file.seek(0)
if csv_has_header:
next(csv_reader)
# If the number of available wells is smaller than the number of rows, we have a problem.
if len(wells) < row_count:
print(
"ERROR: Unexpected number of available wells. Need at least "
+ str(row_count)
+ " but found "
+ str(len(wells))
+ "."
)
exit()
# Slice the wells var to the number of rows we have, minus the number of control wells
wells = wells[: row_count - len(control_wells)]
# Shuffle the wells 'number_of_randomisation' times
for i in range(number_of_randomisations):
random.shuffle(wells)
# Loop through the input CSV and add the new columns to the output CSV
for row in csv_reader:
if (row[csv_plate_id_column], row[csv_well_column]) not in control_wells:
next_well = wells.pop()
row.extend([next_well[0], next_well[1]])
csv_writer.writerow(row)
else:
# Find the new renamed plate based on the original plate name
if len(rename_plates) > 0:
for i in range(len(plates)):
if row[csv_plate_id_column] == plates[i]:
plate_name = rename_plates[i]
else:
plate_name = row[csv_plate_id_column]
row.extend([plate_name, row[csv_well_column]])
csv_writer.writerow(row)
print("output CSV written to " + csv_output + ", now sorting...")
# Sort the output.csv file by the output_plate and output_well columns
with open(csv_output, "r") as csv_file:
with open(csv_sorted, "w") as csv_output_file:
csv_reader = csv.reader(csv_file)
csv_writer = csv.writer(csv_output_file)
# If the CSV has a header, print it as it is
if csv_has_header:
csv_writer.writerow(next(csv_reader))
# Sort the rows by the output_plate and output_well columns with natural sorting
for row in sorted(
csv_reader,
key=lambda x: (
x[column_count],
x[column_count + 1][:1],
int(x[column_count + 1][1:]),
),
):
csv_writer.writerow(row)
print("output CSV sorted and written to " + csv_sorted)
| 34.96875
| 126
| 0.618707
|
4a06f1a226163cce4bdb71aadfb37f35842ea4d2
| 8,513
|
py
|
Python
|
mango/tokenaccount.py
|
bednie/mango-explorer
|
4575395488e97a1f8cb52cc567e3307f11a28932
|
[
"MIT"
] | null | null | null |
mango/tokenaccount.py
|
bednie/mango-explorer
|
4575395488e97a1f8cb52cc567e3307f11a28932
|
[
"MIT"
] | null | null | null |
mango/tokenaccount.py
|
bednie/mango-explorer
|
4575395488e97a1f8cb52cc567e3307f11a28932
|
[
"MIT"
] | null | null | null |
# # ⚠ Warning
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# [🥭 Mango Markets](https://mango.markets/) support is available at:
# [Docs](https://docs.mango.markets/)
# [Discord](https://discord.gg/67jySBhxrg)
# [Twitter](https://twitter.com/mangomarkets)
# [Github](https://github.com/blockworks-foundation)
# [Email](mailto:hello@blockworks.foundation)
import spl.token.instructions as spl_token
import typing
from solana.keypair import Keypair
from solana.publickey import PublicKey
from solana.rpc.types import TokenAccountOpts
from spl.token.client import Token as SplToken
from spl.token.constants import (
ASSOCIATED_TOKEN_PROGRAM_ID,
TOKEN_PROGRAM_ID,
)
from .accountinfo import AccountInfo
from .addressableaccount import AddressableAccount
from .combinableinstructions import CombinableInstructions
from .context import Context
from .instrumentlookup import InstrumentLookup
from .instrumentvalue import InstrumentValue
from .layouts import layouts
from .observables import Disposable
from .tokens import Instrument, Token
from .version import Version
from .wallet import Wallet
from .websocketsubscription import (
WebSocketAccountSubscription,
WebSocketSubscriptionManager,
)
# # 🥭 TokenAccount class
#
class TokenAccount(AddressableAccount):
def __init__(
self,
account_info: AccountInfo,
version: Version,
owner: PublicKey,
value: InstrumentValue,
) -> None:
super().__init__(account_info)
self.version: Version = version
self.owner: PublicKey = owner
self.value: InstrumentValue = value
@staticmethod
def derive_associated_token_address(owner: PublicKey, token: Token) -> PublicKey:
address, _ = PublicKey.find_program_address(
seeds=[bytes(owner), bytes(TOKEN_PROGRAM_ID), bytes(token.mint)],
program_id=ASSOCIATED_TOKEN_PROGRAM_ID,
)
return address
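    # Editorial note: the seed order (owner, token program id, mint) under
    # ASSOCIATED_TOKEN_PROGRAM_ID is the standard associated-token-account
    # derivation, so this should agree with the library helper
    # spl_token.get_associated_token_address(owner, token.mint) used below.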
@staticmethod
def create(context: Context, account: Keypair, token: Token) -> "TokenAccount":
spl_token = SplToken(
context.client.compatible_client, token.mint, TOKEN_PROGRAM_ID, account
)
owner = account.public_key
new_account_address = spl_token.create_account(owner)
created: typing.Optional[TokenAccount] = TokenAccount.load(
context, new_account_address
)
if created is None:
raise Exception(
f"Newly-created SPL token account could not be found at address {new_account_address}"
)
return created
@staticmethod
def fetch_all_for_owner_and_token(
context: Context, owner_public_key: PublicKey, token: Token
) -> typing.Sequence["TokenAccount"]:
opts = TokenAccountOpts(mint=token.mint)
token_accounts = context.client.get_token_accounts_by_owner(
owner_public_key, opts
)
all_accounts: typing.List[TokenAccount] = []
for token_account_response in token_accounts:
account_info = AccountInfo._from_response_values(
token_account_response["account"],
PublicKey(token_account_response["pubkey"]),
)
token_account = TokenAccount.parse(account_info, token)
all_accounts += [token_account]
return all_accounts
@staticmethod
def fetch_largest_for_owner_and_token(
context: Context, owner_public_key: PublicKey, token: Token
) -> typing.Optional["TokenAccount"]:
all_accounts = TokenAccount.fetch_all_for_owner_and_token(
context, owner_public_key, token
)
largest_account: typing.Optional[TokenAccount] = None
for token_account in all_accounts:
if (
largest_account is None
or token_account.value.value > largest_account.value.value
):
largest_account = token_account
return largest_account
@staticmethod
def find_or_create_token_address_to_use(
context: Context, wallet: Wallet, owner: PublicKey, token: Token
) -> PublicKey:
# This is a root wallet account - get the token account to use.
associated_token_address = spl_token.get_associated_token_address(
owner, token.mint
)
token_account: typing.Optional[TokenAccount] = TokenAccount.load(
context, associated_token_address
)
if token_account is not None:
# The associated token account exists so use it
return associated_token_address
# There is no associated token account. See if they have an old-style non-associated token account.
largest = TokenAccount.fetch_largest_for_owner_and_token(context, owner, token)
if largest is not None:
# There is an old-style account so use that.
return largest.address
# There is no old-style token account either, so create the proper associated token account.
signer = CombinableInstructions.from_wallet(wallet)
create_instruction = spl_token.create_associated_token_account(
wallet.address, owner, token.mint
)
create = CombinableInstructions.from_instruction(create_instruction)
transaction_ids = (signer + create).execute(context)
context.client.wait_for_confirmation(transaction_ids)
return associated_token_address
@staticmethod
def from_layout(
layout: typing.Any, account_info: AccountInfo, token: Token
) -> "TokenAccount":
token_value = InstrumentValue(token, token.shift_to_decimals(layout.amount))
return TokenAccount(
account_info, Version.UNSPECIFIED, layout.owner, token_value
)
@staticmethod
def parse(
account_info: AccountInfo,
token: typing.Optional[Token] = None,
instrument_lookup: typing.Optional[InstrumentLookup] = None,
) -> "TokenAccount":
data = account_info.data
if len(data) != layouts.TOKEN_ACCOUNT.sizeof():
raise Exception(
f"Data length ({len(data)}) does not match expected size ({layouts.TOKEN_ACCOUNT.sizeof()})"
)
layout = layouts.TOKEN_ACCOUNT.parse(data)
if token is None:
if instrument_lookup is None:
raise Exception(
"Neither 'Token' or 'InstrumentLookup' specified for parsing token data."
)
instrument: typing.Optional[Instrument] = instrument_lookup.find_by_mint(
layout.mint
)
if instrument is None:
raise Exception(
f"Could not find token data for token with mint '{layout.mint}'"
)
token = Token.ensure(instrument)
return TokenAccount.from_layout(layout, account_info, token)
@staticmethod
def load(context: Context, address: PublicKey) -> typing.Optional["TokenAccount"]:
account_info = AccountInfo.load(context, address)
if account_info is None or (
len(account_info.data) != layouts.TOKEN_ACCOUNT.sizeof()
):
return None
return TokenAccount.parse(
account_info, instrument_lookup=context.instrument_lookup
)
def subscribe(
self,
context: Context,
websocketmanager: WebSocketSubscriptionManager,
callback: typing.Callable[["TokenAccount"], None],
) -> Disposable:
token = Token.ensure(self.value.token)
def __parser(account_info: AccountInfo) -> TokenAccount:
return TokenAccount.parse(account_info, token=token)
subscription = WebSocketAccountSubscription(context, self.address, __parser)
websocketmanager.add(subscription)
subscription.publisher.subscribe(on_next=callback) # type: ignore[call-arg]
return subscription
def __str__(self) -> str:
return (
f"« TokenAccount {self.address}, Owner: {self.owner}, Value: {self.value} »"
)
| 37.337719
| 108
| 0.672853
|
4a06f1b076401f68d81cb784bb3483fac0e0f1e1
| 624
|
py
|
Python
|
scratch_ml/demo/autoencoder.py
|
siAyush/scratch_ml
|
b147515d9dda7d31567ad148528dc6f313cd739f
|
[
"MIT"
] | 23
|
2021-01-24T16:42:12.000Z
|
2022-03-24T01:19:44.000Z
|
scratch_ml/demo/autoencoder.py
|
siAyush/scratch_ml
|
b147515d9dda7d31567ad148528dc6f313cd739f
|
[
"MIT"
] | null | null | null |
scratch_ml/demo/autoencoder.py
|
siAyush/scratch_ml
|
b147515d9dda7d31567ad148528dc6f313cd739f
|
[
"MIT"
] | 2
|
2020-12-23T11:11:19.000Z
|
2020-12-23T11:11:34.000Z
|
import math
import numpy as np
from sklearn import datasets
import matplotlib.pyplot as plt
from scratch_ml.deep_learning.optimizers import Adam
from scratch_ml.deep_learning import NeuralNetwork
from scratch_ml.deep_learning.layers import Dense, Dropout, Conv2D, Flatten, Activation, BatchNormalization
from scratch_ml.utils import to_categorical, train_test_split, CrossEntropy
class Autoencoder():
"""Autoencoder with deep fully-connected neural networks."""
def __init__(self):
pass
    def encoder(self):
        pass
    def decoder(self):
        pass
if __name__ == '__main__':
ae = Autoencoder()
| 24
| 107
| 0.753205
|
4a06f298fe8a4c5241af584131f36efa9bbfc8e9
| 1,144
|
py
|
Python
|
setup.py
|
khalilouardini/simpletransformers
|
6e6efa382875beed5a2db4e61d832591a1916e13
|
[
"Apache-2.0"
] | 2
|
2021-03-13T19:05:46.000Z
|
2021-11-07T20:03:36.000Z
|
setup.py
|
khalilouardini/simpletransformers
|
6e6efa382875beed5a2db4e61d832591a1916e13
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
khalilouardini/simpletransformers
|
6e6efa382875beed5a2db4e61d832591a1916e13
|
[
"Apache-2.0"
] | 3
|
2021-11-07T19:55:46.000Z
|
2022-01-24T15:25:33.000Z
|
from setuptools import find_packages, setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="simpletransformers",
version="0.47.4",
author="Thilina Rajapakse",
author_email="chaturangarajapakshe@gmail.com",
description="An easy-to-use wrapper library for the Transformers library.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ThilinaRajapakse/simpletransformers/",
packages=find_packages(),
scripts=["bin/simple-viewer"],
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
python_requires=">=3.6",
install_requires=[
"numpy",
"requests",
"tqdm>=4.47.0",
"regex",
"transformers>=3.0.2",
"scipy",
"scikit-learn",
"seqeval",
"tensorboardx",
"pandas",
"tokenizers",
"wandb",
"streamlit",
],
)
| 28.6
| 79
| 0.61451
|
4a06f2a82419567d4b89d7c57d788d82d41b342f
| 2,443
|
py
|
Python
|
colin-api/colin_api/resources/__init__.py
|
rstens/lear
|
321a8de44c5369cae6db4c377f6f121d19ac52e0
|
[
"Apache-2.0"
] | 1
|
2019-09-05T03:38:00.000Z
|
2019-09-05T03:38:00.000Z
|
colin-api/colin_api/resources/__init__.py
|
rstens/lear
|
321a8de44c5369cae6db4c377f6f121d19ac52e0
|
[
"Apache-2.0"
] | 206
|
2019-08-09T18:30:53.000Z
|
2022-02-27T21:28:50.000Z
|
colin-api/colin_api/resources/__init__.py
|
rstens/lear
|
321a8de44c5369cae6db4c377f6f121d19ac52e0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exposes all of the resource endpoints mounted in Flask-Blueprint style.
Uses restplus namespaces to mount individual api endpoints into the service.
All services have 2 defaults sets of endpoints:
- ops
- meta
That are used to expose operational health information about the service, and meta information.
"""
from flask import Blueprint
from flask_restplus import Api
from .business import API as BUSINESS_API
from .directors import API as DIRECTORS_API
from .event import API as EVENT_API
from .filing import API as FILING_API
from .meta import API as META_API
from .office import API as OFFICE_API
from .ops import API as OPS_API
__all__ = ('API_BLUEPRINT', 'OPS_BLUEPRINT')
# This will add the Authorize button to the swagger docs
AUTHORIZATIONS = {
'apikey': {
'type': 'apiKey',
'in': 'header',
'name': 'Authorization'
}
}
OPS_BLUEPRINT = Blueprint('API_OPS', __name__, url_prefix='/ops')
API_OPS = Api(OPS_BLUEPRINT,
title='Service OPS API',
version='1.0',
description='The COLIN API for the Legal Entities System',
security=['apikey'],
authorizations=AUTHORIZATIONS)
API_OPS.add_namespace(OPS_API, path='/')
API_BLUEPRINT = Blueprint('API', __name__, url_prefix='/api/v1')
API = Api(API_BLUEPRINT,
title='COLIN API',
version='1.0',
description='The COLIN API for the Legal Entities System',
security=['apikey'],
authorizations=AUTHORIZATIONS)
API.add_namespace(META_API, path='/meta')
API.add_namespace(BUSINESS_API, path='/businesses')
API.add_namespace(DIRECTORS_API, path='/businesses/directors')
API.add_namespace(EVENT_API, path='/businesses/event')
API.add_namespace(OFFICE_API, path='/businesses/office')
API.add_namespace(FILING_API, path='/businesses/filings')
| 33.930556
| 95
| 0.7237
|
4a06f306d8913c6e9b449a09452f1cc23966c5d8
| 2,501
|
py
|
Python
|
flexmeasures/data/schemas/sensors.py
|
FlexMeasures/flexmeasures
|
a4367976d37ac5721b8eb3ce8a2414595e52c678
|
[
"Apache-2.0"
] | 12
|
2021-12-18T10:41:10.000Z
|
2022-03-29T23:00:29.000Z
|
flexmeasures/data/schemas/sensors.py
|
FlexMeasures/flexmeasures
|
a4367976d37ac5721b8eb3ce8a2414595e52c678
|
[
"Apache-2.0"
] | 103
|
2021-12-07T08:51:15.000Z
|
2022-03-31T13:28:48.000Z
|
flexmeasures/data/schemas/sensors.py
|
FlexMeasures/flexmeasures
|
a4367976d37ac5721b8eb3ce8a2414595e52c678
|
[
"Apache-2.0"
] | 3
|
2022-01-18T04:45:48.000Z
|
2022-03-14T09:48:22.000Z
|
from marshmallow import Schema, fields, validates, ValidationError
from flexmeasures.data import ma
from flexmeasures.data.models.generic_assets import GenericAsset
from flexmeasures.data.models.time_series import Sensor
from flexmeasures.data.schemas.utils import (
FMValidationError,
MarshmallowClickMixin,
with_appcontext_if_needed,
)
from flexmeasures.utils.unit_utils import is_valid_unit
class SensorSchemaMixin(Schema):
"""
Base sensor schema.
Here we include all fields which are implemented by timely_beliefs.SensorDBMixin
All classes inheriting from timely beliefs sensor don't need to repeat these.
In a while, this schema can represent our unified Sensor class.
When subclassing, also subclass from `ma.SQLAlchemySchema` and add your own DB model class, e.g.:
class Meta:
model = Asset
"""
name = ma.auto_field(required=True)
unit = ma.auto_field(required=True)
timezone = ma.auto_field()
event_resolution = fields.TimeDelta(required=True, precision="minutes")
entity_address = fields.String(dump_only=True)
@validates("unit")
def validate_unit(self, unit: str):
if not is_valid_unit(unit):
raise ValidationError(f"Unit '{unit}' cannot be handled.")
class SensorSchema(SensorSchemaMixin, ma.SQLAlchemySchema):
"""
Sensor schema, with validations.
"""
generic_asset_id = fields.Integer(required=True)
@validates("generic_asset_id")
def validate_generic_asset(self, generic_asset_id: int):
generic_asset = GenericAsset.query.get(generic_asset_id)
if not generic_asset:
raise ValidationError(
f"Generic asset with id {generic_asset_id} doesn't exist."
)
class Meta:
model = Sensor
class SensorIdField(MarshmallowClickMixin, fields.Int):
"""Field that deserializes to a Sensor and serializes back to an integer."""
@with_appcontext_if_needed()
def _deserialize(self, value: int, attr, obj, **kwargs) -> Sensor:
"""Turn a sensor id into a Sensor."""
sensor = Sensor.query.get(value)
if sensor is None:
raise FMValidationError(f"No sensor found with id {value}.")
# lazy loading now (sensor is somehow not in session after this)
sensor.generic_asset
return sensor
def _serialize(self, sensor: Sensor, attr, data, **kwargs) -> int:
"""Turn a Sensor into a sensor id."""
return sensor.id
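# Editorial illustration (assumed usage, not from the flexmeasures docs): a
# CLI option declared with SensorIdField() deserializes an id such as 3 into
# the matching Sensor row (raising FMValidationError if absent) and serializes
# a Sensor back to its integer id.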
| 33.346667
| 101
| 0.697721
|
4a06f3a1dd40063040d9a378fe704fef54b03a90
| 916
|
py
|
Python
|
models.py
|
pierrekarpov/RestfulDataScience
|
2bef9517a28d0e920b000766fc316ecb86f57d7a
|
[
"MIT"
] | null | null | null |
models.py
|
pierrekarpov/RestfulDataScience
|
2bef9517a28d0e920b000766fc316ecb86f57d7a
|
[
"MIT"
] | null | null | null |
models.py
|
pierrekarpov/RestfulDataScience
|
2bef9517a28d0e920b000766fc316ecb86f57d7a
|
[
"MIT"
] | null | null | null |
import sqlite3 as sql
db_name = "restful_data_science.db"
# TODO: need to check that no record with that filename exists
def insertModel(classifier_type, feature_count, file_name):
con = sql.connect(db_name)
cur = con.cursor()
cur.execute("INSERT INTO models (classifier_type, feature_count, file_name) VALUES (?, ?, ?)", (classifier_type, feature_count, file_name))
id = cur.lastrowid
con.commit()
con.close()
return id
def retrieveModels():
con = sql.connect(db_name)
cur = con.cursor()
cur.execute("SELECT id, classifier_type, feature_count, file_name FROM models")
models = cur.fetchall()
con.close()
return models
def retrieveModel(id):
con = sql.connect(db_name)
cur = con.cursor()
cur.execute("SELECT id, classifier_type, feature_count, file_name FROM models WHERE id = " + str(id))
model = cur.fetchall()
con.close()
return model
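# Editorial usage sketch (assumes a `models` table already exists in the
# SQLite file):
#   model_id = insertModel("svm", 4, "svm_model.pkl")
#   print(retrieveModel(model_id))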
| 29.548387
| 143
| 0.69214
|
4a06f51b9c3ec6372742163ea763231187d993d1
| 3,497
|
py
|
Python
|
Fingerprint/LinearPaths.py
|
UnixJunkie/frowns
|
427e4c11a8a4dbe865828d18221899478497795e
|
[
"BSD-3-Clause"
] | null | null | null |
Fingerprint/LinearPaths.py
|
UnixJunkie/frowns
|
427e4c11a8a4dbe865828d18221899478497795e
|
[
"BSD-3-Clause"
] | null | null | null |
Fingerprint/LinearPaths.py
|
UnixJunkie/frowns
|
427e4c11a8a4dbe865828d18221899478497795e
|
[
"BSD-3-Clause"
] | null | null | null |
"""Linear Paths
Generate linear paths for a molecule.
For example, generate all linear paths up to depth 5
paths = generatePaths(molecule, maxdepth=5)
These paths can be used for a variety of cases, but we are using
them for the purposes of fingerprinting molecules.
See Fingerprint.py
"""
from Fingerprint import *
# Once again we are using a depth first search approach to walking
# a molecule. Each linear span is converted into a string value
# this string value is used to create the fingerprints.
#
# Modify name_atom and name_bond to change how the
# hashing works. Do you want charges? aromaticity?
# anything.
# XXX FIX ME
# A simple optimization is to cache all the names before
# the dfs walk.
# There are more optimizations for later...
def name_atom(atom):
if atom.aromatic:
if atom.symbol == "N" and atom.imp_hcount == 0 and atom.hcount == 1:
return "nH"
else:
return atom.symbol[0].lower() + atom.symbol[1:]
return atom.symbol
def name_bond(bond, lookup={1:'-',2:'=',3:'#',4:'~'}):
return lookup[bond.bondtype]
def _dfswalk(atom, visitedAtoms, path, paths, depth, maxdepth,
name_atom, name_bond):
if depth >= maxdepth:
return
for bond in atom.bonds:
oatom = bond.xatom(atom)
if not visitedAtoms.has_key(oatom.handle):
path.append("%s%s"%(name_bond(bond), name_atom(oatom)))
            # only keep the path if the last character of its first element is
            # <= the last character of its last element (canonical direction)
if path[0][-1] <= path[-1][-1]:
p = (depth+1, "".join(path))
paths[p] = 1
visitedAtoms[atom.handle] = 1
_dfswalk(oatom, visitedAtoms, path, paths, depth+1, maxdepth,
name_atom, name_bond)
path.pop()
del visitedAtoms[atom.handle]
def generatePaths(molecule, maxdepth=5,
name_atom=name_atom, name_bond=name_bond):
"""(molecule, maxdepth, *name_atom, *name_bond) -> linear paths
Generate all linear paths through a molecule up to maxdepth
change name_atom and name_bond to name the atoms and bonds
in the molecule
name_atom and name_bond must return a stringable value"""
paths = {}
for atom in molecule.atoms:
        _dfswalk(atom, {atom.handle: 1}, [name_atom(atom)], paths, 1, maxdepth,
name_atom, name_bond)
return paths.keys()
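# Editorial illustration (derived from the code above): for the molecule "CCO",
# generatePaths yields keys such as (2, "C-C"), (2, "C-O") and (3, "C-C-O");
# reversed duplicates like "O-C-C" are suppressed by the head/tail comparison
# in _dfswalk.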
class SplitFingerprintGenerator:
def __init__(self, maxdepth=7, integersPerAtoms=[4]*6):
self.maxdepth = maxdepth
self.integersPerAtoms = integersPerAtoms
assert maxdepth-1 == len(integersPerAtoms)
def createFP(self, molecule):
p = SplitFingerprint(self.maxdepth, self.integersPerAtoms)
paths = generatePaths(molecule, maxdepth=self.maxdepth)
paths.sort()
for length, s in paths:
p.addPath(length, s)
return p
if __name__ == "__main__":
from frowns import Smiles
mol = Smiles.smilin("CCCc1cc[nH]c1")
mol2 = Smiles.smilin("c1cc[nH]c1")
paths = generatePaths(mol)
pathLengths = {}
for p in paths:
l, s = p
pathLengths[l] = pathLengths.get(l, []) + [s]
generator = SplitFingerprintGenerator()
sp = generator.createFP(mol)
sp2 = generator.createFP(mol2)
assert sp in sp
assert sp2 in sp
print "".join(map(str,sp.to_list()))
4a06f59bcb32af08c92959ac0a85dd687b55bd30 | 15,020 | py | Python | Lib/lib-old/ni.py | marcosptf/cpython-2.0.1 @ 73c739a764e8b1dc84640e73b880bc66e1916bca | ["PSF-2.0"] | stars: 5 | issues: 6 | forks: 2
"""New import scheme with package support.
Quick Reference
---------------
- To enable package support, execute "import ni" before importing any
packages. Importing this module automatically installs the relevant
import hooks.
- To create a package named spam containing sub-modules ham, bacon and
eggs, create a directory spam somewhere on Python's module search
path (i.e. spam's parent directory must be one of the directories in
sys.path or $PYTHONPATH); then create files ham.py, bacon.py and
eggs.py inside spam.
- To import module ham from package spam and use function hamneggs()
from that module, you can either do
import spam.ham # *not* "import spam" !!!
spam.ham.hamneggs()
or
from spam import ham
ham.hamneggs()
or
from spam.ham import hamneggs
hamneggs()
- Importing just "spam" does not do what you expect: it creates an
empty package named spam if one does not already exist, but it does
not import spam's submodules. The only submodule that is guaranteed
to be imported is spam.__init__, if it exists. Note that
spam.__init__ is a submodule of package spam. It can refer to
spam's namespace via the '__.' prefix, for instance
__.spam_inited = 1 # Set a package-level variable
Theory of Operation
-------------------
A Package is a module that can contain other modules. Packages can be
nested. Packages introduce dotted names for modules, like P.Q.M, which
could correspond to a file P/Q/M.py found somewhere on sys.path. It
is possible to import a package itself, though this makes little sense
unless the package contains a module called __init__.
A package has two variables that control the namespace used for
packages and modules, both initialized to sensible defaults the first
time the package is referenced.
(1) A package's *module search path*, contained in the per-package
variable __path__, defines a list of *directories* where submodules or
subpackages of the package are searched. It is initialized to the
directory containing the package. Setting this variable to None makes
the module search path default to sys.path (this is not quite the same
as setting it to sys.path, since the latter won't track later
assignments to sys.path).
(2) A package's *import domain*, contained in the per-package variable
__domain__, defines a list of *packages* that are searched (using
their respective module search paths) to satisfy imports. It is
initialized to the list consisting of the package itself, its parent
package, its parent's parent, and so on, ending with the root package
(the nameless package containing all top-level packages and modules,
whose module search path is None, implying sys.path).
The default domain implements a search algorithm called "expanding
search". An alternative search algorithm called "explicit search"
fixes the import search path to contain only the root package,
requiring the modules in the package to name all imported modules by
their full name. The convention of using '__' to refer to the current
package (both as a per-module variable and in module names) can be
used by packages using explicit search to refer to modules in the same
package; this combination is known as "explicit-relative search".
The PackageImporter and PackageLoader classes together implement the
following policies:
- There is a root package, whose name is ''. It cannot be imported
directly but may be referenced, e.g. by using '__' from a top-level
module.
- In each module or package, the variable '__' contains a reference to
the parent package; in the root package, '__' points to itself.
- In the name for imported modules (e.g. M in "import M" or "from M
import ..."), a leading '__' refers to the current package (i.e.
the package containing the current module); leading '__.__' and so
on refer to the current package's parent, and so on. The use of
'__' elsewhere in the module name is not supported.
- Modules are searched using the "expanding search" algorithm by
virtue of the default value for __domain__.
- If A.B.C is imported, A is searched using __domain__; then
subpackage B is searched in A using its __path__, and so on.
- Built-in modules have priority: even if a file sys.py exists in a
package, "import sys" imports the built-in sys module.
- The same holds for frozen modules, for better or for worse.
- Submodules and subpackages are not automatically loaded when their
parent package is loaded.
- The construct "from package import *" is illegal. (It can still be
used to import names from a module.)
- When "from package import module1, module2, ..." is used, those
modules are explicitly loaded.
- When a package is loaded, if it has a submodule __init__, that
module is loaded. This is the place where required submodules can
be loaded, the __path__ variable extended, etc. The __init__ module
is loaded even if the package was loaded only in order to create a
stub for a sub-package: if "import P.Q.R" is the first reference to
P, and P has a submodule __init__, P.__init__ is loaded before P.Q
is even searched.
Caveats:
- It is possible to import a package that has no __init__ submodule;
this is not particularly useful but there may be useful applications
for it (e.g. to manipulate its search paths from the outside!).
- There are no special provisions for os.chdir(). If you plan to use
os.chdir() before you have imported all your modules, it is better
not to have relative pathnames in sys.path. (This could actually be
fixed by changing the implementation of path_join() in the hook to
absolutize paths.)
- Packages and modules are introduced in sys.modules as soon as their
loading is started. When the loading is terminated by an exception,
the sys.modules entries remain around.
- There are no special measures to support mutually recursive modules,
but it will work under the same conditions where it works in the
flat module space system.
- Sometimes dummy entries (whose value is None) are entered in
sys.modules, to indicate that a particular module does not exist --
this is done to speed up the expanding search algorithm when a
module residing at a higher level is repeatedly imported (Python
promises that importing a previously imported module is cheap!)
- Although dynamically loaded extensions are allowed inside packages,
the current implementation (hardcoded in the interpreter) of their
initialization may cause problems if an extension invokes the
interpreter during its initialization.
- reload() may find another version of the module only if it occurs on
the package search path. Thus, it keeps the connection to the
package to which the module belongs, but may find a different file.
XXX Need to have an explicit name for '', e.g. '__root__'.
"""
import imp
import string
import sys
import __builtin__
import ihooks
from ihooks import ModuleLoader, ModuleImporter
class PackageLoader(ModuleLoader):
"""A subclass of ModuleLoader with package support.
find_module_in_dir() will succeed if there's a subdirectory with
the given name; load_module() will create a stub for a package and
load its __init__ module if it exists.
"""
def find_module_in_dir(self, name, dir):
if dir is not None:
dirname = self.hooks.path_join(dir, name)
if self.hooks.path_isdir(dirname):
return None, dirname, ('', '', 'PACKAGE')
return ModuleLoader.find_module_in_dir(self, name, dir)
def load_module(self, name, stuff):
file, filename, info = stuff
suff, mode, type = info
if type == 'PACKAGE':
return self.load_package(name, stuff)
if sys.modules.has_key(name):
m = sys.modules[name]
else:
sys.modules[name] = m = imp.new_module(name)
self.set_parent(m)
if type == imp.C_EXTENSION and '.' in name:
return self.load_dynamic(name, stuff)
else:
return ModuleLoader.load_module(self, name, stuff)
def load_dynamic(self, name, stuff):
file, filename, (suff, mode, type) = stuff
# Hack around restriction in imp.load_dynamic()
i = string.rfind(name, '.')
tail = name[i+1:]
if sys.modules.has_key(tail):
save = sys.modules[tail]
else:
save = None
sys.modules[tail] = imp.new_module(name)
try:
m = imp.load_dynamic(tail, filename, file)
finally:
if save:
sys.modules[tail] = save
else:
del sys.modules[tail]
sys.modules[name] = m
return m
def load_package(self, name, stuff):
file, filename, info = stuff
if sys.modules.has_key(name):
package = sys.modules[name]
else:
sys.modules[name] = package = imp.new_module(name)
package.__path__ = [filename]
self.init_package(package)
return package
def init_package(self, package):
self.set_parent(package)
self.set_domain(package)
self.call_init_module(package)
def set_parent(self, m):
name = m.__name__
if '.' in name:
name = name[:string.rfind(name, '.')]
else:
name = ''
m.__ = sys.modules[name]
def set_domain(self, package):
name = package.__name__
package.__domain__ = domain = [name]
while '.' in name:
name = name[:string.rfind(name, '.')]
domain.append(name)
if name:
domain.append('')
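# Worked example of the two methods above (not in the original file):
# for a module 'P.Q.M', set_parent binds M.__ to sys.modules['P.Q'];
# for a package named 'P.Q', set_domain yields
#     P.Q.__domain__ == ['P.Q', 'P', '']
# i.e. the package, its parent, then the root package -- exactly the
# "expanding search" order described in the module docstring.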
def call_init_module(self, package):
stuff = self.find_module('__init__', package.__path__)
if stuff:
m = self.load_module(package.__name__ + '.__init__', stuff)
package.__init__ = m
class PackageImporter(ModuleImporter):
"""Importer that understands packages and '__'."""
def __init__(self, loader = None, verbose = 0):
ModuleImporter.__init__(self,
loader or PackageLoader(None, verbose), verbose)
def import_module(self, name, globals={}, locals={}, fromlist=[]):
if globals.has_key('__'):
package = globals['__']
else:
# No calling context, assume in root package
package = sys.modules['']
if name[:3] in ('__.', '__'):
p = package
name = name[3:]
while name[:3] in ('__.', '__'):
p = p.__
name = name[3:]
if not name:
return self.finish(package, p, '', fromlist)
if '.' in name:
i = string.find(name, '.')
name, tail = name[:i], name[i:]
else:
tail = ''
mname = p.__name__ and p.__name__+'.'+name or name
m = self.get1(mname)
return self.finish(package, m, tail, fromlist)
if '.' in name:
i = string.find(name, '.')
name, tail = name[:i], name[i:]
else:
tail = ''
for pname in package.__domain__:
mname = pname and pname+'.'+name or name
m = self.get0(mname)
if m: break
else:
raise ImportError, "No such module %s" % name
return self.finish(m, m, tail, fromlist)
def finish(self, module, m, tail, fromlist):
# Got ....A; now get ....A.B.C.D
yname = m.__name__
if tail and sys.modules.has_key(yname + tail): # Fast path
yname, tail = yname + tail, ''
m = self.get1(yname)
while tail:
i = string.find(tail, '.', 1)
if i > 0:
head, tail = tail[:i], tail[i:]
else:
head, tail = tail, ''
yname = yname + head
m = self.get1(yname)
# Got ....A.B.C.D; now finalize things depending on fromlist
if not fromlist:
return module
if '__' in fromlist:
raise ImportError, "Can't import __ from anywhere"
if not hasattr(m, '__path__'): return m
if '*' in fromlist:
raise ImportError, "Can't import * from a package"
for f in fromlist:
if hasattr(m, f): continue
fname = yname + '.' + f
self.get1(fname)
return m
def get1(self, name):
m = self.get(name)
if not m:
raise ImportError, "No module named %s" % name
return m
def get0(self, name):
m = self.get(name)
if not m:
sys.modules[name] = None
return m
def get(self, name):
# Internal routine to get or load a module when its parent exists
if sys.modules.has_key(name):
return sys.modules[name]
if '.' in name:
i = string.rfind(name, '.')
head, tail = name[:i], name[i+1:]
else:
head, tail = '', name
path = sys.modules[head].__path__
stuff = self.loader.find_module(tail, path)
if not stuff:
return None
sys.modules[name] = m = self.loader.load_module(name, stuff)
if head:
setattr(sys.modules[head], tail, m)
return m
def reload(self, module):
name = module.__name__
if '.' in name:
i = string.rfind(name, '.')
head, tail = name[:i], name[i+1:]
path = sys.modules[head].__path__
else:
tail = name
path = sys.modules[''].__path__
stuff = self.loader.find_module(tail, path)
if not stuff:
raise ImportError, "No module named %s" % name
return self.loader.load_module(name, stuff)
def unload(self, module):
if hasattr(module, '__path__'):
raise ImportError, "don't know how to unload packages yet"
ModuleImporter.unload(self, module)  # base-class unload; calling PackageImporter.unload here would recurse forever
def install(self):
if not sys.modules.has_key(''):
sys.modules[''] = package = imp.new_module('')
package.__path__ = None
self.loader.init_package(package)
for m in sys.modules.values():
if not m: continue
if not hasattr(m, '__'):
self.loader.set_parent(m)
ModuleImporter.install(self)
def install(v = 0):
ihooks.install(PackageImporter(None, v))
def uninstall():
ihooks.uninstall()
def ni(v = 0):
install(v)
def no():
uninstall()
def test():
import pdb
try:
testproper()
except:
sys.last_type, sys.last_value, sys.last_traceback = sys.exc_info()
print
print sys.last_type, ':', sys.last_value
print
pdb.pm()
def testproper():
install(1)
try:
import mactest
print dir(mactest)
raw_input('OK?')
finally:
uninstall()
if __name__ == '__main__':
test()
else:
install()
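# A minimal usage sketch (consistent with the docstring's Quick Reference;
# 'spam.ham' is an illustrative package, not something this module ships):
#
#   import ni          # importing the module installs the hooks (see above)
#   import spam.ham    # dotted imports now resolve through PackageImporter
#   ni.no()            # ni.no() uninstalls; ni.ni() re-installs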
4a06f5c1b63ca26d51cf29533b6e1daeeadb4472 | 2,531 | py | Python | beaconrunner/graphing.py | SHSR2001/beaconrunner @ d4d8db1159ad049eab3feb243f2c672f8c7bf180 | ["MIT"] | forks: 1
from .specs import (
SLOTS_PER_EPOCH, MAX_VALIDATORS_PER_COMMITTEE, SECONDS_PER_SLOT,
MIN_GENESIS_TIME, GENESIS_DELAY,
Checkpoint, AttestationData, Attestation,
BeaconBlock, BeaconBlockBody, SignedBeaconBlock,
get_block_root_at_slot, compute_start_slot_at_epoch, process_slots,
get_current_epoch, get_epoch_signature, process_block,get_head,
)
from .network import (
Network,
update_network, disseminate_attestations,
disseminate_sync_committees,
disseminate_block
)
from .validatorlib import (
BRValidator,
get_attestation_signature, aggregate_attestations, should_process_attestation,
make_block,
)
from eth2spec.utils.ssz.ssz_typing import Container, List, uint64, Bitlist, Bitvector, Bytes32
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
import networkx as nx
import pydot
import igraph as ig
import matplotlib.pyplot as plt
class GraphingData:
def __init__(
self,
G1=None,
last_slot_graphed=None,
vertex_number=0,
G2=None
):
# a mutable default argument (G1=nx.DiGraph()) would be shared -- and
# cleared -- across every GraphingData instance, so build a fresh graph
self.G1 = G1 if G1 is not None else nx.DiGraph()
self.G1.clear()
self.last_slot_graphed = last_slot_graphed
self.vertex_number = vertex_number
self.G2 = G2
def draw_graph(self, network: Network):
store = network.validators[0].store
block_head = get_head(store)
block = store.blocks[block_head]
parent_root = block.parent_root
parent_block = store.blocks[parent_root]
if str(block.state_root.hex()[0:2]) not in self.G1:
if self.G1.number_of_nodes() == 0:
vertex = str(block.state_root.hex()[0:2])
self.G1.add_node(str(block.state_root.hex()[0:2]))
#print(self.G1.vs.indices)
if self.G1.number_of_nodes() > 1:
self.G1.add_edge(str(block.state_root.hex()[0:2]), str(parent_block.state_root.hex()[0:2]))
if (self.last_slot_graphed != network.validators[0].data.slot) and (network.validators[0].data.slot != 1):
self.last_slot_graphed = network.validators[0].data.slot
self.G2 = ig.Graph.Adjacency((nx.to_numpy_matrix(self.G1) > 0).tolist())
#self.G2 = ig.Graph(len(self.G1), list(zip(*list(zip(*nx.to_edgelist(self.G1)))[:2])))
layout = self.G2.layout_reingold_tilford(mode="in", root=0)
fig, ax = plt.subplots()
ig.plot(self.G2, layout=layout, target=ax, bbox = (0,0,500,500))
#nx.draw(self.G1, with_labels=True)
plt.show()
4a06f748d850fa5105e0900cfb79f30e14cdaae2 | 1,893 | py | Python | netdev/vendors/fujitsu/fujitsu_switch.py | maliciousgroup/netdev @ e2585ac24891cba172fc2056e9868e1d7c41ddc2 | ["Apache-2.0"] | stars: 199 | issues: 55 | forks: 54
"""Subclass specific to Fujitsu Blade Switch"""
import re
from netdev.logger import logger
from netdev.vendors.ios_like import IOSLikeDevice
class FujitsuSwitch(IOSLikeDevice):
"""Class for working with Fujitsu Blade switch"""
_pattern = r"\({prompt}.*?\) (\(.*?\))?[{delimiters}]"
"""Pattern for using in reading buffer. When it found processing ends"""
_disable_paging_command = "no pager"
"""Command for disabling paging"""
_config_enter = "conf"
"""Command for entering to configuration mode"""
async def _set_base_prompt(self):
"""
Setting two important vars
base_prompt - textual prompt in CLI (usually hostname)
base_pattern - regexp for finding the end of command. IT's platform specific parameter
For Fujitsu devices base_pattern is "(prompt) (\(.*?\))?[>|#]"
"""
logger.info("Host {}: Setting base prompt".format(self._host))
prompt = await self._find_prompt()
# Strip off trailing terminator
self._base_prompt = prompt[1:-3]
delimiters = map(re.escape, type(self)._delimiter_list)
delimiters = r"|".join(delimiters)
base_prompt = re.escape(self._base_prompt[:12])
pattern = type(self)._pattern
self._base_pattern = pattern.format(prompt=base_prompt, delimiters=delimiters)
logger.debug("Host {}: Base Prompt: {}".format(self._host, self._base_prompt))
logger.debug("Host {}: Base Pattern: {}".format(self._host, self._base_pattern))
return self._base_prompt
@staticmethod
def _normalize_linefeeds(a_string):
"""
Convert '\r\r\n','\r\n', '\n\r' to '\n and remove extra '\n\n' in the text
"""
newline = re.compile(r"(\r\r\n|\r\n|\n\r)")
return newline.sub("\n", a_string).replace("\n\n", "\n")
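# A small worked example of the helper above (a sketch, not in the original):
#   FujitsuSwitch._normalize_linefeeds("a\r\r\nb\r\n\r\nc")
# first collapses each \r\r\n / \r\n to \n, giving "a\nb\n\nc", and the
# final replace("\n\n", "\n") pass yields "a\nb\nc".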
4a06f7a91672e4986ca54aa61c48fd69ff107243 | 22,512 | py | Python | detectron/roi_data/rpn.py | fyangneil/DSFPN @ 95ba534d451598db7af05b009aec9b40ac675182 | ["Apache-2.0"] | forks: 1
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Minibatch construction for Region Proposal Networks (RPN)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import numpy as np
import numpy.random as npr
from detectron.core.config import cfg
import detectron.roi_data.data_utils as data_utils
import detectron.utils.blob as blob_utils
import detectron.utils.boxes as box_utils
logger = logging.getLogger(__name__)
def get_rpn_blob_names(is_training=True):
"""Blob names used by RPN."""
# im_info: (height, width, image scale)
blob_names = ['im_info']
if is_training:
# gt boxes: (batch_idx, x1, y1, x2, y2, cls)
blob_names += ['roidb']
if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_RPN:
# Same format as RPN blobs, but one per FPN level
for lvl in range(cfg.FPN.RPN_MIN_LEVEL, cfg.FPN.RPN_MAX_LEVEL + 1):
blob_names += [
'rpn_labels_int32_wide_fpn' + str(lvl),
'rpn_bbox_targets_wide_fpn' + str(lvl),
'rpn_bbox_inside_weights_wide_fpn' + str(lvl),
'rpn_bbox_outside_weights_wide_fpn' + str(lvl)
]
else:
# Single level RPN blobs
blob_names += [
'rpn_labels_int32_wide',
'rpn_bbox_targets_wide',
'rpn_bbox_inside_weights_wide',
'rpn_bbox_outside_weights_wide'
]
return blob_names
def get_deep_sup_rpn_blob_names(is_training=True):
"""Blob names used by RPN."""
# im_info: (height, width, image scale)
blob_names = ['deep_sup_rpn_im_info']
if is_training:
# gt boxes: (batch_idx, x1, y1, x2, y2, cls)
blob_names += ['deep_sup_rpn_roidb']
if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_RPN:
# Same format as RPN blobs, but one per FPN level
for lvl in range(cfg.FPN.RPN_MIN_LEVEL, cfg.FPN.RPN_MAX_LEVEL + 1):
blob_names += [
'deep_sup_rpn_labels_int32_wide_fpn' + str(lvl),
'deep_sup_rpn_bbox_targets_wide_fpn' + str(lvl),
'deep_sup_rpn_bbox_inside_weights_wide_fpn' + str(lvl),
'deep_sup_rpn_bbox_outside_weights_wide_fpn' + str(lvl)
]
else:
# Single level RPN blobs
blob_names += [
'deep_sup_rpn_labels_int32_wide',
'deep_sup_rpn_bbox_targets_wide',
'deep_sup_rpn_bbox_inside_weights_wide',
'deep_sup_rpn_bbox_outside_weights_wide'
]
return blob_names
def add_rpn_blobs(blobs, im_scales, roidb):
"""Add blobs needed training RPN-only and end-to-end Faster R-CNN models."""
if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_RPN:
# RPN applied to many feature levels, as in the FPN paper
k_max = cfg.FPN.RPN_MAX_LEVEL
k_min = cfg.FPN.RPN_MIN_LEVEL
foas = []
for lvl in range(k_min, k_max + 1):
field_stride = 2.**lvl
anchor_sizes = (cfg.FPN.RPN_ANCHOR_START_SIZE * 2.**(lvl - k_min), )
anchor_aspect_ratios = cfg.FPN.RPN_ASPECT_RATIOS
foa = data_utils.get_field_of_anchors(
field_stride, anchor_sizes, anchor_aspect_ratios
)
foas.append(foa)
all_anchors = np.concatenate([f.field_of_anchors for f in foas])
else:
foa = data_utils.get_field_of_anchors(
cfg.RPN.STRIDE, cfg.RPN.SIZES, cfg.RPN.ASPECT_RATIOS
)
all_anchors = foa.field_of_anchors
for im_i, entry in enumerate(roidb):
scale = im_scales[im_i]
im_height = np.round(entry['height'] * scale)
im_width = np.round(entry['width'] * scale)
gt_inds = np.where(
(entry['gt_classes'] > 0) & (entry['is_crowd'] == 0)
)[0]
gt_rois = entry['boxes'][gt_inds, :] * scale
im_info = np.array([[im_height, im_width, scale]], dtype=np.float32)
blobs['im_info'].append(im_info)
# Add RPN targets
if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_RPN:
# RPN applied to many feature levels, as in the FPN paper
rpn_blobs = _get_rpn_blobs(
im_height, im_width, foas, all_anchors, gt_rois
)
for i, lvl in enumerate(range(k_min, k_max + 1)):
for k, v in rpn_blobs[i].items():
blobs[k + '_fpn' + str(lvl)].append(v)
else:
# Classical RPN, applied to a single feature level
rpn_blobs = _get_rpn_blobs(
im_height, im_width, [foa], all_anchors, gt_rois
)
for k, v in rpn_blobs.items():
blobs[k].append(v)
for k, v in blobs.items():
if isinstance(v, list) and len(v) > 0:
blobs[k] = np.concatenate(v)
valid_keys = [
'has_visible_keypoints', 'boxes', 'segms', 'seg_areas', 'gt_classes',
'gt_overlaps', 'is_crowd', 'box_to_gt_ind_map', 'gt_keypoints'
]
minimal_roidb = [{} for _ in range(len(roidb))]
for i, e in enumerate(roidb):
for k in valid_keys:
if k in e:
minimal_roidb[i][k] = e[k]
blobs['roidb'] = blob_utils.serialize(minimal_roidb)
# Always return valid=True, since RPN minibatches are valid by design
return True
def _get_rpn_blobs(im_height, im_width, foas, all_anchors, gt_boxes):
total_anchors = all_anchors.shape[0]
straddle_thresh = cfg.TRAIN.RPN_STRADDLE_THRESH
if straddle_thresh >= 0:
# Only keep anchors inside the image by a margin of straddle_thresh
# Set TRAIN.RPN_STRADDLE_THRESH to -1 (or a large value) to keep all
# anchors
inds_inside = np.where(
(all_anchors[:, 0] >= -straddle_thresh) &
(all_anchors[:, 1] >= -straddle_thresh) &
(all_anchors[:, 2] < im_width + straddle_thresh) &
(all_anchors[:, 3] < im_height + straddle_thresh)
)[0]
# keep only inside anchors
anchors = all_anchors[inds_inside, :]
else:
inds_inside = np.arange(all_anchors.shape[0])
anchors = all_anchors
num_inside = len(inds_inside)
logger.debug('total_anchors: {}'.format(total_anchors))
logger.debug('inds_inside: {}'.format(num_inside))
logger.debug('anchors.shape: {}'.format(anchors.shape))
# Compute anchor labels:
# label=1 is positive, 0 is negative, -1 is don't care (ignore)
labels = np.empty((num_inside, ), dtype=np.int32)
labels.fill(-1)
if len(gt_boxes) > 0:
# Compute overlaps between the anchors and the gt boxes overlaps
anchor_by_gt_overlap = box_utils.bbox_overlaps(anchors, gt_boxes)
# Map from anchor to gt box that has highest overlap
anchor_to_gt_argmax = anchor_by_gt_overlap.argmax(axis=1)
# For each anchor, amount of overlap with most overlapping gt box
anchor_to_gt_max = anchor_by_gt_overlap[np.arange(num_inside),
anchor_to_gt_argmax]
# Map from gt box to an anchor that has highest overlap
gt_to_anchor_argmax = anchor_by_gt_overlap.argmax(axis=0)
# For each gt box, amount of overlap with most overlapping anchor
gt_to_anchor_max = anchor_by_gt_overlap[
gt_to_anchor_argmax,
np.arange(anchor_by_gt_overlap.shape[1])
]
# Find all anchors that share the max overlap amount
# (this includes many ties)
anchors_with_max_overlap = np.where(
anchor_by_gt_overlap == gt_to_anchor_max
)[0]
# Fg label: for each gt use anchors with highest overlap
# (including ties)
labels[anchors_with_max_overlap] = 1
# Fg label: above threshold IOU
labels[anchor_to_gt_max >= cfg.TRAIN.RPN_POSITIVE_OVERLAP] = 1
# subsample positive labels if we have too many
num_fg = int(cfg.TRAIN.RPN_FG_FRACTION * cfg.TRAIN.RPN_BATCH_SIZE_PER_IM)
fg_inds = np.where(labels == 1)[0]
# print('fg_inds',fg_inds.size)
if len(fg_inds) > num_fg:
disable_inds = npr.choice(
fg_inds, size=(len(fg_inds) - num_fg), replace=False
)
labels[disable_inds] = -1
fg_inds = np.where(labels == 1)[0]
# subsample negative labels if we have too many
# (samples with replacement, but since the set of bg inds is large most
# samples will not have repeats)
num_bg = cfg.TRAIN.RPN_BATCH_SIZE_PER_IM - np.sum(labels == 1)
bg_inds = np.where(anchor_to_gt_max < cfg.TRAIN.RPN_NEGATIVE_OVERLAP)[0]
# print('bg_ind',bg_inds.size)
if len(bg_inds) > num_bg:
enable_inds = bg_inds[npr.randint(len(bg_inds), size=num_bg)]
else:
enable_inds = bg_inds
labels[enable_inds] = 0
bg_inds = np.where(labels == 0)[0]
bbox_targets = np.zeros((num_inside, 4), dtype=np.float32)
bbox_targets[fg_inds, :] = data_utils.compute_targets(
anchors[fg_inds, :], gt_boxes[anchor_to_gt_argmax[fg_inds], :]
)
# Bbox regression loss has the form:
# loss(x) = weight_outside * L(weight_inside * x)
# Inside weights allow us to set zero loss on an element-wise basis
# Bbox regression is only trained on positive examples so we set their
# weights to 1.0 (or otherwise if config is different) and 0 otherwise
bbox_inside_weights = np.zeros((num_inside, 4), dtype=np.float32)
bbox_inside_weights[labels == 1, :] = (1.0, 1.0, 1.0, 1.0)
# The bbox regression loss only averages by the number of images in the
# mini-batch, whereas we need to average by the total number of example
# anchors selected
# Outside weights are used to scale each element-wise loss so the final
# average over the mini-batch is correct
bbox_outside_weights = np.zeros((num_inside, 4), dtype=np.float32)
# uniform weighting of examples (given non-uniform sampling)
num_examples = np.sum(labels >= 0)
bbox_outside_weights[labels == 1, :] = 1.0 / num_examples
bbox_outside_weights[labels == 0, :] = 1.0 / num_examples
# Map up to original set of anchors
labels = data_utils.unmap(labels, total_anchors, inds_inside, fill=-1)
bbox_targets = data_utils.unmap(
bbox_targets, total_anchors, inds_inside, fill=0
)
bbox_inside_weights = data_utils.unmap(
bbox_inside_weights, total_anchors, inds_inside, fill=0
)
bbox_outside_weights = data_utils.unmap(
bbox_outside_weights, total_anchors, inds_inside, fill=0
)
# Split the generated labels, etc. into labels per each field of anchors
blobs_out = []
start_idx = 0
for foa in foas:
H = foa.field_size
W = foa.field_size
A = foa.num_cell_anchors
end_idx = start_idx + H * W * A
_labels = labels[start_idx:end_idx]
_bbox_targets = bbox_targets[start_idx:end_idx, :]
_bbox_inside_weights = bbox_inside_weights[start_idx:end_idx, :]
_bbox_outside_weights = bbox_outside_weights[start_idx:end_idx, :]
start_idx = end_idx
# labels output with shape (1, A, height, width)
_labels = _labels.reshape((1, H, W, A)).transpose(0, 3, 1, 2)
# bbox_targets output with shape (1, 4 * A, height, width)
_bbox_targets = _bbox_targets.reshape(
(1, H, W, A * 4)).transpose(0, 3, 1, 2)
# bbox_inside_weights output with shape (1, 4 * A, height, width)
_bbox_inside_weights = _bbox_inside_weights.reshape(
(1, H, W, A * 4)).transpose(0, 3, 1, 2)
# bbox_outside_weights output with shape (1, 4 * A, height, width)
_bbox_outside_weights = _bbox_outside_weights.reshape(
(1, H, W, A * 4)).transpose(0, 3, 1, 2)
blobs_out.append(
dict(
rpn_labels_int32_wide=_labels,
rpn_bbox_targets_wide=_bbox_targets,
rpn_bbox_inside_weights_wide=_bbox_inside_weights,
rpn_bbox_outside_weights_wide=_bbox_outside_weights
)
)
return blobs_out[0] if len(blobs_out) == 1 else blobs_out
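# A worked sketch of the sampling arithmetic above (values assume the common
# Detectron defaults TRAIN.RPN_BATCH_SIZE_PER_IM=256 and
# TRAIN.RPN_FG_FRACTION=0.5 -- check the cfg actually in use):
#   num_fg = int(0.5 * 256) = 128    -> at most 128 anchors keep label 1
#   num_bg = 256 - (#kept positives) -> negatives sampled to fill the batch
# and every remaining anchor stays at label -1, i.e. ignored by the loss.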
def add_deep_sup_rpn_blobs(blobs, im_scales, roidb):
"""Add blobs needed training RPN-only and end-to-end Faster R-CNN models."""
if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_RPN:
# RPN applied to many feature levels, as in the FPN paper
k_max = cfg.FPN.RPN_MAX_LEVEL
k_min = cfg.FPN.RPN_MIN_LEVEL
foas = []
for lvl in range(k_min, k_max + 1):
field_stride = 2.**lvl
anchor_sizes = (cfg.FPN.RPN_ANCHOR_START_SIZE * 2.**(lvl - k_min), )
anchor_aspect_ratios = cfg.FPN.RPN_ASPECT_RATIOS
foa = data_utils.get_field_of_anchors(
field_stride, anchor_sizes, anchor_aspect_ratios
)
foas.append(foa)
all_anchors = np.concatenate([f.field_of_anchors for f in foas])
else:
foa = data_utils.get_field_of_anchors(
cfg.RPN.STRIDE, cfg.RPN.SIZES, cfg.RPN.ASPECT_RATIOS
)
all_anchors = foa.field_of_anchors
for im_i, entry in enumerate(roidb):
scale = im_scales[im_i]
im_height = np.round(entry['height'] * scale)
im_width = np.round(entry['width'] * scale)
gt_inds = np.where(
(entry['gt_classes'] > 0) & (entry['is_crowd'] == 0)
)[0]
gt_rois = entry['boxes'][gt_inds, :] * scale
im_info = np.array([[im_height, im_width, scale]], dtype=np.float32)
blobs['deep_sup_rpn_im_info'].append(im_info)
# Add RPN targets
if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_RPN:
# RPN applied to many feature levels, as in the FPN paper
rpn_blobs = _get_deep_sup_rpn_blobs(
im_height, im_width, foas, all_anchors, gt_rois
)
for i, lvl in enumerate(range(k_min, k_max + 1)):
for k, v in rpn_blobs[i].items():
blobs[k + '_fpn' + str(lvl)].append(v)
else:
# Classical RPN, applied to a single feature level
rpn_blobs = _get_deep_sup_rpn_blobs(
im_height, im_width, [foa], all_anchors, gt_rois
)
for k, v in rpn_blobs.items():
blobs[k].append(v)
for k, v in blobs.items():
if isinstance(v, list) and len(v) > 0:
blobs[k] = np.concatenate(v)
valid_keys = [
'has_visible_keypoints', 'boxes', 'segms', 'seg_areas', 'gt_classes',
'gt_overlaps', 'is_crowd', 'box_to_gt_ind_map', 'gt_keypoints'
]
minimal_roidb = [{} for _ in range(len(roidb))]
for i, e in enumerate(roidb):
for k in valid_keys:
if k in e:
minimal_roidb[i][k] = e[k]
blobs['deep_sup_rpn_roidb'] = blob_utils.serialize(minimal_roidb)
# Always return valid=True, since RPN minibatches are valid by design
return True
def _get_deep_sup_rpn_blobs(im_height, im_width, foas, all_anchors, gt_boxes):
total_anchors = all_anchors.shape[0]
straddle_thresh = cfg.TRAIN.RPN_STRADDLE_THRESH
if straddle_thresh >= 0:
# Only keep anchors inside the image by a margin of straddle_thresh
# Set TRAIN.RPN_STRADDLE_THRESH to -1 (or a large value) to keep all
# anchors
inds_inside = np.where(
(all_anchors[:, 0] >= -straddle_thresh) &
(all_anchors[:, 1] >= -straddle_thresh) &
(all_anchors[:, 2] < im_width + straddle_thresh) &
(all_anchors[:, 3] < im_height + straddle_thresh)
)[0]
# keep only inside anchors
anchors = all_anchors[inds_inside, :]
else:
inds_inside = np.arange(all_anchors.shape[0])
anchors = all_anchors
num_inside = len(inds_inside)
logger.debug('total_anchors: {}'.format(total_anchors))
logger.debug('inds_inside: {}'.format(num_inside))
logger.debug('anchors.shape: {}'.format(anchors.shape))
# Compute anchor labels:
# label=1 is positive, 0 is negative, -1 is don't care (ignore)
labels = np.empty((num_inside, ), dtype=np.int32)
labels.fill(-1)
if len(gt_boxes) > 0:
# Compute overlaps between the anchors and the gt boxes overlaps
anchor_by_gt_overlap = box_utils.bbox_overlaps(anchors, gt_boxes)
# Map from anchor to gt box that has highest overlap
anchor_to_gt_argmax = anchor_by_gt_overlap.argmax(axis=1)
# For each anchor, amount of overlap with most overlapping gt box
anchor_to_gt_max = anchor_by_gt_overlap[np.arange(num_inside),
anchor_to_gt_argmax]
# Map from gt box to an anchor that has highest overlap
gt_to_anchor_argmax = anchor_by_gt_overlap.argmax(axis=0)
# For each gt box, amount of overlap with most overlapping anchor
gt_to_anchor_max = anchor_by_gt_overlap[
gt_to_anchor_argmax,
np.arange(anchor_by_gt_overlap.shape[1])
]
# Find all anchors that share the max overlap amount
# (this includes many ties)
anchors_with_max_overlap = np.where(
anchor_by_gt_overlap == gt_to_anchor_max
)[0]
# Fg label: for each gt use anchors with highest overlap
# (including ties)
labels[anchors_with_max_overlap] = 1
# Fg label: above threshold IOU
labels[anchor_to_gt_max >= cfg.TRAIN.RPN_POSITIVE_OVERLAP] = 1
# subsample positive labels if we have too many
num_fg = int(cfg.TRAIN.RPN_FG_FRACTION * cfg.TRAIN.RPN_BATCH_SIZE_PER_IM)
fg_inds = np.where(labels == 1)[0]
# print('fg_inds',fg_inds.size)
if len(fg_inds) > num_fg:
disable_inds = npr.choice(
fg_inds, size=(len(fg_inds) - num_fg), replace=False
)
labels[disable_inds] = -1
fg_inds = np.where(labels == 1)[0]
# subsample negative labels if we have too many
# (samples with replacement, but since the set of bg inds is large most
# samples will not have repeats)
num_bg = cfg.TRAIN.RPN_BATCH_SIZE_PER_IM - np.sum(labels == 1)
bg_inds = np.where(anchor_to_gt_max < cfg.TRAIN.RPN_NEGATIVE_OVERLAP)[0]
# print('bg_ind',bg_inds.size)
if len(bg_inds) > num_bg:
enable_inds = bg_inds[npr.randint(len(bg_inds), size=num_bg)]
else:
enable_inds = bg_inds
labels[enable_inds] = 0
bg_inds = np.where(labels == 0)[0]
bbox_targets = np.zeros((num_inside, 4), dtype=np.float32)
bbox_targets[fg_inds, :] = data_utils.compute_targets(
anchors[fg_inds, :], gt_boxes[anchor_to_gt_argmax[fg_inds], :]
)
# Bbox regression loss has the form:
# loss(x) = weight_outside * L(weight_inside * x)
# Inside weights allow us to set zero loss on an element-wise basis
# Bbox regression is only trained on positive examples so we set their
# weights to 1.0 (or otherwise if config is different) and 0 otherwise
bbox_inside_weights = np.zeros((num_inside, 4), dtype=np.float32)
bbox_inside_weights[labels == 1, :] = (1.0, 1.0, 1.0, 1.0)
# The bbox regression loss only averages by the number of images in the
# mini-batch, whereas we need to average by the total number of example
# anchors selected
# Outside weights are used to scale each element-wise loss so the final
# average over the mini-batch is correct
bbox_outside_weights = np.zeros((num_inside, 4), dtype=np.float32)
# uniform weighting of examples (given non-uniform sampling)
num_examples = np.sum(labels >= 0)
bbox_outside_weights[labels == 1, :] = 1.0 / num_examples
bbox_outside_weights[labels == 0, :] = 1.0 / num_examples
# Map up to original set of anchors
labels = data_utils.unmap(labels, total_anchors, inds_inside, fill=-1)
bbox_targets = data_utils.unmap(
bbox_targets, total_anchors, inds_inside, fill=0
)
bbox_inside_weights = data_utils.unmap(
bbox_inside_weights, total_anchors, inds_inside, fill=0
)
bbox_outside_weights = data_utils.unmap(
bbox_outside_weights, total_anchors, inds_inside, fill=0
)
# Split the generated labels, etc. into labels per each field of anchors
blobs_out = []
start_idx = 0
for foa in foas:
H = foa.field_size
W = foa.field_size
A = foa.num_cell_anchors
end_idx = start_idx + H * W * A
_labels = labels[start_idx:end_idx]
_bbox_targets = bbox_targets[start_idx:end_idx, :]
_bbox_inside_weights = bbox_inside_weights[start_idx:end_idx, :]
_bbox_outside_weights = bbox_outside_weights[start_idx:end_idx, :]
start_idx = end_idx
# labels output with shape (1, A, height, width)
_labels = _labels.reshape((1, H, W, A)).transpose(0, 3, 1, 2)
# bbox_targets output with shape (1, 4 * A, height, width)
_bbox_targets = _bbox_targets.reshape(
(1, H, W, A * 4)).transpose(0, 3, 1, 2)
# bbox_inside_weights output with shape (1, 4 * A, height, width)
_bbox_inside_weights = _bbox_inside_weights.reshape(
(1, H, W, A * 4)).transpose(0, 3, 1, 2)
# bbox_outside_weights output with shape (1, 4 * A, height, width)
_bbox_outside_weights = _bbox_outside_weights.reshape(
(1, H, W, A * 4)).transpose(0, 3, 1, 2)
blobs_out.append(
dict(
deep_sup_rpn_labels_int32_wide=_labels,
deep_sup_rpn_bbox_targets_wide=_bbox_targets,
deep_sup_rpn_bbox_inside_weights_wide=_bbox_inside_weights,
deep_sup_rpn_bbox_outside_weights_wide=_bbox_outside_weights
)
)
return blobs_out[0] if len(blobs_out) == 1 else blobs_out
4a06f8230d553ec860607bc2e46c010a31b57713 | 1,148 | py | Python | main.py | julien-klaus/karel_the_robot @ a9faa528330152e0a7862e9f2b6727abfb6d3c31 | ["MIT"]
# author: Julien Klaus
import os
from time import time
from compiler.interpreter import Interpreter
if __name__ == "__main__":
path = "test"
test_cases = 0
correct = 0
print("Start checking test cases ...")
start = time()
for file in os.listdir(path):
if file.endswith("in"):
i = Interpreter(path, file)
results = i.get_results()
# check if results are correct
answers = []
with open(os.path.join(path, f"{file[:-3]}.ans"), "r") as answer_file:
line = answer_file.readline().strip()
while line:
answers.append(tuple(line.split(" ")))
line = answer_file.readline().strip()
for result, answer in zip(results, answers):
test_cases += 1
if not (result[0] == int(answer[0]) and result[1] == int(answer[1]) and result[2] == answer[2]):
print("Test case[s] not correct in", file)
else:
correct += 1
print(f"{time()-start}s")
print(f"... {correct} of {test_cases} done correct.")
4a06f888848300de5678036974a8b4a2e85d4304 | 1,163 | py | Python | utils/distributions.py | petersvenningsson/FAR @ f5b994339c0d55e56b003ace9a9f72e384ede91d | ["Apache-2.0"] | stars: 1 | forks: 1
###########
# IMPORTS #
###########
# 3rd-party
import numpy as np
from scipy import stats
###########
# CLASSES #
###########
class Distribution:
def __init__(self, obj, **kwargs):
self.distribution_object = obj(**kwargs)
self.params = kwargs
self.dimensionality = len(self.draw())
def draw(self):
return self.distribution_object.rvs()
def likelihood(self, x):
assert len(x) == self.dimensionality
return self.distribution_object.pdf(x)
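# A quick usage sketch of the wrapper above (mirrors the globals below):
#   d = Distribution(stats.multivariate_normal, mean=[0.0, 0.0], cov=[[1.0, 0.0], [0.0, 1.0]])
#   x = d.draw()         # a length-2 sample, so d.dimensionality == 2
#   p = d.likelihood(x)  # pdf of the frozen distribution evaluated at x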
###########
# GLOBALS #
###########
mean_A = [0.75, -0.25]
mean_B = [1, 0.25]
mean_C = [-0.5, 2]
cov_A = [[1.25 / 2, 0], [0, 1.25 / 2]]
cov_B = [[1.25 / 2, 0], [0, 1.25 / 2]]
cov_C = [[1, 0.5], [0.5, 1.25]]
class_A = Distribution(stats.multivariate_normal, **{"mean": mean_A, "cov": cov_A})
class_B = Distribution(stats.multivariate_normal, **{"mean": mean_B, "cov": cov_B})
class_C = Distribution(stats.multivariate_normal, **{"mean": mean_C, "cov": cov_C})
##########
# SCRIPT #
##########
if __name__ == "__main__":
dist = stats.multivariate_normal
# multivariate_normal is parameterized by mean/cov; the original
# {"loc": 1, "scale": 2} kwargs would raise a TypeError when freezing it
arg = {"mean": [1.0, 1.0], "cov": [[2.0, 0.0], [0.0, 2.0]]}
mydist = Distribution(dist, **arg)
print(mydist.draw())
4a06f88c9ddeaa3912905baff5a302eac980b938 | 4,977 | py | Python | iimmpact/__init__.py | iimmpact/iimmpact_python_api @ d1de12df3eb984ebc7f6adef800d7cb0265e173e | ["MIT"]
# coding: utf-8
# flake8: noqa
"""
IIMMPACT API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2020-09-14T13:01:14Z
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from iimmpact.api.authorization_token_api import AuthorizationTokenApi
from iimmpact.api.callback_url_api import CallbackUrlApi
from iimmpact.api.car_insurance_api import CarInsuranceApi
from iimmpact.api.jpj_records_api import JPJRecordsApi
from iimmpact.api.low_balance_warning_api import LowBalanceWarningApi
from iimmpact.api.my_account_api import MyAccountApi
from iimmpact.api.product_enquiry_api import ProductEnquiryApi
from iimmpact.api.services_api import ServicesApi
from iimmpact.api.transaction_history_api import TransactionHistoryApi
# import ApiClient
from iimmpact.api_client import ApiClient
from iimmpact.configuration import Configuration
# import models into sdk package
from iimmpact.models.balance_response import BalanceResponse
from iimmpact.models.balance_response_data import BalanceResponseData
from iimmpact.models.balance_statement_response import BalanceStatementResponse
from iimmpact.models.balance_statement_response_data import BalanceStatementResponseData
from iimmpact.models.balance_statement_response_links import BalanceStatementResponseLinks
from iimmpact.models.balance_statement_response_meta import BalanceStatementResponseMeta
from iimmpact.models.bill_presentment_response import BillPresentmentResponse
from iimmpact.models.bill_presentment_response_data import BillPresentmentResponseData
from iimmpact.models.bill_presentment_response_metadata import BillPresentmentResponseMetadata
from iimmpact.models.callback_url_response import CallbackUrlResponse
from iimmpact.models.callback_url_response_data import CallbackUrlResponseData
from iimmpact.models.callback_url_response_metadata import CallbackUrlResponseMetadata
from iimmpact.models.car_insurance_respone import CarInsuranceRespone
from iimmpact.models.car_insurance_respone_variant import CarInsuranceResponeVariant
from iimmpact.models.change_password_request import ChangePasswordRequest
from iimmpact.models.deposit_request import DepositRequest
from iimmpact.models.driving_license_respone import DrivingLicenseRespone
from iimmpact.models.driving_license_respone_inner import DrivingLicenseResponeInner
from iimmpact.models.driving_test_respone import DrivingTestRespone
from iimmpact.models.driving_test_respone_enquiry_test_part1 import DrivingTestResponeEnquiryTestPart1
from iimmpact.models.empty import Empty
from iimmpact.models.error import Error
from iimmpact.models.inline_response200 import InlineResponse200
from iimmpact.models.inline_response2001 import InlineResponse2001
from iimmpact.models.inline_response2002 import InlineResponse2002
from iimmpact.models.inline_response2002_data import InlineResponse2002Data
from iimmpact.models.inline_response200_data import InlineResponse200Data
from iimmpact.models.jpj_records_response import JPJRecordsResponse
from iimmpact.models.jpj_summons_response import JPJSummonsResponse
from iimmpact.models.low_balance_warning_response import LowBalanceWarningResponse
from iimmpact.models.low_balance_warning_response_data import LowBalanceWarningResponseData
from iimmpact.models.low_balance_warning_response_metadata import LowBalanceWarningResponseMetadata
from iimmpact.models.network_status_response import NetworkStatusResponse
from iimmpact.models.network_status_response_data import NetworkStatusResponseData
from iimmpact.models.network_status_response_metadata import NetworkStatusResponseMetadata
from iimmpact.models.new_password_request import NewPasswordRequest
from iimmpact.models.new_password_responses import NewPasswordResponses
from iimmpact.models.only_message_respone import OnlyMessageRespone
from iimmpact.models.refresh_token_request import RefreshTokenRequest
from iimmpact.models.token_request import TokenRequest
from iimmpact.models.token_response import TokenResponse
from iimmpact.models.token_response_authentication_result import TokenResponseAuthenticationResult
from iimmpact.models.topup_request import TopupRequest
from iimmpact.models.topup_response import TopupResponse
from iimmpact.models.topup_response_data import TopupResponseData
from iimmpact.models.transactions_response import TransactionsResponse
from iimmpact.models.transactions_response_balance import TransactionsResponseBalance
from iimmpact.models.transactions_response_data import TransactionsResponseData
from iimmpact.models.transactions_response_meta import TransactionsResponseMeta
from iimmpact.models.transactions_response_product import TransactionsResponseProduct
from iimmpact.models.transactions_response_status import TransactionsResponseStatus
from iimmpact.models.vehicle_expiry_response import VehicleExpiryResponse
| 58.552941
| 119
| 0.900743
|
4a06fa7f0108ca38c031b212249cdf6d3ccdb4fc
| 41,961
|
py
|
Python
|
guided_diffusion/gaussian_diffusion.py
|
JacobBunker/guided-diffusion
|
b7b10c7b5224b09d75664d20ef23daee0cc93737
|
[
"MIT"
] | 89
|
2021-08-02T19:22:09.000Z
|
2022-03-31T14:48:37.000Z
|
guided_diffusion/gaussian_diffusion.py
|
JacobBunker/guided-diffusion
|
b7b10c7b5224b09d75664d20ef23daee0cc93737
|
[
"MIT"
] | null | null | null |
guided_diffusion/gaussian_diffusion.py
|
JacobBunker/guided-diffusion
|
b7b10c7b5224b09d75664d20ef23daee0cc93737
|
[
"MIT"
] | 26
|
2021-08-09T16:03:18.000Z
|
2022-03-30T23:03:09.000Z
|
"""
This code started out as a PyTorch port of Ho et al's diffusion models:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py
Docstrings have been added, as well as DDIM sampling and a new collection of beta schedules.
"""
import enum
import math
import numpy as np
import torch as th
from .nn import mean_flat
from .losses import normal_kl, discretized_gaussian_log_likelihood
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
"""
Get a pre-defined beta schedule for the given name.
The beta schedule library consists of beta schedules which remain similar
in the limit of num_diffusion_timesteps.
Beta schedules may be added, but should not be removed or changed once
they are committed to maintain backwards compatibility.
"""
if schedule_name == "linear":
# Linear schedule from Ho et al, extended to work for any number of
# diffusion steps.
scale = 1000 / num_diffusion_timesteps
beta_start = scale * 0.0001
beta_end = scale * 0.02
return np.linspace(
beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64
)
elif schedule_name == "cosine":
return betas_for_alpha_bar(
num_diffusion_timesteps,
lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
)
else:
raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
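# Example (follows directly from the definitions above): a 1000-step
# "linear" schedule has scale == 1000/1000 == 1, so it reproduces the
# Ho et al. endpoints exactly:
#   betas = get_named_beta_schedule("linear", 1000)
#   betas[0]   # 0.0001
#   betas[-1]  # 0.02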
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
"""
Create a beta schedule that discretizes the given alpha_t_bar function,
which defines the cumulative product of (1-beta) over time from t = [0,1].
:param num_diffusion_timesteps: the number of betas to produce.
:param alpha_bar: a lambda that takes an argument t from 0 to 1 and
produces the cumulative product of (1-beta) up to that
part of the diffusion process.
:param max_beta: the maximum beta to use; use values lower than 1 to
prevent singularities.
"""
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return np.array(betas)
class ModelMeanType(enum.Enum):
"""
Which type of output the model predicts.
"""
PREVIOUS_X = enum.auto() # the model predicts x_{t-1}
START_X = enum.auto() # the model predicts x_0
EPSILON = enum.auto() # the model predicts epsilon
class ModelVarType(enum.Enum):
"""
What is used as the model's output variance.
The LEARNED_RANGE option has been added to allow the model to predict
values between FIXED_SMALL and FIXED_LARGE, making its job easier.
"""
LEARNED = enum.auto()
FIXED_SMALL = enum.auto()
FIXED_LARGE = enum.auto()
LEARNED_RANGE = enum.auto()
class LossType(enum.Enum):
MSE = enum.auto() # use raw MSE loss (and KL when learning variances)
RESCALED_MSE = (
enum.auto()
) # use raw MSE loss (with RESCALED_KL when learning variances)
KL = enum.auto() # use the variational lower-bound
RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB
def is_vb(self):
return self == LossType.KL or self == LossType.RESCALED_KL
class GaussianDiffusion:
"""
Utilities for training and sampling diffusion models.
Ported directly from here, and then adapted over time to further experimentation.
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
:param betas: a 1-D numpy array of betas for each diffusion timestep,
starting at T and going to 1.
:param model_mean_type: a ModelMeanType determining what the model outputs.
:param model_var_type: a ModelVarType determining how variance is output.
:param loss_type: a LossType determining the loss function to use.
:param rescale_timesteps: if True, pass floating point timesteps into the
model so that they are always scaled like in the
original paper (0 to 1000).
"""
def __init__(
self,
*,
betas,
model_mean_type,
model_var_type,
loss_type,
rescale_timesteps=False,
):
self.model_mean_type = model_mean_type
self.model_var_type = model_var_type
self.loss_type = loss_type
self.rescale_timesteps = rescale_timesteps
# Use float64 for accuracy.
betas = np.array(betas, dtype=np.float64)
self.betas = betas
assert len(betas.shape) == 1, "betas must be 1-D"
assert (betas > 0).all() and (betas <= 1).all()
self.num_timesteps = int(betas.shape[0])
alphas = 1.0 - betas
self.alphas_cumprod = np.cumprod(alphas, axis=0)
self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
# calculations for posterior q(x_{t-1} | x_t, x_0)
self.posterior_variance = (
betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
# log calculation clipped because the posterior variance is 0 at the
# beginning of the diffusion chain.
self.posterior_log_variance_clipped = np.log(
np.append(self.posterior_variance[1], self.posterior_variance[1:])
)
self.posterior_mean_coef1 = (
betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
self.posterior_mean_coef2 = (
(1.0 - self.alphas_cumprod_prev)
* np.sqrt(alphas)
/ (1.0 - self.alphas_cumprod)
)
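# Sanity relations implied by the definitions above (they hold exactly,
# up to float64 rounding, and are handy when auditing a schedule):
#   sqrt_alphas_cumprod**2 + sqrt_one_minus_alphas_cumprod**2 == 1
#   alphas_cumprod_prev[1:] == alphas_cumprod[:-1]
#   posterior_variance[0] == 0 -- which is exactly why the log above is
#   clipped to posterior_variance[1] at t == 0.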
def q_mean_variance(self, x_start, t):
"""
Get the distribution q(x_t | x_0).
:param x_start: the [N x C x ...] tensor of noiseless inputs.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:return: A tuple (mean, variance, log_variance), all of x_start's shape.
"""
mean = (
_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
)
variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
log_variance = _extract_into_tensor(
self.log_one_minus_alphas_cumprod, t, x_start.shape
)
return mean, variance, log_variance
def q_sample(self, x_start, t, noise=None):
"""
Diffuse the data for a given number of diffusion steps.
In other words, sample from q(x_t | x_0).
:param x_start: the initial data batch.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:param noise: if specified, the split-out normal noise.
:return: A noisy version of x_start.
"""
if noise is None:
noise = th.randn_like(x_start)
assert noise.shape == x_start.shape
return (
_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
+ _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)
* noise
)
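# In equation form, the return value above is the reparameterized sample
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,   eps ~ N(0, I)
# whose mean and variance are exactly what q_mean_variance() reports.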
def q_posterior_mean_variance(self, x_start, x_t, t):
"""
Compute the mean and variance of the diffusion posterior:
q(x_{t-1} | x_t, x_0)
"""
assert x_start.shape == x_t.shape
posterior_mean = (
_extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
+ _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = _extract_into_tensor(
self.posterior_log_variance_clipped, t, x_t.shape
)
assert (
posterior_mean.shape[0]
== posterior_variance.shape[0]
== posterior_log_variance_clipped.shape[0]
== x_start.shape[0]
)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(
self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None
):
"""
Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
the initial x, x_0.
:param model: the model, which takes a signal and a batch of timesteps
as input.
:param x: the [N x C x ...] tensor at time t.
:param t: a 1-D Tensor of timesteps.
:param clip_denoised: if True, clip the denoised signal into [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample. Applies before
clip_denoised.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict with the following keys:
- 'mean': the model mean output.
- 'variance': the model variance output.
- 'log_variance': the log of 'variance'.
- 'pred_xstart': the prediction for x_0.
"""
if model_kwargs is None:
model_kwargs = {}
B, C = x.shape[:2]
assert t.shape == (B,)
model_output = model(x, self._scale_timesteps(t), **model_kwargs)
if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
assert model_output.shape == (B, C * 2, *x.shape[2:])
model_output, model_var_values = th.split(model_output, C, dim=1)
if self.model_var_type == ModelVarType.LEARNED:
model_log_variance = model_var_values
model_variance = th.exp(model_log_variance)
else:
min_log = _extract_into_tensor(
self.posterior_log_variance_clipped, t, x.shape
)
max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
# The model_var_values is [-1, 1] for [min_var, max_var].
frac = (model_var_values + 1) / 2
model_log_variance = frac * max_log + (1 - frac) * min_log
model_variance = th.exp(model_log_variance)
else:
model_variance, model_log_variance = {
# for fixedlarge, we set the initial (log-)variance like so
# to get a better decoder log likelihood.
ModelVarType.FIXED_LARGE: (
np.append(self.posterior_variance[1], self.betas[1:]),
np.log(np.append(self.posterior_variance[1], self.betas[1:])),
),
ModelVarType.FIXED_SMALL: (
self.posterior_variance,
self.posterior_log_variance_clipped,
),
}[self.model_var_type]
model_variance = _extract_into_tensor(model_variance, t, x.shape)
model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
def process_xstart(x):
if denoised_fn is not None:
x = denoised_fn(x)
if clip_denoised:
return x.clamp(-1, 1)
return x
if self.model_mean_type == ModelMeanType.PREVIOUS_X:
pred_xstart = process_xstart(
self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)
)
model_mean = model_output
elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]:
if self.model_mean_type == ModelMeanType.START_X:
pred_xstart = process_xstart(model_output)
else:
pred_xstart = process_xstart(
self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
)
model_mean, _, _ = self.q_posterior_mean_variance(
x_start=pred_xstart, x_t=x, t=t
)
else:
raise NotImplementedError(self.model_mean_type)
assert (
model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape
)
return {
"mean": model_mean,
"variance": model_variance,
"log_variance": model_log_variance,
"pred_xstart": pred_xstart,
}
def _predict_xstart_from_eps(self, x_t, t, eps):
assert x_t.shape == eps.shape
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
)
def _predict_xstart_from_xprev(self, x_t, t, xprev):
assert x_t.shape == xprev.shape
return ( # (xprev - coef2*x_t) / coef1
_extract_into_tensor(1.0 / self.posterior_mean_coef1, t, x_t.shape) * xprev
- _extract_into_tensor(
self.posterior_mean_coef2 / self.posterior_mean_coef1, t, x_t.shape
)
* x_t
)
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- pred_xstart
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
def _scale_timesteps(self, t):
if self.rescale_timesteps:
return t.float() * (1000.0 / self.num_timesteps)
return t
def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute the mean for the previous step, given a function cond_fn that
computes the gradient of a conditional log probability with respect to
x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
condition on y.
This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
"""
gradient = cond_fn(x, self._scale_timesteps(t), **model_kwargs)
new_mean = (
p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
)
return new_mean
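    # A classifier-guidance cond_fn might look like the sketch below. The
    # `classifier` model, `guidance_scale`, and `F` (torch.nn.functional) are
    # assumptions for illustration; they are not defined in this module:
    #
    #   def cond_fn(x, t, y=None):
    #       with th.enable_grad():
    #           x_in = x.detach().requires_grad_(True)
    #           log_probs = F.log_softmax(classifier(x_in, t), dim=-1)
    #           selected = log_probs[range(len(log_probs)), y.view(-1)]
    #           return th.autograd.grad(selected.sum(), x_in)[0] * guidance_scale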
def condition_mean_with_grad(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute the mean for the previous step, given a function cond_fn that
computes the gradient of a conditional log probability with respect to
x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
condition on y.
This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
"""
gradient = cond_fn(x, t, p_mean_var, **model_kwargs)
new_mean = (
p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
)
return new_mean
def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute what the p_mean_variance output would have been, should the
model's score function be conditioned by cond_fn.
See condition_mean() for details on cond_fn.
Unlike condition_mean(), this instead uses the conditioning strategy
from Song et al (2020).
"""
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
eps = eps - (1 - alpha_bar).sqrt() * cond_fn(
x, self._scale_timesteps(t), **model_kwargs
)
out = p_mean_var.copy()
out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
out["mean"], _, _ = self.q_posterior_mean_variance(
x_start=out["pred_xstart"], x_t=x, t=t
)
return out
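    # The eps update above follows from folding the conditional score into the
    # noise prediction:
    #   eps_hat = eps - sqrt(1 - alpha_bar_t) * grad_x log p(y | x_t),
    # after which pred_xstart and the posterior mean are re-derived from eps_hat.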
def condition_score_with_grad(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute what the p_mean_variance output would have been, should the
model's score function be conditioned by cond_fn.
See condition_mean() for details on cond_fn.
Unlike condition_mean(), this instead uses the conditioning strategy
from Song et al (2020).
"""
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
eps = eps - (1 - alpha_bar).sqrt() * cond_fn(
x, t, p_mean_var, **model_kwargs
)
out = p_mean_var.copy()
out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
out["mean"], _, _ = self.q_posterior_mean_variance(
x_start=out["pred_xstart"], x_t=x, t=t
)
return out
def p_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
):
"""
Sample x_{t-1} from the model at the given timestep.
:param model: the model to sample from.
        :param x: the current tensor at x_t.
:param t: the value of t, starting at 0 for the first diffusion step.
:param clip_denoised: if True, clip the x_start prediction to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- 'sample': a random sample from the model.
- 'pred_xstart': a prediction of x_0.
"""
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
noise = th.randn_like(x)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
if cond_fn is not None:
out["mean"] = self.condition_mean(
cond_fn, out, x, t, model_kwargs=model_kwargs
)
sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def p_sample_with_grad(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
):
"""
Sample x_{t-1} from the model at the given timestep.
:param model: the model to sample from.
        :param x: the current tensor at x_t.
:param t: the value of t, starting at 0 for the first diffusion step.
:param clip_denoised: if True, clip the x_start prediction to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- 'sample': a random sample from the model.
- 'pred_xstart': a prediction of x_0.
"""
with th.enable_grad():
x = x.detach().requires_grad_()
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
noise = th.randn_like(x)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
if cond_fn is not None:
out["mean"] = self.condition_mean_with_grad(
cond_fn, out, x, t, model_kwargs=model_kwargs
)
sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"].detach()}
def p_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
skip_timesteps=0,
init_image=None,
randomize_class=False,
cond_fn_with_grad=False,
):
"""
Generate samples from the model.
:param model: the model module.
:param shape: the shape of the samples, (N, C, H, W).
:param noise: if specified, the noise from the encoder to sample.
Should be of the same shape as `shape`.
:param clip_denoised: if True, clip x_start predictions to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param device: if specified, the device to create the samples on.
If not specified, use a model parameter's device.
:param progress: if True, show a tqdm progress bar.
:return: a non-differentiable batch of samples.
"""
final = None
for sample in self.p_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
skip_timesteps=skip_timesteps,
init_image=init_image,
randomize_class=randomize_class,
cond_fn_with_grad=cond_fn_with_grad,
):
final = sample
return final["sample"]
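    # Usage sketch (`model`, batch size and image size are assumptions):
    #
    #   samples = diffusion.p_sample_loop(
    #       model,
    #       (batch_size, 3, 64, 64),
    #       clip_denoised=True,
    #       progress=True,
    #   )  # -> (batch_size, 3, 64, 64) tensor, roughly in [-1, 1]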
def p_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
skip_timesteps=0,
init_image=None,
randomize_class=False,
cond_fn_with_grad=False,
):
"""
Generate samples from the model and yield intermediate samples from
each timestep of diffusion.
Arguments are the same as p_sample_loop().
Returns a generator over dicts, where each dict is the return value of
p_sample().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
if skip_timesteps and init_image is None:
init_image = th.zeros_like(img)
indices = list(range(self.num_timesteps - skip_timesteps))[::-1]
if init_image is not None:
my_t = th.ones([shape[0]], device=device, dtype=th.long) * indices[0]
img = self.q_sample(init_image, my_t, img)
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor([i] * shape[0], device=device)
            if randomize_class and model_kwargs is not None and 'y' in model_kwargs:
model_kwargs['y'] = th.randint(low=0, high=model.num_classes,
size=model_kwargs['y'].shape,
device=model_kwargs['y'].device)
with th.no_grad():
sample_fn = self.p_sample_with_grad if cond_fn_with_grad else self.p_sample
out = sample_fn(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
)
yield out
img = out["sample"]
def ddim_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t-1} from the model using DDIM.
Same usage as p_sample().
"""
out_orig = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if cond_fn is not None:
out = self.condition_score(cond_fn, out_orig, x, t, model_kwargs=model_kwargs)
else:
out = out_orig
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
sigma = (
eta
* th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))
* th.sqrt(1 - alpha_bar / alpha_bar_prev)
)
# Equation 12.
noise = th.randn_like(x)
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_prev)
+ th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps
)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
sample = mean_pred + nonzero_mask * sigma * noise
return {"sample": sample, "pred_xstart": out_orig["pred_xstart"]}
def ddim_sample_with_grad(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t-1} from the model using DDIM.
Same usage as p_sample().
"""
with th.enable_grad():
x = x.detach().requires_grad_()
out_orig = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if cond_fn is not None:
out = self.condition_score_with_grad(cond_fn, out_orig, x, t,
model_kwargs=model_kwargs)
else:
out = out_orig
out["pred_xstart"] = out["pred_xstart"].detach()
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
sigma = (
eta
* th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))
* th.sqrt(1 - alpha_bar / alpha_bar_prev)
)
# Equation 12.
noise = th.randn_like(x)
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_prev)
+ th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps
)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
sample = mean_pred + nonzero_mask * sigma * noise
return {"sample": sample, "pred_xstart": out_orig["pred_xstart"].detach()}
def ddim_reverse_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t+1} from the model using DDIM reverse ODE.
"""
assert eta == 0.0, "Reverse ODE only for deterministic path"
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
- out["pred_xstart"]
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
# Equation 12. reversed
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_next)
+ th.sqrt(1 - alpha_bar_next) * eps
)
return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
def ddim_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
skip_timesteps=0,
init_image=None,
randomize_class=False,
cond_fn_with_grad=False,
):
"""
Generate samples from the model using DDIM.
Same usage as p_sample_loop().
"""
final = None
for sample in self.ddim_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
eta=eta,
skip_timesteps=skip_timesteps,
init_image=init_image,
randomize_class=randomize_class,
cond_fn_with_grad=cond_fn_with_grad,
):
final = sample
return final["sample"]
def ddim_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
skip_timesteps=0,
init_image=None,
randomize_class=False,
cond_fn_with_grad=False,
):
"""
Use DDIM to sample from the model and yield intermediate samples from
each timestep of DDIM.
Same usage as p_sample_loop_progressive().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
if skip_timesteps and init_image is None:
init_image = th.zeros_like(img)
indices = list(range(self.num_timesteps - skip_timesteps))[::-1]
if init_image is not None:
my_t = th.ones([shape[0]], device=device, dtype=th.long) * indices[0]
img = self.q_sample(init_image, my_t, img)
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor([i] * shape[0], device=device)
            if randomize_class and model_kwargs is not None and 'y' in model_kwargs:
model_kwargs['y'] = th.randint(low=0, high=model.num_classes,
size=model_kwargs['y'].shape,
device=model_kwargs['y'].device)
with th.no_grad():
sample_fn = self.ddim_sample_with_grad if cond_fn_with_grad else self.ddim_sample
out = sample_fn(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
eta=eta,
)
yield out
img = out["sample"]
def _vb_terms_bpd(
self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None
):
"""
Get a term for the variational lower-bound.
The resulting units are bits (rather than nats, as one might expect).
This allows for comparison to other papers.
:return: a dict with the following keys:
- 'output': a shape [N] tensor of NLLs or KLs.
- 'pred_xstart': the x_0 predictions.
"""
true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)
out = self.p_mean_variance(
model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
)
kl = normal_kl(
true_mean, true_log_variance_clipped, out["mean"], out["log_variance"]
)
kl = mean_flat(kl) / np.log(2.0)
decoder_nll = -discretized_gaussian_log_likelihood(
x_start, means=out["mean"], log_scales=0.5 * out["log_variance"]
)
assert decoder_nll.shape == x_start.shape
decoder_nll = mean_flat(decoder_nll) / np.log(2.0)
# At the first timestep return the decoder NLL,
# otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
output = th.where((t == 0), decoder_nll, kl)
return {"output": output, "pred_xstart": out["pred_xstart"]}
def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):
"""
Compute training losses for a single timestep.
:param model: the model to evaluate loss on.
:param x_start: the [N x C x ...] tensor of inputs.
:param t: a batch of timestep indices.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param noise: if specified, the specific Gaussian noise to try to remove.
:return: a dict with the key "loss" containing a tensor of shape [N].
Some mean or variance settings may also have other keys.
"""
if model_kwargs is None:
model_kwargs = {}
if noise is None:
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start, t, noise=noise)
terms = {}
if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL:
terms["loss"] = self._vb_terms_bpd(
model=model,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
model_kwargs=model_kwargs,
)["output"]
if self.loss_type == LossType.RESCALED_KL:
terms["loss"] *= self.num_timesteps
elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:
model_output = model(x_t, self._scale_timesteps(t), **model_kwargs)
if self.model_var_type in [
ModelVarType.LEARNED,
ModelVarType.LEARNED_RANGE,
]:
B, C = x_t.shape[:2]
assert model_output.shape == (B, C * 2, *x_t.shape[2:])
model_output, model_var_values = th.split(model_output, C, dim=1)
# Learn the variance using the variational bound, but don't let
# it affect our mean prediction.
frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
terms["vb"] = self._vb_terms_bpd(
model=lambda *args, r=frozen_out: r,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
)["output"]
if self.loss_type == LossType.RESCALED_MSE:
# Divide by 1000 for equivalence with initial implementation.
# Without a factor of 1/1000, the VB term hurts the MSE term.
terms["vb"] *= self.num_timesteps / 1000.0
target = {
ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)[0],
ModelMeanType.START_X: x_start,
ModelMeanType.EPSILON: noise,
}[self.model_mean_type]
assert model_output.shape == target.shape == x_start.shape
terms["mse"] = mean_flat((target - model_output) ** 2)
if "vb" in terms:
terms["loss"] = terms["mse"] + terms["vb"]
else:
terms["loss"] = terms["mse"]
else:
raise NotImplementedError(self.loss_type)
return terms
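    # Training-step sketch (the optimizer `opt` and `model` are assumptions,
    # not part of this module):
    #
    #   t = th.randint(0, diffusion.num_timesteps, (x_start.shape[0],),
    #                  device=x_start.device)
    #   losses = diffusion.training_losses(model, x_start, t)
    #   losses["loss"].mean().backward()
    #   opt.step()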
def _prior_bpd(self, x_start):
"""
Get the prior KL term for the variational lower-bound, measured in
bits-per-dim.
This term can't be optimized, as it only depends on the encoder.
:param x_start: the [N x C x ...] tensor of inputs.
:return: a batch of [N] KL values (in bits), one per batch element.
"""
batch_size = x_start.shape[0]
t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(
mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0
)
return mean_flat(kl_prior) / np.log(2.0)
def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
"""
Compute the entire variational lower-bound, measured in bits-per-dim,
as well as other related quantities.
:param model: the model to evaluate loss on.
:param x_start: the [N x C x ...] tensor of inputs.
:param clip_denoised: if True, clip denoised samples.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- total_bpd: the total variational lower-bound, per batch element.
- prior_bpd: the prior term in the lower-bound.
- vb: an [N x T] tensor of terms in the lower-bound.
- xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
- mse: an [N x T] tensor of epsilon MSEs for each timestep.
"""
device = x_start.device
batch_size = x_start.shape[0]
vb = []
xstart_mse = []
mse = []
for t in list(range(self.num_timesteps))[::-1]:
t_batch = th.tensor([t] * batch_size, device=device)
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
# Calculate VLB term at the current timestep
with th.no_grad():
out = self._vb_terms_bpd(
model,
x_start=x_start,
x_t=x_t,
t=t_batch,
clip_denoised=clip_denoised,
model_kwargs=model_kwargs,
)
vb.append(out["output"])
xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2))
eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"])
mse.append(mean_flat((eps - noise) ** 2))
vb = th.stack(vb, dim=1)
xstart_mse = th.stack(xstart_mse, dim=1)
mse = th.stack(mse, dim=1)
prior_bpd = self._prior_bpd(x_start)
total_bpd = vb.sum(dim=1) + prior_bpd
return {
"total_bpd": total_bpd,
"prior_bpd": prior_bpd,
"vb": vb,
"xstart_mse": xstart_mse,
"mse": mse,
}
def _extract_into_tensor(arr, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array for a batch of indices.
:param arr: the 1-D numpy array.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res.expand(broadcast_shape)
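# Illustration (sketch): for arr of shape [T], timesteps of shape [N] and
# broadcast_shape (N, C, H, W), indexing yields an [N] tensor, the while-loop
# reshapes it to [N, 1, 1, 1], and expand() broadcasts it (without copying)
# to (N, C, H, W) so it can scale an image batch elementwise.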
| 37.973756
| 129
| 0.580777
|
4a06fb412470b498a967a1f5135e2df34c66fa13
| 9,582
|
py
|
Python
|
common/xrd-ui-tests-python/tests/xroad_ss_import_certificate_from_token/XroadImportCertFromTokenSigning.py
|
ria-ee/XTM
|
6103f3f5bbba387b8b59b050c0c4f1fb2180fc37
|
[
"MIT"
] | 3
|
2018-03-15T14:22:50.000Z
|
2021-11-08T10:30:35.000Z
|
common/xrd-ui-tests-python/tests/xroad_ss_import_certificate_from_token/XroadImportCertFromTokenSigning.py
|
ria-ee/XTM
|
6103f3f5bbba387b8b59b050c0c4f1fb2180fc37
|
[
"MIT"
] | 11
|
2017-04-06T09:25:41.000Z
|
2018-06-04T09:08:48.000Z
|
common/xrd-ui-tests-python/tests/xroad_ss_import_certificate_from_token/XroadImportCertFromTokenSigning.py
|
ria-ee/XTM
|
6103f3f5bbba387b8b59b050c0c4f1fb2180fc37
|
[
"MIT"
] | 20
|
2017-03-14T07:21:58.000Z
|
2019-05-21T09:26:30.000Z
|
import time
import unittest
from selenium.webdriver.common.by import By
from helpers import ssh_client
from main.maincontroller import MainController
from tests.xroad_cs_ca import ca_management
from tests.xroad_cs_ca.ca_management import test_add_ca
from tests.xroad_cs_ocsp_responder import ocsp_responder
from tests.xroad_cs_ocsp_responder.ocsp_responder import test_add_ocsp_responder
from tests.xroad_ss_client.ss_client_management import add_ss_client, delete_client, edit_client
from tests.xroad_ss_client_certification_213.client_certification import start_xroad_conf_client, \
expire_global_conf
from tests.xroad_ss_import_certificate_from_token import xroad_import_cert_token
from tests.xroad_ss_import_certificate_from_token.xroad_import_cert_token import test_import_cert_from_token
from view_models.clients_table_vm import get_client_row_element
from view_models.popups import WARNING_POPUP_CONTINUE_XPATH, CONFIRM_POPUP_CANCEL_BTN_XPATH, \
YESNO_POPUP_NO_BTN_XPATH, close_all_open_dialogs
class XroadImportCertFromTokenSigning(unittest.TestCase):
"""
    SS_31 1-16, 2a, 4a, 5a, 6a, 7a, 7c, 8a, 9a, 10a, 13a Import certificate from security token (sign)
RIA URL: https://jira.ria.ee/browse/XTKB-121
Depends on finishing other test(s):
Requires helper scenarios: MEMBER_47, TRUST_08, TRUST_10, TRUST_14
X-Road version: 6.16.0
"""
def test_aimport_cert_from_token(self):
main = MainController(self)
ss_host = main.config.get('ss2.host')
ss_user = main.config.get('ss2.user')
ss_pass = main.config.get('ss2.pass')
ss_ssh_host = main.config.get('ss2.ssh_host')
ss_ssh_user = main.config.get('ss2.ssh_user')
ss_ssh_pass = main.config.get('ss2.ssh_pass')
ca_ssh_host = main.config.get('ca.ssh_host')
ca_ssh_user = main.config.get('ca.ssh_user')
ca_ssh_pass = main.config.get('ca.ssh_pass')
token_name = main.config.get('utimaco.token_name')
import_cert_from_token = test_import_cert_from_token(main, ss_ssh_host, ss_ssh_user, ss_ssh_pass, token_name,
ss_host=ss_host, ss_user=ss_user, ss_pass=ss_pass,
already_exists_error=True, expired_cert_error=True,
auth_cert_sign_key_error=True)
generate_certs_to_hsm = xroad_import_cert_token.generate_certs_to_hsm(main, ca_ssh_host, ca_ssh_user,
ca_ssh_pass, ss_ssh_host, ss_ssh_user,
ss_ssh_pass, token_name)
member_code = subsystem_code = 'test'
member_class = 'COM'
try:
main.reload_webdriver(ss_host, ss_user, ss_pass)
main.log('Adding test client to ss')
add_ss_client(main, member_code, member_class, subsystem_code)
main.wait_until_visible(type=By.XPATH, element=WARNING_POPUP_CONTINUE_XPATH).click()
main.wait_until_visible(type=By.XPATH, element=CONFIRM_POPUP_CANCEL_BTN_XPATH).click()
working_cert_id, auth_cert_id, expired_cert_id, existing_cert_id = generate_certs_to_hsm()
main.log('Waiting until signer update')
time.sleep(60)
main.reset_page()
import_cert_from_token(working_cert_id, auth_cert_id, expired_cert_id, existing_cert_id)
finally:
main.tearDown()
def test_bimport_cert_from_token_global_conf_error(self):
main = MainController(self)
ss_host = main.config.get('ss2.host')
ss_user = main.config.get('ss2.user')
ss_pass = main.config.get('ss2.pass')
ss_ssh_host = main.config.get('ss2.ssh_host')
ss_ssh_user = main.config.get('ss2.ssh_user')
ss_ssh_pass = main.config.get('ss2.ssh_pass')
sshclient = ssh_client.SSHClient(ss_ssh_host, ss_ssh_user, ss_ssh_pass)
expire_globalconf = expire_global_conf(main, sshclient)
import_cert_from_token_global_conf_error = test_import_cert_from_token(main, ss_ssh_host, ss_ssh_user,
ss_ssh_pass, global_conf_error=True)
start_conf_client = start_xroad_conf_client(main, sshclient)
try:
main.reload_webdriver(ss_host, ss_user, ss_pass)
expire_globalconf()
import_cert_from_token_global_conf_error()
finally:
start_conf_client()
main.log('Waiting until global configuration is up to date')
time.sleep(60)
main.tearDown()
def test_cimport_cert_from_token_no_ca(self):
main = MainController(self)
cs_host = main.config.get('cs.host')
cs_user = main.config.get('cs.user')
cs_pass = main.config.get('cs.pass')
ss_host = main.config.get('ss2.host')
ss_user = main.config.get('ss2.user')
ss_pass = main.config.get('ss2.pass')
ss_ssh_host = main.config.get('ss2.ssh_host')
ss_ssh_user = main.config.get('ss2.ssh_user')
ss_ssh_pass = main.config.get('ss2.ssh_pass')
import_cert_from_token_no_ca_error = test_import_cert_from_token(main, ss_ssh_host, ss_ssh_user,
ss_ssh_pass, no_ca_error=True)
ca_certificate_filename = 'ca.cert.pem'
certificate_classpath = main.config.get('ca.profile_class')
ca_ssh_host = main.config.get('ca.ssh_host')
ca_ssh_user = main.config.get('ca.ssh_user')
ca_ssh_pass = main.config.get('ca.ssh_pass')
ocsp_responder.ca_get_certificates(main, ca_ssh_host, ca_ssh_user, ca_ssh_pass, [ca_certificate_filename])
ca_certificate = main.get_download_path(ca_certificate_filename)
invalid_ca_certificate = main.get_download_path('INFO')
restore_ca = test_add_ca(case=main, ca_certificate=ca_certificate,
invalid_ca_certificate=invalid_ca_certificate,
certificate_classpath=certificate_classpath)
ca_name = main.config.get('ca.host')
ocsp_url = main.config.get('ca.ocs_host')
ocsp_cert_filename = 'ocsp.cert.pem'
main.log('Getting CA certificates from {0}'.format(ca_ssh_host))
ocsp_responder.ca_get_certificates(main, ca_ssh_host, ca_ssh_user, ca_ssh_pass, filenames=[ocsp_cert_filename])
certificate_filename = main.get_download_path(ocsp_cert_filename)
restore_ocsp = test_add_ocsp_responder(case=main, ca_name=ca_name, ocsp_url=ocsp_url,
certificate_filename=certificate_filename)
delete_ca = ca_management.test_delete_ca(case=main, ca_name=ca_name)
try:
main.reload_webdriver(cs_host, cs_user, cs_pass)
delete_ca()
main.log('Wait 120 seconds for changes')
time.sleep(120)
main.reload_webdriver(ss_host, ss_user, ss_pass)
import_cert_from_token_no_ca_error()
finally:
main.reload_webdriver(cs_host, cs_user, cs_pass)
restore_ca()
close_all_open_dialogs(main)
restore_ocsp()
time.sleep(120)
main.tearDown()
def test_dimport_cert_from_token_no_client(self):
main = MainController(self)
ss_host = main.config.get('ss2.host')
ss_user = main.config.get('ss2.user')
ss_pass = main.config.get('ss2.pass')
ss_ssh_host = main.config.get('ss2.ssh_host')
ss_ssh_user = main.config.get('ss2.ssh_user')
ss_ssh_pass = main.config.get('ss2.ssh_pass')
import_cert_from_token_no_key_error = test_import_cert_from_token(main, ss_ssh_host, ss_ssh_user,
ss_ssh_pass, no_client_error=True)
test_client_id = main.config.get('ss2.test_client_id')
try:
main.log('Deleting test client')
main.reload_webdriver(ss_host, ss_user, ss_pass)
client_row = get_client_row_element(main, client_id=test_client_id)
edit_client(main, client_row)
delete_client(main, False)
main.wait_until_visible(type=By.XPATH, element=YESNO_POPUP_NO_BTN_XPATH).click()
main.wait_jquery()
import_cert_from_token_no_key_error()
finally:
main.tearDown()
def test_eimport_cert_from_token_no_key(self):
main = MainController(self)
ss_host = main.config.get('ss2.host')
ss_user = main.config.get('ss2.user')
ss_pass = main.config.get('ss2.pass')
ss_ssh_host = main.config.get('ss2.ssh_host')
ss_ssh_user = main.config.get('ss2.ssh_user')
ss_ssh_pass = main.config.get('ss2.ssh_pass')
token_name = main.config.get('utimaco.token_name')
import_cert_from_token_no_key_error = test_import_cert_from_token(main, ss_ssh_host, ss_ssh_user,
ss_ssh_pass, ss_host=ss_host, ss_user=ss_user,
ss_pass=ss_pass, no_key_error=True, token_name=token_name)
try:
main.reload_webdriver(ss_host, ss_user, ss_pass)
import_cert_from_token_no_key_error()
finally:
main.tearDown()
| 49.138462
| 132
| 0.648299
|
4a06fb75841229e87760991c725d4c099eaf0e7d
| 1,278
|
py
|
Python
|
test/python/smiles.py
|
timvdm/Helium
|
79db85da43f20606710263f800deac52534d437e
|
[
"BSD-3-Clause"
] | 13
|
2015-02-04T17:02:25.000Z
|
2018-04-25T22:48:52.000Z
|
test/python/smiles.py
|
timvdm/Helium
|
79db85da43f20606710263f800deac52534d437e
|
[
"BSD-3-Clause"
] | null | null | null |
test/python/smiles.py
|
timvdm/Helium
|
79db85da43f20606710263f800deac52534d437e
|
[
"BSD-3-Clause"
] | 4
|
2015-11-27T06:19:40.000Z
|
2021-04-20T17:35:41.000Z
|
import helium
import unittest
SMILES = helium.Smiles()
class TestSmiles(unittest.TestCase):
def test_read_valid(self):
mol = helium.Molecule()
self.assertTrue(SMILES.read('CCC', mol))
self.assertFalse(SMILES.error())
def test_read_invalid(self):
mol = helium.Molecule()
self.assertFalse(SMILES.read('dfgd', mol))
self.assertTrue(SMILES.error())
self.assertNotEqual(0, len(str(SMILES.error())))
def test_write(self):
mol = helium.Molecule()
self.assertTrue(SMILES.read('C=C', mol))
self.assertEqual('C=C', SMILES.write(mol))
        # 'Flags.None' is a syntax error on Python 3 (None is a keyword), so
        # fetch the enum member, which is literally named "None", dynamically.
        self.assertEqual('CC', SMILES.write(mol, getattr(helium.Smiles.Flags, 'None')))
def test_canonical(self):
mol1 = helium.Molecule()
mol2 = helium.Molecule()
self.assertTrue(SMILES.read('C=O', mol1))
self.assertTrue(SMILES.read('O=C', mol2))
self.assertEqual(SMILES.writeCanonical(mol1), SMILES.writeCanonical(mol2))
self.assertEqual(SMILES.write(mol1, [0, 1]), SMILES.write(mol2, [1, 0]))
        none_flag = getattr(helium.Smiles.Flags, 'None')  # see note in test_write
        self.assertEqual('CO', SMILES.write(mol1, [0, 1], none_flag))
        self.assertEqual('CO', SMILES.write(mol2, [1, 0], none_flag))
if __name__ == '__main__':
unittest.main()
| 32.769231
| 84
| 0.640845
|
4a06fd62ded1020bd7b7709129e49c023014756a
| 2,382
|
py
|
Python
|
mars/tests/conftest.py
|
trotsky1997/mars
|
315b94ade1489d4fdfd351f17263fbc1d4c47008
|
[
"Apache-2.0"
] | null | null | null |
mars/tests/conftest.py
|
trotsky1997/mars
|
315b94ade1489d4fdfd351f17263fbc1d4c47008
|
[
"Apache-2.0"
] | null | null | null |
mars/tests/conftest.py
|
trotsky1997/mars
|
315b94ade1489d4fdfd351f17263fbc1d4c47008
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import pytest
from mars.serialization.ray import register_ray_serializers, unregister_ray_serializers
from mars.oscar.backends.router import Router
from mars.oscar.backends.ray.communication import RayServer
from mars.utils import lazy_import
ray = lazy_import('ray')
@pytest.fixture
def ray_start_regular(request):
param = getattr(request, "param", {})
if not param.get('enable', True):
yield
else:
register_ray_serializers()
try:
yield ray.init(num_cpus=20)
finally:
ray.shutdown()
unregister_ray_serializers()
Router.set_instance(None)
RayServer.clear()
if 'COV_CORE_SOURCE' in os.environ:
# Remove this when https://github.com/ray-project/ray/issues/16802 got fixed
subprocess.check_call(["ray", "stop", "--force"])
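# Usage sketch (illustrative): a test opts in simply by naming the fixture,
# and can skip Ray startup through indirect parametrization:
#
#   @pytest.mark.parametrize('ray_start_regular', [{'enable': False}],
#                            indirect=True)
#   def test_without_ray(ray_start_regular):
#       ...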
@pytest.fixture
def ray_large_cluster():
try:
from ray.cluster_utils import Cluster
except ModuleNotFoundError:
from ray._private.cluster_utils import Cluster
cluster = Cluster()
remote_nodes = []
num_nodes = 3
for i in range(num_nodes):
remote_nodes.append(cluster.add_node(num_cpus=10))
if len(remote_nodes) == 1:
ray.init(address=cluster.address)
register_ray_serializers()
try:
yield
finally:
unregister_ray_serializers()
Router.set_instance(None)
RayServer.clear()
ray.shutdown()
cluster.shutdown()
if 'COV_CORE_SOURCE' in os.environ:
# Remove this when https://github.com/ray-project/ray/issues/16802 got fixed
subprocess.check_call(["ray", "stop", "--force"])
__all__ = ['ray_start_regular', 'ray_large_cluster']
| 31.76
| 92
| 0.68094
|
4a06fd6dfe8e8267b3ad96b730e8379a234d58a1
| 4,242
|
py
|
Python
|
alibabacloud/utils/client_supports.py
|
wallisyan/alibabacloud-python-sdk-v2
|
6e024c97cded2403025a7dd8fea8261e41872156
|
[
"Apache-2.0"
] | null | null | null |
alibabacloud/utils/client_supports.py
|
wallisyan/alibabacloud-python-sdk-v2
|
6e024c97cded2403025a7dd8fea8261e41872156
|
[
"Apache-2.0"
] | null | null | null |
alibabacloud/utils/client_supports.py
|
wallisyan/alibabacloud-python-sdk-v2
|
6e024c97cded2403025a7dd8fea8261e41872156
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Alibaba Cloud Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import time
from alibabacloud.client import AlibabaCloudClient
ALIBABACLOUD_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CLIENTS_DATA_PATH = os.path.join(ALIBABACLOUD_ROOT, 'clients')
SERVICES_DATA_PATH = os.path.join(ALIBABACLOUD_ROOT, 'services')
def _is_subclass(sub_object, f_object, sub=1):
    # Return the class name when f_object sits at position `sub` of the MRO
    # (i.e. is a direct base class for sub=1), and None otherwise.
if sub_object is f_object:
return
if sub_object.__mro__[sub] is f_object:
return sub_object.__name__
def _get_client_classes(path):
for name, obj in inspect.getmembers(path):
if inspect.isclass(obj):
if _is_subclass(obj, AlibabaCloudClient):
return obj.__name__
def _format_api_version(api_version):
return time.strftime("%Y-%m-%d", time.strptime(api_version, '%Y%m%d'))
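# Example (illustrative): _format_api_version('20140828') -> '2014-08-28',
# matching the date suffix embedded in client module names such as
# 'ess_20140828'.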
def _list_available_client_services():
# find py file ,get name ,split
services = dict()
for root, _, files in os.walk(CLIENTS_DATA_PATH):
if root.endswith('clients'):
if '__init__.py' in files:
files.remove('__init__.py')
for file in files:
if file.endswith('.py'):
                    module_name = file[:-3]  # strip '.py'; rstrip would eat trailing 'p'/'y' chars
service_name, api_version = module_name.rsplit('_', 1)
api_version = _format_api_version(api_version)
client_module = __import__(
'.'.join(['alibabacloud', 'clients', module_name]), globals(), locals(),
['clients', module_name], 0)
client_name = _get_client_classes(client_module)
if service_name not in services:
services[service_name] = (client_name, [api_version])
elif api_version not in services[service_name][1]:
services[service_name][1].append(api_version)
return services
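# The resulting mapping has the shape (sketch; the class name is illustrative):
#   {'ess': ('EssClient', ['2014-08-28']), ...}
# i.e. service name -> (client class name, list of supported API versions).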
def _get_resources_classes(path, sub):
services = dict()
    services_file = path.__name__.rsplit(".", 1)[-1]  # module basename
from alibabacloud.resources.base import ServiceResource
for class_name, obj in inspect.getmembers(path):
if inspect.isclass(obj):
if _is_subclass(obj, ServiceResource, sub):
try:
services[getattr(obj(""), "_service_name")] = obj, services_file
except TypeError:
services[getattr(obj("", ""), "_service_name")] = obj, services_file
except AttributeError:
services[services_file.lstrip("_")] = obj, services_file
return services
def _read_dir_get_resources_info(generator_files, sub=1):
services = dict()
for file in generator_files:
if file.endswith('.py'):
            module_name = file[:-3]  # strip '.py'; rstrip would eat trailing 'p'/'y' chars
services_module = __import__(
'.'.join(['alibabacloud', 'services', module_name]), globals(), locals(),
['services', module_name], 0)
service = _get_resources_classes(services_module, sub)
services.update(service)
return services
def _list_available_resource_services():
services = dict()
for root, _, files in os.walk(SERVICES_DATA_PATH):
if root.endswith('services'):
if '__init__.py' in files:
files.remove('__init__.py')
generator_files = [file for file in files if file.startswith("_")]
services.update(_read_dir_get_resources_info(generator_files))
modified_files = set(files) - set(generator_files)
services.update(_read_dir_get_resources_info(modified_files, sub=2))
return services
| 39.277778
| 96
| 0.640736
|
4a06fd7f51f7f9d23f6f06a2e39b094b8c91a500
| 1,738
|
py
|
Python
|
aliyun-python-sdk-ess/aliyunsdkess/request/v20140828/DeleteScalingRuleRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | 1
|
2019-12-23T12:36:43.000Z
|
2019-12-23T12:36:43.000Z
|
aliyun-python-sdk-ess/aliyunsdkess/request/v20140828/DeleteScalingRuleRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-ess/aliyunsdkess/request/v20140828/DeleteScalingRuleRequest.py
|
liumihust/aliyun-openapi-python-sdk
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
[
"Apache-2.0"
] | 1
|
2021-02-23T11:27:54.000Z
|
2021-02-23T11:27:54.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DeleteScalingRuleRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ess', '2014-08-28', 'DeleteScalingRule','ess')
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_ScalingRuleId(self):
return self.get_query_params().get('ScalingRuleId')
def set_ScalingRuleId(self,ScalingRuleId):
self.add_query_param('ScalingRuleId',ScalingRuleId)
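# Usage sketch (client construction and credentials omitted; the rule ID is a
# placeholder):
#
#   request = DeleteScalingRuleRequest()
#   request.set_ScalingRuleId('asr-xxxxxxxxxxxx')
#   response = client.do_action_with_exception(request)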
| 36.208333
| 76
| 0.774453
|
4a06fe062632ebb5d7abaefbcc688cb92f7374f3
| 945
|
py
|
Python
|
setup.py
|
garne041/chem_calc
|
cad35c9444874b62ea85c36a7fa011a59274c095
|
[
"MIT"
] | 1
|
2020-09-25T01:32:20.000Z
|
2020-09-25T01:32:20.000Z
|
setup.py
|
garne041/chem_calc
|
cad35c9444874b62ea85c36a7fa011a59274c095
|
[
"MIT"
] | null | null | null |
setup.py
|
garne041/chem_calc
|
cad35c9444874b62ea85c36a7fa011a59274c095
|
[
"MIT"
] | null | null | null |
import setuptools
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, "README.md"), encoding='utf-8') as fh:
long_description = fh.read()
setuptools.setup(
name="chem_calc", # Replace with your own username
version="0.0.3",
author="Joy Garnett",
author_email="garne041@gmail.com",
description="A package to calculate various stoichometric features of compounds.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/garne041/chem_calc",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
include_package_data=True,
install_requires = ['numpy', 'pandas'],
setup_requires = ['numpy', 'pandas'],
python_requires='>=3.2',
)
| 35
| 86
| 0.691005
|
4a06fe727a9cf6651ffa17cf0dd558bae879c6ce
| 5,027
|
py
|
Python
|
orchestra/contrib/settings/parser.py
|
RubenPX/django-orchestra
|
5ab4779e1ae12ec99569d682601b7810587ed381
|
[
"Unlicense"
] | 68
|
2015-02-09T10:28:44.000Z
|
2022-03-12T11:08:36.000Z
|
orchestra/contrib/settings/parser.py
|
RubenPX/django-orchestra
|
5ab4779e1ae12ec99569d682601b7810587ed381
|
[
"Unlicense"
] | 17
|
2015-05-01T18:10:03.000Z
|
2021-03-19T21:52:55.000Z
|
orchestra/contrib/settings/parser.py
|
RubenPX/django-orchestra
|
5ab4779e1ae12ec99569d682601b7810587ed381
|
[
"Unlicense"
] | 29
|
2015-03-31T04:51:03.000Z
|
2022-02-17T02:58:50.000Z
|
import ast
import copy
import json
import os
import re
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import Promise
from orchestra.utils.paths import get_project_dir
from . import Setting
class Remove(object):
""" used to signal a setting remove """
pass
def get_settings_file():
return os.path.join(get_project_dir(), 'settings.py')
def _find_updates(changes, settings_file):
""" find all updates needed for applying changes on settings_file content """
with open(settings_file, 'rb') as handler:
p = ast.parse(handler.read())
updates = []
for elem in p.body:
if updates and updates[-1][-1] is None:
updates[-1][-1] = elem.lineno-1
targets = getattr(elem, 'targets', None)
if targets:
var_name = targets[0].id
if var_name in changes:
updates.append([var_name, elem.lineno, None])
return updates
class LazyUgettextRepr(object):
def __init__(self, value):
self.value = value
def __repr__(self):
return '_("%s")' % self.value
def __len__(self):
return len(repr(self.value))
class NotSupported(object):
def __repr__(self):
return 'Serialization not supported'
def __len__(self):
return 0
def get_eval_context():
return {
'NotSupported': NotSupported,
'_': _,
}
def serialize(obj, init=True):
if isinstance(obj, NotSupported):
return obj
elif isinstance(obj, Promise):
_obj = LazyUgettextRepr(obj)
elif isinstance(obj, dict):
_obj = {}
for name, value in obj.items():
name = serialize(name, init=False)
value = serialize(value, init=False)
if isinstance(name, NotSupported) or isinstance(value, NotSupported):
return NotSupported()
_obj[name] = value
elif isinstance(obj, (tuple, list)):
_obj = []
for nested in obj:
nested = serialize(nested, init=False)
if isinstance(nested, NotSupported):
return nested
_obj.append(nested)
_obj = type(obj)(_obj)
elif isinstance(obj, (str, bool, int, float)):
_obj = obj
else:
_obj = NotSupported()
return repr(_obj) if init else _obj
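# Example (sketch): serialize({'name': _("Hello")}) returns the source string
# {'name': _("Hello")} -- lazy ugettext proxies round-trip as _(...) calls,
# while objects with no safe source form collapse to NotSupported.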
def _format_setting(name, value):
if isinstance(value, Remove):
return ""
try:
value = json.dumps(value, indent=4)
except TypeError:
value = serialize(value)
return "{name} = {value}".format(name=name, value=value)
def validate_changes(changes):
for name, value in changes.items():
if not isinstance(value, Remove):
try:
setting = Setting.settings[name]
except KeyError:
pass
else:
setting.validate_value(value)
def apply(changes, settings_file=get_settings_file()):
""" returns settings_file content with applied changes """
validate_changes(changes)
updates = _find_updates(changes, settings_file)
content = []
_changes = copy.copy(changes)
inside = False
lineno = None
if updates:
name, lineno, end = updates.pop(0)
# update existing variable declarations
with open(settings_file, 'r') as handler:
for num, line in enumerate(handler.readlines(), 1):
line = line.rstrip()
if num == lineno:
value = _changes.pop(name)
line = _format_setting(name, value)
if line:
content.append(line)
inside = True
comments = []
lastend = end
try:
name, lineno, end = updates.pop(0)
except IndexError:
if lastend is None:
break
if not inside:
content.append(line)
else:
# Discard lines since variable will be replaced
# But save comments and blank lines
if re.match(r'^\s*(#.*)*\s*$', line):
comments.append(line)
else:
comments = []
# End of variable declaration
if num == lastend:
content.extend(comments)
inside = False
# insert new variables at the end of file
for name, value in _changes.items():
content.append(_format_setting(name, value))
return '\n'.join(content)
def save(changes, settings_file=get_settings_file(), backup=True):
""" apply changes to project.settings file, saving a backup """
new_settings = apply(changes, settings_file)
tmp_settings_file = settings_file + '.tmp'
with open(tmp_settings_file, 'w') as handle:
handle.write(new_settings)
if backup:
os.rename(settings_file, settings_file + '.backup')
os.rename(tmp_settings_file, settings_file)
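# Usage sketch (illustrative setting names):
#
#   save({'DEBUG': False, 'OLD_SETTING': Remove()})
#
# rewrites existing declarations in place, appends new variables at the end,
# drops settings flagged with Remove, and leaves a settings.py.backup copy.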
| 29.397661
| 81
| 0.580465
|
4a06ff9c6ed0e4079aee3b95764cd12a9ee07c9b
| 1,778
|
py
|
Python
|
home/migrations/0016_auto_20210624_1618.py
|
bart-merix/iogt
|
04270c27456aadeb0874eae0859733d19e56f005
|
[
"BSD-2-Clause"
] | null | null | null |
home/migrations/0016_auto_20210624_1618.py
|
bart-merix/iogt
|
04270c27456aadeb0874eae0859733d19e56f005
|
[
"BSD-2-Clause"
] | null | null | null |
home/migrations/0016_auto_20210624_1618.py
|
bart-merix/iogt
|
04270c27456aadeb0874eae0859733d19e56f005
|
[
"BSD-2-Clause"
] | null | null | null |
# Generated by Django 3.1.12 on 2021-06-24 16:18
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.contrib.taggit
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('taggit', '0003_taggeditem_add_unique_index'),
('wagtailcore', '0059_apply_collection_ordering'),
('home', '0015_auto_20210624_1614'),
]
operations = [
migrations.RenameField(
model_name='section',
old_name='icon_active',
new_name='lead_image',
),
migrations.CreateModel(
name='TaggedItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content_object', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='tagged_items', to='wagtailcore.page')),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='home_taggeditem_items', to='taggit.tag')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='article',
name='tags',
field=modelcluster.contrib.taggit.ClusterTaggableManager(blank=True, help_text='A comma-separated list of tags.', through='home.TaggedItem', to='taggit.Tag', verbose_name='Tags'),
),
migrations.AddField(
model_name='section',
name='tags',
field=modelcluster.contrib.taggit.ClusterTaggableManager(blank=True, help_text='A comma-separated list of tags.', through='home.TaggedItem', to='taggit.Tag', verbose_name='Tags'),
),
]
| 39.511111
| 191
| 0.629921
|
4a07004d8b4e897c165bfc46e6f8edd266cb7cdf
| 3,374
|
py
|
Python
|
cairis/controllers/DimensionController.py
|
RachelLar/cairis_update
|
0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2
|
[
"Apache-2.0"
] | null | null | null |
cairis/controllers/DimensionController.py
|
RachelLar/cairis_update
|
0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2
|
[
"Apache-2.0"
] | null | null | null |
cairis/controllers/DimensionController.py
|
RachelLar/cairis_update
|
0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2
|
[
"Apache-2.0"
] | null | null | null |
import httplib
from flask import request, session, make_response
from flask.ext.restful import Resource
from flask_restful_swagger import swagger
from cairis.core.ARM import DatabaseProxyException
from cairis.daemon.CairisHTTPError import ARMHTTPError
from cairis.tools.JsonConverter import json_serialize
from cairis.tools.SessionValidator import validate_proxy
__author__ = 'Robin Quetin'
class DimensionsAPI(Resource):
# region Swagger Doc
@swagger.operation(
notes='Get all dimensions of a specific table',
nickname='dimensions-table-get',
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
},
{
"name": "constraint_id",
"description": "The ID of the constraint used when obtaining the data",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
},
{
"code": httplib.CONFLICT,
"message": "Database conflict"
}
]
)
# endregion
def get(self, table):
session_id = request.args.get('session_id', None)
        constraint_id = request.args.get('constraint_id', -1)  # don't shadow builtin id()
db_proxy = validate_proxy(session, session_id)
try:
            dimensions = db_proxy.getDimensions(table, constraint_id)
except DatabaseProxyException as ex:
raise ARMHTTPError(ex)
finally:
db_proxy.close()
resp = make_response(json_serialize(dimensions, session_id=session_id), httplib.OK)
resp.headers['Content-type'] = 'application/json'
return resp
class DimensionNamesAPI(Resource):
# region Swagger Doc
@swagger.operation(
notes='Get all dimensions of a specific table in a specific environment',
nickname='dimensions-table-environment-get',
parameters=[
{
"name": "session_id",
"description": "The ID of the user's session",
"required": False,
"allowMultiple": False,
"dataType": str.__name__,
"paramType": "query"
}
],
responseMessages=[
{
"code": httplib.BAD_REQUEST,
"message": "The database connection was not properly set up"
}
]
)
# endregion
def get(self, table, environment):
session_id = request.args.get('session_id', None)
db_proxy = validate_proxy(session, session_id)
try:
dimension_names = db_proxy.getDimensionNames(table, environment)
except DatabaseProxyException as ex:
raise ARMHTTPError(ex)
finally:
db_proxy.close()
resp = make_response(json_serialize(dimension_names, session_id=session_id), httplib.OK)
resp.headers['Content-type'] = 'application/json'
return resp
| 33.405941
| 96
| 0.57291
|
4a070138d9c2e79112fd4bed8e3eab931f56a4e5
| 707
|
py
|
Python
|
setup.py
|
ipsod/python_filehelpers
|
aca859f4246d33c45704f9aeafe2d7ba8c426292
|
[
"MIT"
] | null | null | null |
setup.py
|
ipsod/python_filehelpers
|
aca859f4246d33c45704f9aeafe2d7ba8c426292
|
[
"MIT"
] | null | null | null |
setup.py
|
ipsod/python_filehelpers
|
aca859f4246d33c45704f9aeafe2d7ba8c426292
|
[
"MIT"
] | null | null | null |
from setuptools import setup
VERSION = __import__("filehelpers").__version__
setup(
name='python_filehelpers',
description='A set of various file helpers.',
version=VERSION,
license='MIT',
author='Dusty Gamble',
author_email='dusty.gamble@gmail.com',
url='https://github.com/ipsod/python_filehelpers',
packages=['filehelpers'],
zip_safe=False,
classifiers=[
'Topic :: Utilities',
'Topic :: Text Processing',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
]
)
| 28.28
| 54
| 0.633663
|
4a0701bba81d9b0da4611cc9ee8f20a864fe40fd
| 3,619
|
py
|
Python
|
sdks/python/apache_beam/__init__.py
|
charithe/beam
|
f085cb500730cf0c67c467ac55f92b3c59f52b39
|
[
"Apache-2.0"
] | 2
|
2020-06-25T00:47:43.000Z
|
2020-08-24T14:25:13.000Z
|
sdks/python/apache_beam/__init__.py
|
charithe/beam
|
f085cb500730cf0c67c467ac55f92b3c59f52b39
|
[
"Apache-2.0"
] | 10
|
2017-07-20T13:38:13.000Z
|
2017-08-03T15:49:24.000Z
|
sdks/python/apache_beam/__init__.py
|
charithe/beam
|
f085cb500730cf0c67c467ac55f92b3c59f52b39
|
[
"Apache-2.0"
] | 1
|
2019-06-05T09:50:10.000Z
|
2019-06-05T09:50:10.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Apache Beam SDK for Python
==========================
`Apache Beam <https://beam.apache.org>`_ provides a simple, powerful programming
model for building both batch and streaming parallel data processing pipelines.
The Apache Beam SDK for Python provides access to Apache Beam capabilities
from the Python programming language.
Status
------
The SDK is still early in its development, and significant changes
should be expected before the first stable version.
Overview
--------
The key concepts in this programming model are
* :class:`~apache_beam.pvalue.PCollection`: represents a collection of data,
which could be bounded or unbounded in size.
* :class:`~apache_beam.transforms.ptransform.PTransform`: represents a
computation that transforms input PCollections into output PCollections.
* :class:`~apache_beam.pipeline.Pipeline`: manages a directed acyclic graph of
:class:`~apache_beam.transforms.ptransform.PTransform` s and
:class:`~apache_beam.pvalue.PCollection` s that is ready for execution.
* :class:`~apache_beam.runners.runner.PipelineRunner`: specifies where and how
the pipeline should execute.
* :class:`~apache_beam.io.iobase.Read`: read from an external source.
* :class:`~apache_beam.io.iobase.Write`: write to an external data sink.
Typical usage
-------------
At the top of your source file::
import apache_beam as beam
After this import statement
* Transform classes are available as
:class:`beam.FlatMap <apache_beam.transforms.core.FlatMap>`,
:class:`beam.GroupByKey <apache_beam.transforms.core.GroupByKey>`, etc.
* Pipeline class is available as
:class:`beam.Pipeline <apache_beam.pipeline.Pipeline>`
* Text read/write transforms are available as
:class:`beam.io.ReadFromText <apache_beam.io.textio.ReadFromText>`,
:class:`beam.io.WriteToText <apache_beam.io.textio.WriteToText>`.
Examples
--------
The `examples subdirectory
<https://github.com/apache/beam/tree/master/sdks/python/apache_beam/examples>`_
has some examples.
"""
from __future__ import absolute_import
import os
import sys
import warnings
if sys.version_info[0] == 3:
warnings.warn(
'Some syntactic constructs of Python 3 are not yet fully supported by '
'Apache Beam.')
elif sys.version_info[0] == 2 and sys.version_info[1] == 7:
pass
else:
raise RuntimeError(
'The Apache Beam SDK for Python is only supported on Python 2.7 or '
'Python 3. It is not supported on Python [' +
str(sys.version_info) + '].')
# pylint: disable=wrong-import-position
import apache_beam.internal.pickler
from apache_beam import coders
from apache_beam import io
from apache_beam import typehints
from apache_beam import version
from apache_beam.pipeline import Pipeline
from apache_beam.transforms import *
# pylint: enable=wrong-import-position
__version__ = version.__version__
| 34.141509
| 80
| 0.761813
|
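The docstring above walks through Beam's core concepts and its "typical usage" import; a minimal sketch of that pattern, assuming an in-memory bounded input and the default local runner (labels and data here are illustrative, not from the SDK):

import apache_beam as beam

# Create -> Map -> GroupByKey: each step is a PTransform producing a new PCollection.
with beam.Pipeline() as p:
    (p
     | 'MakeWords' >> beam.Create(['a', 'b', 'a'])          # bounded in-memory PCollection
     | 'PairWithOne' >> beam.Map(lambda w: (w, 1))          # (key, value) pairs
     | 'Group' >> beam.GroupByKey()                         # (key, iterable of values)
     | 'Count' >> beam.Map(lambda kv: (kv[0], sum(kv[1])))
     | 'Print' >> beam.Map(print))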
4a0702dfdc530127f392e87aa955525cc235221d
| 3,217
|
py
|
Python
|
test/drivers/second_quantization/pyscfd/test_driver_pyscf.py
|
mtreinish/qiskit-nature
|
bf124c4628a7c1b25b45722d2299b9489e15ac17
|
[
"Apache-2.0"
] | null | null | null |
test/drivers/second_quantization/pyscfd/test_driver_pyscf.py
|
mtreinish/qiskit-nature
|
bf124c4628a7c1b25b45722d2299b9489e15ac17
|
[
"Apache-2.0"
] | 1
|
2021-08-25T13:31:41.000Z
|
2021-08-25T13:31:41.000Z
|
test/drivers/second_quantization/pyscfd/test_driver_pyscf.py
|
LaurinFischer/qiskit-nature
|
7baf7c7f8c3d18e082e90bc1e593c47aa2f698ca
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Driver PySCF """
import unittest
from test import QiskitNatureTestCase, requires_extra_library
from test.drivers.second_quantization.test_driver import TestDriver
from qiskit_nature.drivers import UnitsType
from qiskit_nature.drivers.second_quantization import (
PySCFDriver,
ElectronicStructureDriverType,
ElectronicStructureMoleculeDriver,
)
from qiskit_nature import QiskitNatureError
class TestDriverPySCF(QiskitNatureTestCase, TestDriver):
"""PYSCF Driver tests."""
@requires_extra_library
def setUp(self):
super().setUp()
driver = PySCFDriver(
atom="H .0 .0 .0; H .0 .0 0.735",
unit=UnitsType.ANGSTROM,
charge=0,
spin=0,
basis="sto3g",
)
self.qmolecule = driver.run()
def test_h3(self):
"""Test for H3 chain, see also https://github.com/Qiskit/qiskit-aqua/issues/1148."""
atom = "H 0 0 0; H 0 0 1; H 0 0 2"
driver = PySCFDriver(atom=atom, unit=UnitsType.ANGSTROM, charge=0, spin=1, basis="sto3g")
molecule = driver.run()
self.assertAlmostEqual(molecule.hf_energy, -1.523996200246108, places=5)
def test_h4(self):
"""Test for H4 chain"""
atom = "H 0 0 0; H 0 0 1; H 0 0 2; H 0 0 3"
driver = PySCFDriver(atom=atom, unit=UnitsType.ANGSTROM, charge=0, spin=0, basis="sto3g")
molecule = driver.run()
self.assertAlmostEqual(molecule.hf_energy, -2.09854593699776, places=5)
def test_invalid_atom_type(self):
"""Atom is string with ; separator or list of string"""
with self.assertRaises(QiskitNatureError):
PySCFDriver(atom=("H", 0, 0, 0))
def test_list_atom(self):
"""Check input with list of strings"""
atom = ["H 0 0 0", "H 0 0 1"]
driver = PySCFDriver(atom=atom, unit=UnitsType.ANGSTROM, charge=0, spin=0, basis="sto3g")
molecule = driver.run()
self.assertAlmostEqual(molecule.hf_energy, -1.0661086493179366, places=5)
def test_zmatrix(self):
"""Check z-matrix input"""
atom = "H; H 1 1.0"
driver = PySCFDriver(atom=atom, unit=UnitsType.ANGSTROM, charge=0, spin=0, basis="sto3g")
molecule = driver.run()
self.assertAlmostEqual(molecule.hf_energy, -1.0661086493179366, places=5)
class TestDriverPySCFMolecule(QiskitNatureTestCase, TestDriver):
"""PYSCF Driver Molecule tests."""
@requires_extra_library
def setUp(self):
super().setUp()
driver = ElectronicStructureMoleculeDriver(
TestDriver.MOLECULE, driver_type=ElectronicStructureDriverType.PYSCF
)
self.qmolecule = driver.run()
if __name__ == "__main__":
unittest.main()
| 35.744444
| 97
| 0.669257
|
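The tests above spell out H2/H3/H4 chains by hand; the same geometries can be generated. A sketch using the identical PySCFDriver arguments the tests use (the 1.0 Å spacing and sto3g basis mirror test_h4; the helper name is illustrative):

from qiskit_nature.drivers import UnitsType
from qiskit_nature.drivers.second_quantization import PySCFDriver

def h_chain_hf_energy(n_atoms, spacing=1.0):
    """Hartree-Fock energy of a linear H chain (sketch; mirrors the tests above)."""
    atom = "; ".join(f"H 0 0 {i * spacing}" for i in range(n_atoms))
    spin = n_atoms % 2  # odd chains carry one unpaired electron, as in test_h3
    driver = PySCFDriver(atom=atom, unit=UnitsType.ANGSTROM, charge=0, spin=spin, basis="sto3g")
    return driver.run().hf_energy

# h_chain_hf_energy(4) should land near -2.09854, per test_h4 above.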
4a0702edaa37302ab4bdea587ec00d36f67c1e66
| 105
|
py
|
Python
|
pnpm/compat.py
|
gfunkmonk/django-pnpm
|
cbde236f26e1039f0b880dd7f97d3c256dd12f3b
|
[
"MIT"
] | 1
|
2019-10-17T15:13:13.000Z
|
2019-10-17T15:13:13.000Z
|
pnpm/compat.py
|
gfunkmonk/django-npm
|
cbde236f26e1039f0b880dd7f97d3c256dd12f3b
|
[
"MIT"
] | null | null | null |
pnpm/compat.py
|
gfunkmonk/django-npm
|
cbde236f26e1039f0b880dd7f97d3c256dd12f3b
|
[
"MIT"
] | 1
|
2019-10-17T15:13:13.000Z
|
2019-10-17T15:13:13.000Z
|
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
| 21
| 39
| 0.8
|
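The two-line compat module above is the standard optional-import guard (OrderedDict only joined the stdlib in Python 2.7). The same pattern, sketched for any stdlib-vs-backport pair:

# Prefer the stdlib module; fall back to a third-party backport on old interpreters.
try:
    import json                 # stdlib since Python 2.6
except ImportError:
    import simplejson as json   # assumed installed as the backport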
4a070314e91835ccd443bfce418bdf6f29d629c9
| 1,963
|
py
|
Python
|
facebook_business/adobjects/videocopyrightsegment.py
|
MyrikLD/facebook-python-business-sdk
|
a53c8ba0e8f7d0b41b385c60089f6ba00fa5c814
|
[
"CNRI-Python"
] | 576
|
2018-05-01T19:09:32.000Z
|
2022-03-31T11:45:11.000Z
|
facebook_business/adobjects/videocopyrightsegment.py
|
MyrikLD/facebook-python-business-sdk
|
a53c8ba0e8f7d0b41b385c60089f6ba00fa5c814
|
[
"CNRI-Python"
] | 217
|
2018-05-03T07:31:59.000Z
|
2022-03-29T14:19:52.000Z
|
facebook_business/adobjects/videocopyrightsegment.py
|
MyrikLD/facebook-python-business-sdk
|
a53c8ba0e8f7d0b41b385c60089f6ba00fa5c814
|
[
"CNRI-Python"
] | 323
|
2018-05-01T20:32:26.000Z
|
2022-03-29T07:05:12.000Z
|
# Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class VideoCopyrightSegment(
AbstractObject,
):
def __init__(self, api=None):
super(VideoCopyrightSegment, self).__init__()
self._isVideoCopyrightSegment = True
self._api = api
class Field(AbstractObject.Field):
duration_in_sec = 'duration_in_sec'
media_type = 'media_type'
start_time_in_sec = 'start_time_in_sec'
_field_types = {
'duration_in_sec': 'float',
'media_type': 'string',
'start_time_in_sec': 'float',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
return field_enum_info
| 35.053571
| 79
| 0.738156
|
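Fields on these generated ad objects are addressed through the nested Field class; a sketch of populating one, assuming AbstractObject's usual dict-style accessors (the values are illustrative and typed per _field_types above):

from facebook_business.adobjects.videocopyrightsegment import VideoCopyrightSegment

segment = VideoCopyrightSegment()
segment[VideoCopyrightSegment.Field.media_type] = 'video'        # 'string'
segment[VideoCopyrightSegment.Field.start_time_in_sec] = 0.0     # 'float'
segment[VideoCopyrightSegment.Field.duration_in_sec] = 12.5      # 'float'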
4a0703557eaa7f1aeb7619c4fccfd3f03abecdc8
| 717
|
py
|
Python
|
boj/stack/boj_1935.py
|
ruslanlvivsky/python-algorithm
|
2b49bed33cd0e95b8a1e758008191f4392b3f667
|
[
"MIT"
] | 3
|
2021-07-18T14:40:24.000Z
|
2021-08-14T18:08:13.000Z
|
boj/stack/boj_1935.py
|
jinsuSang/python-algorithm
|
524849a0a7e71034d329fef63c4f384930334177
|
[
"MIT"
] | null | null | null |
boj/stack/boj_1935.py
|
jinsuSang/python-algorithm
|
524849a0a7e71034d329fef63c4f384930334177
|
[
"MIT"
] | null | null | null |
import sys
N = int(sys.stdin.readline().strip())
expression = sys.stdin.readline().strip()
nums = dict()
for expr in expression:
if expr not in '*+-/' and expr not in nums:
nums[expr] = int(sys.stdin.readline().strip())
stack = list()
for expr in expression:
if expr not in '*+-/':
stack.append(nums[expr])
else:
num1 = stack.pop()
num2 = stack.pop()
value = 0
if expr == '+':
value = num2 + num1
elif expr == '-':
value = num2 - num1
elif expr == '*':
value = num2 * num1
elif expr == '/':
value = num2 / num1
stack.append(value)
sys.stdout.write(f'{stack.pop():.2f}')
| 21.727273
| 54
| 0.51046
|
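The BOJ 1935 solution above is a textbook stack-based postfix evaluation; the same algorithm as a reusable function with an operator table in place of the if/elif ladder (a sketch, not the judge submission):

import operator

OPS = {'+': operator.add, '-': operator.sub, '*': operator.mul, '/': operator.truediv}

def eval_postfix(expression, values):
    """Evaluate a postfix expression; operands are looked up in `values`."""
    stack = []
    for tok in expression:
        if tok in OPS:
            right = stack.pop()   # top of stack is the right operand
            left = stack.pop()
            stack.append(OPS[tok](left, right))
        else:
            stack.append(values[tok])
    return stack.pop()

# eval_postfix('AB+', {'A': 1, 'B': 2}) -> 3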
4a07038c737a566e6636554a0ed09a6c5dd9b3d5
| 507
|
py
|
Python
|
src/sensors/gas.py
|
haze-sama/Green-Monitoring.github.io
|
3d6b605c96a4acdffabd6aeeb2cb985f368e8a7a
|
[
"MIT"
] | null | null | null |
src/sensors/gas.py
|
haze-sama/Green-Monitoring.github.io
|
3d6b605c96a4acdffabd6aeeb2cb985f368e8a7a
|
[
"MIT"
] | null | null | null |
src/sensors/gas.py
|
haze-sama/Green-Monitoring.github.io
|
3d6b605c96a4acdffabd6aeeb2cb985f368e8a7a
|
[
"MIT"
] | null | null | null |
import RPi.GPIO as GPIO
class Gas(object):
def __init__(self, channel):
print('Gas initialization.')
self.channel = channel
GPIO.setmode(GPIO.BOARD)
GPIO.setup(self.channel, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
def start(self):
print('Start gas detection.')
GPIO.add_event_detect(self.channel, GPIO.RISING)
GPIO.add_event_callback(self.channel, self.callback)
def callback(self, channel):
print('Sensor detected action!')
| 29.823529
| 69
| 0.662722
|
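A sketch of driving the Gas class above from a main script; the callback fires from GPIO's event thread, so the main thread only has to stay alive (pin 7 is illustrative, BOARD numbering per the class, and the module name is assumed):

import time
from gas import Gas  # assumed module name for the file above

sensor = Gas(channel=7)
sensor.start()
try:
    while True:
        time.sleep(1)   # detection happens via GPIO edge events, not polling
except KeyboardInterrupt:
    pass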
4a070429fde0b500a5cc7ad34edf2f058725a042
| 2,822
|
py
|
Python
|
modules/dense_correspondence_manipulation/scripts/convert_ply_to_vtp.py
|
masato-ka/pytorch-dense-correspondence
|
89a5f87fd773b210e93ebcfeb945c95e7417d0e9
|
[
"BSD-3-Clause"
] | null | null | null |
modules/dense_correspondence_manipulation/scripts/convert_ply_to_vtp.py
|
masato-ka/pytorch-dense-correspondence
|
89a5f87fd773b210e93ebcfeb945c95e7417d0e9
|
[
"BSD-3-Clause"
] | null | null | null |
modules/dense_correspondence_manipulation/scripts/convert_ply_to_vtp.py
|
masato-ka/pytorch-dense-correspondence
|
89a5f87fd773b210e93ebcfeb945c95e7417d0e9
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
import os
import dense_correspondence_manipulation.utils.utils as utils
def run(data_folder, ply_binary_filename='images.ply'):
# install ply if do not already have it
os.chdir(data_folder)
vtp_filename = os.path.join(data_folder, 'images.vtp')
dc_source_dir = utils.getDenseCorrespondenceSourceDir()
ply_to_ascii_executable = os.path.join(dc_source_dir, 'src', 'ply', 'ply2ascii')
path_to_ply = os.path.join(dc_source_dir, "src", "ply")
if not (os.path.isfile(ply_to_ascii_executable)):
os.system("cd " + path_to_ply + " && make")
correct_ply_header_file = os.path.join(dc_source_dir, 'config', 'correct_ply_header.txt')
ply_binary_full_filename = os.path.join(data_folder, ply_binary_filename)
converted_ascii_filename = os.path.join(data_folder, "converted_to_ascii.ply")
converted_ascii_modified_header_filename = os.path.join(data_folder, "converted_to_ascii_modified_header.ply")
# call ply2ascii
os.system(ply_to_ascii_executable + "<./" + ply_binary_filename + "> " + converted_ascii_filename)
# change header to be compatible with Director
# TODO: make so Director accepts other header?
line_elements_vertex = ""
with open(converted_ascii_modified_header_filename, 'w') as outfile:
with open(converted_ascii_filename) as infile:
counter = 0
for line in infile:
counter += 1
if counter == 3:
line_elements_vertex = line
break
with open(correct_ply_header_file) as infile:
counter = 0
for line in infile:
counter += 1
if counter == 4:
outfile.write(line_elements_vertex)
continue
outfile.write(line)
with open(converted_ascii_filename) as infile:
num_skip = 14
counter = 0
for line in infile:
counter += 1
                if counter <= num_skip:
continue
outfile.write(line)
# convert to vtp
convert_ply_to_vtp_script = os.path.join(dc_source_dir, 'modules',
'dense_correspondence_manipulation', 'scripts', 'convertPlyToVtp.py')
print("converted to ascii ply format")
os.system("directorPython " + convert_ply_to_vtp_script + " " + converted_ascii_modified_header_filename)
converted_ascii_modified_header_vtp_filename = os.path.join(data_folder, "converted_to_ascii_modified_header.vtp")
print("finished convert_ply_to_vtp_script")
# clean up and rename
# os.system("rm *.ply *.freiburg")
os.rename(converted_ascii_modified_header_vtp_filename, vtp_filename)
if __name__ == "__main__":
run(os.getcwd())
| 37.131579
| 118
| 0.651665
|
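The header-splicing section above interleaves three file reads inside one writer; the core transformation, factored into a small pure function for clarity (a sketch — the default indices mirror the script's hard-coded lines 3 and 4 and its 14-line skip):

def splice_ply_header(template_header, ascii_ply, vertex_line_idx=2, header_slot=3, body_start=14):
    """Return template_header with its 'element vertex' line replaced by the real
    count from ascii_ply, followed by ascii_ply's body (both args are lists of lines)."""
    header = list(template_header)
    header[header_slot] = ascii_ply[vertex_line_idx]
    return header + ascii_ply[body_start:]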
4a070489e4fcf3087b745ef32e4808f765f82681
| 11,208
|
py
|
Python
|
fltk/util/base_config.py
|
nata1y/fltk-testbed-group-3
|
e23b59fa2a5e638d3804a39fe5012983e2988ca6
|
[
"BSD-2-Clause"
] | null | null | null |
fltk/util/base_config.py
|
nata1y/fltk-testbed-group-3
|
e23b59fa2a5e638d3804a39fe5012983e2988ca6
|
[
"BSD-2-Clause"
] | null | null | null |
fltk/util/base_config.py
|
nata1y/fltk-testbed-group-3
|
e23b59fa2a5e638d3804a39fe5012983e2988ca6
|
[
"BSD-2-Clause"
] | 2
|
2021-05-03T17:40:18.000Z
|
2021-05-11T09:34:30.000Z
|
import torch
import json
from fltk.datasets.distributed import DistCIFAR10Dataset, DistCIFAR100Dataset, DistFashionMNISTDataset
from fltk.nets import Cifar10CNN, FashionMNISTCNN, Cifar100ResNet, FashionMNISTResNet, Cifar10ResNet, Cifar100VGG
SEED = 1
torch.manual_seed(SEED)
class BareConfig:
def __init__(self):
# self.logger = logger
self.batch_size = 10
self.test_batch_size = 1000
self.epochs = 1
self.lr = 0.001
self.momentum = 0.9
self.cuda = False
self.shuffle = False
self.log_interval = 10
self.kwargs = {}
self.contribution_measurement_round = 1
self.contribution_measurement_metric = 'Influence'
self.scheduler_step_size = 50
self.scheduler_gamma = 0.5
self.min_lr = 1e-10
self.round_worker_selection_strategy = None
self.round_worker_selection_strategy_kwargs = None
self.save_model = False
self.save_temp_model = False
self.save_epoch_interval = 1
self.save_model_path = "models"
self.epoch_save_start_suffix = "start"
self.epoch_save_end_suffix = "end"
        self.poison_effort = 'half'  # renamed: the old attribute shadowed the get_poison_effort() method
self.num_workers = 50
# self.num_poisoned_workers = 10
self.federator_host = '0.0.0.0'
self.rank = 0
self.world_size = 0
self.data_sampler = None
self.distributed = False
self.available_nets = {
"Cifar100ResNet": Cifar100ResNet,
"Cifar100VGG": Cifar100VGG,
"Cifar10CNN": Cifar10CNN,
"Cifar10ResNet": Cifar10ResNet,
"FashionMNISTCNN": FashionMNISTCNN,
"FashionMNISTResNet": FashionMNISTResNet
}
self.net = None
self.set_net_by_name('Cifar10CNN')
self.dataset_name = 'cifar10'
self.DistDatasets = {
'cifar10': DistCIFAR10Dataset,
'cifar100': DistCIFAR100Dataset,
'fashion-mnist': DistFashionMNISTDataset,
}
self.train_data_loader_pickle_path = {
'cifar10': 'data_loaders/cifar10/train_data_loader.pickle',
'fashion-mnist': 'data_loaders/fashion-mnist/train_data_loader.pickle',
'cifar100': 'data_loaders/cifar100/train_data_loader.pickle',
}
self.test_data_loader_pickle_path = {
'cifar10': 'data_loaders/cifar10/test_data_loader.pickle',
'fashion-mnist': 'data_loaders/fashion-mnist/test_data_loader.pickle',
'cifar100': 'data_loaders/cifar100/test_data_loader.pickle',
}
self.loss_function = torch.nn.CrossEntropyLoss
self.default_model_folder_path = "default_models"
self.data_path = "data"
###########
# Methods #
###########
def merge_yaml(self, cfg = {}):
"""
total_epochs: 20
epochs_per_cycle: 2
wait_for_clients: true
net: Cifar10CNN
dataset: cifar10
experiment_prefix: 'experiment'
output_location: 'output'
tensor_board_active: true
:param yaml_config:
:return:
"""
if 'total_epochs' in cfg:
self.epochs = cfg['total_epochs']
if 'epochs_per_cycle' in cfg:
self.epochs_per_cycle = cfg['epochs_per_cycle']
if 'wait_for_clients' in cfg:
self.wait_for_clients = cfg['wait_for_clients']
if 'net' in cfg:
self.set_net_by_name(cfg['net'])
if 'dataset' in cfg:
self.dataset_name = cfg['dataset']
if 'experiment_prefix' in cfg:
self.experiment_prefix = cfg['experiment_prefix']
if 'output_location' in cfg:
self.output_location = cfg['output_location']
if 'tensor_board_active' in cfg:
self.tensor_board_active = cfg['tensor_board_active']
if 'clients_per_round' in cfg:
self.clients_per_round = cfg['clients_per_round']
if 'system' in cfg:
if 'clients' in cfg['system']:
if 'amount' in cfg['system']['clients']:
self.world_size = cfg['system']['clients']['amount'] + 1
if 'system' in cfg:
if 'federator' in cfg['system']:
if 'hostname' in cfg['system']['federator']:
self.federator_host = cfg['system']['federator']['hostname']
if 'nic' in cfg['system']['federator']:
self.nic = cfg['system']['federator']['nic']
if 'cuda' in cfg:
if cfg['cuda']:
self.cuda = True
else:
self.cuda = False
def init_logger(self, logger):
self.logger = logger
def get_distributed(self):
return self.distributed
def get_rank(self):
return self.rank
def get_world_size(self):
return self.world_size
def set_sampler(self, sampler):
self.data_sampler = sampler
def get_sampler(self):
return self.data_sampler
def get_round_worker_selection_strategy(self):
return self.round_worker_selection_strategy
def get_round_worker_selection_strategy_kwargs(self):
return self.round_worker_selection_strategy_kwargs
def set_round_worker_selection_strategy_kwargs(self, kwargs):
self.round_worker_selection_strategy_kwargs = kwargs
def set_client_selection_strategy(self, strategy):
self.round_worker_selection_strategy = strategy
def get_data_path(self):
return self.data_path
def get_epoch_save_start_suffix(self):
return self.epoch_save_start_suffix
def get_epoch_save_end_suffix(self):
return self.epoch_save_end_suffix
def get_dataloader_list(self):
return list(self.train_data_loader_pickle_path.keys())
def get_nets_list(self):
return list(self.available_nets.keys())
def set_train_data_loader_pickle_path(self, path, name='cifar10'):
self.train_data_loader_pickle_path[name] = path
def get_train_data_loader_pickle_path(self):
return self.train_data_loader_pickle_path[self.dataset_name]
def set_test_data_loader_pickle_path(self, path, name='cifar10'):
self.test_data_loader_pickle_path[name] = path
def get_test_data_loader_pickle_path(self):
return self.test_data_loader_pickle_path[self.dataset_name]
def set_net_by_name(self, name: str):
self.net = self.available_nets[name]
def get_cuda(self):
return self.cuda
def get_scheduler_step_size(self):
return self.scheduler_step_size
def get_scheduler_gamma(self):
return self.scheduler_gamma
def get_min_lr(self):
return self.min_lr
def get_default_model_folder_path(self):
return self.default_model_folder_path
def get_num_epochs(self):
return self.epochs
def set_num_poisoned_workers(self, num_poisoned_workers):
self.num_poisoned_workers = num_poisoned_workers
def set_num_workers(self, num_workers):
self.num_workers = num_workers
def set_model_save_path(self, save_model_path):
self.save_model_path = save_model_path
def get_logger(self):
return self.logger
def get_loss_function(self):
return self.loss_function
def get_net(self):
return self.net
def get_num_workers(self):
return self.num_workers
def get_num_poisoned_workers(self):
return self.num_poisoned_workers
    def get_poison_effort(self):
        return self.poison_effort
def get_learning_rate(self):
return self.lr
def get_momentum(self):
return self.momentum
def get_shuffle(self):
return self.shuffle
def get_batch_size(self):
return self.batch_size
def get_test_batch_size(self):
return self.test_batch_size
def get_log_interval(self):
return self.log_interval
def get_save_model_folder_path(self):
return self.save_model_path
def get_learning_rate_from_epoch(self, epoch_idx):
lr = self.lr * (self.scheduler_gamma ** int(epoch_idx / self.scheduler_step_size))
if lr < self.min_lr:
self.logger.warning("Updating LR would place it below min LR. Skipping LR update.")
return self.min_lr
self.logger.debug("LR: {}".format(lr))
return lr
def get_contribution_measurement_round(self):
return self.contribution_measurement_round
def get_contribution_measurement_metric(self):
return self.contribution_measurement_metric
def should_save_model(self, epoch_idx):
"""
Returns true/false models should be saved.
:param epoch_idx: current training epoch index
:type epoch_idx: int
"""
if not self.save_model:
return False
        if epoch_idx == 1 or epoch_idx % self.save_epoch_interval == 0:
            return True
        return False
def log(self):
"""
Log this arguments object to the logger.
"""
self.logger.debug("Arguments: {}", str(self))
def __str__(self):
return "\nBatch Size: {}\n".format(self.batch_size) + \
"Test Batch Size: {}\n".format(self.test_batch_size) + \
"Epochs: {}\n".format(self.epochs) + \
"Learning Rate: {}\n".format(self.lr) + \
"Momentum: {}\n".format(self.momentum) + \
"CUDA Enabled: {}\n".format(self.cuda) + \
"Shuffle Enabled: {}\n".format(self.shuffle) + \
"Log Interval: {}\n".format(self.log_interval) + \
"Scheduler Step Size: {}\n".format(self.scheduler_step_size) + \
"Scheduler Gamma: {}\n".format(self.scheduler_gamma) + \
"Scheduler Minimum Learning Rate: {}\n".format(self.min_lr) + \
"Client Selection Strategy: {}\n".format(self.round_worker_selection_strategy) + \
"Client Selection Strategy Arguments: {}\n".format(
json.dumps(self.round_worker_selection_strategy_kwargs, indent=4, sort_keys=True)) + \
"Model Saving Enabled: {}\n".format(self.save_model) + \
"Model Saving Interval: {}\n".format(self.save_epoch_interval) + \
"Model Saving Path (Relative): {}\n".format(self.save_model_path) + \
"Epoch Save Start Prefix: {}\n".format(self.epoch_save_start_suffix) + \
"Epoch Save End Suffix: {}\n".format(self.epoch_save_end_suffix) + \
"Number of Clients: {}\n".format(self.num_workers) + \
"Number of Poisoned Clients: {}\n".format(self.num_poisoned_workers) + \
"NN: {}\n".format(self.net) + \
"Train Data Loader Path: {}\n".format(self.train_data_loader_pickle_path) + \
"Test Data Loader Path: {}\n".format(self.test_data_loader_pickle_path) + \
"Loss Function: {}\n".format(self.loss_function) + \
"Default Model Folder Path: {}\n".format(self.default_model_folder_path) + \
"Data Path: {}\n".format(self.data_path) + \
"Dataset Name: {}\n".format(self.dataset_name)
| 34.275229
| 113
| 0.629907
|
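merge_yaml above takes an already-parsed mapping; wiring it to a file is one safe_load away. A sketch, assuming PyYAML is installed (the filename is illustrative):

import yaml  # PyYAML, assumed available
from fltk.util.base_config import BareConfig

cfg = BareConfig()
with open('experiment.yaml') as f:
    cfg.merge_yaml(yaml.safe_load(f) or {})   # safe_load returns None on an empty file
print(cfg.epochs, cfg.dataset_name, cfg.world_size)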
4a0704fb296cd2480dae89b5b7b50ef5613fe20d
| 1,336
|
py
|
Python
|
particles.py
|
Unbewohnte/Healthless
|
745d3217777bbf54cdab09511bddff5d4e117b37
|
[
"MIT"
] | null | null | null |
particles.py
|
Unbewohnte/Healthless
|
745d3217777bbf54cdab09511bddff5d4e117b37
|
[
"MIT"
] | null | null | null |
particles.py
|
Unbewohnte/Healthless
|
745d3217777bbf54cdab09511bddff5d4e117b37
|
[
"MIT"
] | null | null | null |
import pygame
from random import randint,randrange
particles_on_screen_e = []
particles_on_screen_p = []
class Particle:
def __init__(self,x,y):
self.x = x
self.y = y
self.width = 4
self.height = self.width
self.vel = int(randint(0,3)/5)
self.rect = pygame.Rect(self.x,self.y,self.width,self.height)
self.timer = randint(10,66)
def draw(self,window):
for i in range(5):
pygame.draw.rect(window,(randint(200,255),randint(50,255),20),(self.rect[0] + randint(-35,35), self.rect[1] + randint(-30,30), self.rect[2], self.rect[3]))
    def update(self,side):
        if str(side) == "up":
            # iterate over a copy: removing from the list being looped over skips elements
            for particle in particles_on_screen_e[:]:
                particle.timer -= 0.5
                # move each particle by its own velocity plus jitter
                particle.rect[1] += (particle.vel + randrange(-7,1))
                particle.rect[0] += (particle.vel + randrange(-3,3))
                if particle.timer <= 0:
                    particles_on_screen_e.remove(particle)
        if str(side) == "down":
            for particle in particles_on_screen_p[:]:
                particle.timer -= 0.5
                particle.rect[1] += (particle.vel + randrange(-1,7))
                particle.rect[0] += (particle.vel + randrange(-3,3))
                if particle.timer <= 0:
                    particles_on_screen_p.remove(particle)
| 40.484848
| 167
| 0.556138
|
4a0706660e08591cd9b87a962f65f82ca281639d
| 849
|
py
|
Python
|
shoottikala/models/day.py
|
conikuvat/shootti-ilmo
|
bf5ab15e20173994bac25e6b5cd3aec42f671f05
|
[
"MIT"
] | null | null | null |
shoottikala/models/day.py
|
conikuvat/shootti-ilmo
|
bf5ab15e20173994bac25e6b5cd3aec42f671f05
|
[
"MIT"
] | 9
|
2017-02-15T20:36:49.000Z
|
2017-05-26T12:10:43.000Z
|
shoottikala/models/day.py
|
conikuvat/shootti-ilmo
|
bf5ab15e20173994bac25e6b5cd3aec42f671f05
|
[
"MIT"
] | null | null | null |
import logging
from django.db import models
from ..utils import log_get_or_create
logger = logging.getLogger(__name__)
class Day(models.Model):
name = models.CharField(max_length=31)
abbreviation = models.CharField(max_length=3)
class Meta:
ordering = ('id',)
verbose_name = 'Päivä'
verbose_name_plural = 'Päivät'
def __str__(self):
return self.name
@classmethod
def ensure_days_exist(cls):
for day_id, day_name, day_abbreviation in [
(4, 'Perjantai', 'Pe'),
(5, 'Lauantai', 'La'),
(6, 'Sunnuntai', 'Su'),
]:
day, created = cls.objects.get_or_create(id=day_id, defaults=dict(
name=day_name,
abbreviation=day_abbreviation
))
log_get_or_create(logger, day, created)
| 24.257143
| 78
| 0.595995
|
4a070763b80b6878f798db83a381bcf0699f77e4
| 6,485
|
py
|
Python
|
docs/conf.py
|
alexdlaird/hookie
|
6a5228e7893845fd0a7afc983c6e1852d8f21589
|
[
"MIT"
] | 3
|
2020-08-06T02:15:04.000Z
|
2021-09-21T18:51:46.000Z
|
docs/conf.py
|
alexdlaird/hookie
|
6a5228e7893845fd0a7afc983c6e1852d8f21589
|
[
"MIT"
] | 4
|
2020-10-12T13:08:55.000Z
|
2021-03-02T20:04:00.000Z
|
docs/conf.py
|
alexdlaird/hookie
|
6a5228e7893845fd0a7afc983c6e1852d8f21589
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- Project information -----------------------------------------------------
from hookee import cli
project = "hookee"
copyright = "2020, Alex Laird"
author = "Alex Laird"
# The short X.Y version
version = cli.__version__
# The full version, including alpha/beta/rc tags
release = version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = "1.0"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named "sphinx.ext.*") or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.coverage",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"notfound.extension",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [
".rst"
]
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "venv"]
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"show_powered_by": False,
"github_user": "alexdlaird",
"github_repo": "hookee",
"github_banner": True,
"show_related": False,
"note_bg": "#FFF59C",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_extra_path = ["_html"]
html_css_files = [
"custom.css",
]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``["localtoc.html", "relations.html", "sourcelink.html",
# "searchbox.html"]``.
#
html_sidebars = {
"index": [
"sidebartoc.html",
"usefullinks.html",
"searchbox.html",
],
"**": [
"sidebartoc.html",
"localtoc.html",
"usefullinks.html",
"searchbox.html",
],
}
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "hookeedoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ("letterpaper" or "a4paper").
#
# "papersize": "letterpaper",
# The font size ("10pt", "11pt" or "12pt").
#
# "pointsize": "10pt",
# Additional stuff for the LaTeX preamble.
#
# "preamble": "",
# Latex figure (float) alignment
#
# "figure_align": "htbp",
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "hookee.tex", "hookee Documentation",
"Alex Laird", "manual"),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, "hookee", "hookee Documentation",
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, "hookee", "hookee Documentation",
author, "hookee", "Command line webhooks, on demand.",
"Miscellaneous"),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ""
# A unique identification for the text.
#
# epub_uid = ""
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
intersphinx_mapping = {
"click": ("https://click.palletsprojects.com/en/7.x", None),
"confuse": ("https://confuse.readthedocs.io/en/latest/", None),
"flask": ("https://flask.palletsprojects.com/en/1.1.x/", None),
"pyngrok": ("https://pyngrok.readthedocs.io/en/latest/", None),
"python": ("https://docs.python.org/3", None)
}
| 29.747706
| 79
| 0.643948
|
4a07079a40678e82901e028e9201ea57822f7b76
| 2,431
|
py
|
Python
|
data/p4VQE/R1/benchmark/startQiskit_Class164.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R1/benchmark/startQiskit_Class164.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R1/benchmark/startQiskit_Class164.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=3
# total number=10
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.x(input_qubit[2]) # number=7
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.cx(input_qubit[1],input_qubit[0]) # number=5
prog.cx(input_qubit[1],input_qubit[0]) # number=6
prog.x(input_qubit[3]) # number=8
prog.x(input_qubit[3]) # number=9
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5200
writefile = open("../data/startQiskit_Class164.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog, FakeYorktown())
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 27.314607
| 118
| 0.633896
|
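The __main__ block above picks (gamma, beta) by brute-force maximizing the analytic p=1 landscape; the same grid search isolated as a function (a sketch using the identical F1 expression):

import numpy as np

def best_qaoa_angles(step=0.1):
    """Grid-search the analytic objective used above; returns (gamma, beta)."""
    g, b = np.meshgrid(np.arange(0, np.pi, step), np.arange(0, np.pi, step))
    f1 = 3 - (np.sin(2 * b) ** 2 * np.sin(2 * g) ** 2
              - 0.5 * np.sin(4 * b) * np.sin(4 * g)) * (1 + np.cos(4 * g) ** 2)
    i, j = np.unravel_index(np.argmax(f1), f1.shape)
    return g[i, j], b[i, j]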
4a07083a206ac794228576cd268bf64122b5cb53
| 5,371
|
py
|
Python
|
rainman2/lib/interface.py
|
att-innovate/rainman2
|
edd07c03a9d33a2e44b3a333fc28dc73c8cbe56e
|
[
"MIT"
] | 2
|
2018-06-19T16:52:25.000Z
|
2018-06-25T22:05:38.000Z
|
rainman2/lib/interface.py
|
att-innovate/rainman2
|
edd07c03a9d33a2e44b3a333fc28dc73c8cbe56e
|
[
"MIT"
] | null | null | null |
rainman2/lib/interface.py
|
att-innovate/rainman2
|
edd07c03a9d33a2e44b3a333fc28dc73c8cbe56e
|
[
"MIT"
] | 1
|
2020-09-15T03:06:08.000Z
|
2020-09-15T03:06:08.000Z
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Defines internal interface for rainman2
"""
import logging
from rainman2.utils import exceptions
from rainman2.utils import common_utils
from rainman2.lib.algorithm.Qlearning import controller
from rainman2.lib.environment.cellular import base as cellular_base
__author__ = 'Ari Saha (arisaha@icloud.com)'
__date__ = 'Thursday, February 15th 2018, 1:29:18 pm'
SUPPORTED_ALGORITHMS = {
'Qlearning': controller.QController
}
SUPPORTED_ENVIRONMENTS = {
'Cellular': cellular_base.CellularNetworkEnv,
}
class Rainman2:
# pylint: disable=too-few-public-methods
"""
Definition of internal API
"""
def __init__(self, settings):
"""
Initialize internal API object
"""
self.settings = settings
self.algorithm_config = self.settings.algorithm_config
self.environment_config = self.settings.environment_config
self.update_env = self.settings.update_env
self.logger = logging.getLogger(self.__class__.__name__)
def _build_env_client(self, env_name):
"""
Helper to build envrionment's client if any
"""
if env_name == 'Cellular':
try:
client = cellular_base.initialize_client(
self.environment_config)
except exceptions.ClientNotImplemented as error:
self.logger.debug("Error: {}".format(error))
raise
else:
return client
return None
def _build_env_instance(self, env_name):
"""
Helper method to instantiate Env object
"""
self.environment_config = self.update_env(env_name)
if env_name not in SUPPORTED_ENVIRONMENTS:
error = "Environment: {} is not implemented!".format(env_name)
self.logger.debug(error)
raise exceptions.EnvironmentNotImplemented(error)
self.logger.info(
"Building Environment instance: {}".format(env_name))
env_client = self._build_env_client(env_name)
return SUPPORTED_ENVIRONMENTS[env_name](
self.environment_config, env_client)
def _build_alg_instance(self, algorithm_name, env_instance, agent_name):
"""
Helper method to instantiate Alg object
Args:
algorithm_name: (instance of agorithm)
Reinforcement-Learning algorithm to evaluate the environment.
agent_name: (instance of agent)
Algorithm's agent
"""
if algorithm_name not in SUPPORTED_ALGORITHMS:
error = "Algorithm: {} is not implemented".format(algorithm_name)
self.logger.debug(error)
raise exceptions.AlgorithmNotImplemented(error)
self.logger.debug(
"Building Algorithm instance: {}".format(algorithm_name))
return SUPPORTED_ALGORITHMS[algorithm_name](
self.algorithm_config, env_instance, agent_name)
@common_utils.timeit
def run_experiment(self, env_name, algorithm_name, agent_name=None):
"""
Defines interface to run an experiment
Args:
env_name: (Name of the environment)
Environment Name
algorithm_name: (instance of agorithm)
Reinforcement-Learning algorithm to evaluate the environment.
agent_name: (instance of agent)
Algorithm's agent
Returns:
results: (instance of output)
"""
self.logger.info("Starting experiment!")
try:
env_instance = self._build_env_instance(env_name)
except exceptions.EnvironmentNotImplemented as error:
raise
try:
alg_instance = self._build_alg_instance(
algorithm_name, env_instance, agent_name)
except exceptions.AlgorithmNotImplemented as error:
raise
try:
output = alg_instance.execute()
except Exception as error:
self.logger.exception(
"Experiment failed! Error: {}".format(error))
else:
return output
def main():
"""
Performance testing
"""
# Server profile: num_ues=200, APs=16, Scale=200.0, explore_radius=1
from collections import OrderedDict
from rainman2.settings import SETTINGS
ALGORITHM_CONFIG = OrderedDict(
EPISODES=1,
ALPHA=0.2,
GAMMA=0.9,
EPSILON=0.3,
EPSILON_DECAY=0.99,
EPSILON_MIN=0.01,
VERBOSE=True,
L1_HIDDEN_UNITS=13,
L2_HIDDEN_UNITS=13,
L1_ACTIVATION='relu',
L2_ACTIVATION='relu',
LOSS_FUNCTION='mean_squared_error',
OPTIMIZER='Adam',
)
CELLULAR_MODEL_CONFIG = OrderedDict(
NAME='Cellular',
TYPE='Dev',
SERVER='0.0.0.0',
SERVER_PORT='8000',
VERBOSE=True,
)
rainman2 = Rainman2(SETTINGS)
rainman2.algorithm_config = ALGORITHM_CONFIG
rainman2.environment_config = CELLULAR_MODEL_CONFIG
result = rainman2.run_experiment("Cellular", "Qlearning", "Naive")
print("Number of states encountered: {}".format(len(result.Q)))
print("Number of q_ap_states encountered: {}".format(len(result.Q_ap)))
print(result.Q)
print(result.Q_ap)
if __name__ == '__main__':
main()
| 30.691429
| 77
| 0.630423
|
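New algorithms and environments plug in through the two SUPPORTED_* registries above; a sketch of registering an extra environment (the class and key are illustrative — a real entry also needs _build_env_client to know how to build, or skip, its client):

from rainman2.lib.interface import SUPPORTED_ENVIRONMENTS

class WifiNetworkEnv:
    """Illustrative environment: same (config, client) constructor contract as Cellular."""
    def __init__(self, environment_config, client):
        self.config = environment_config
        self.client = client

SUPPORTED_ENVIRONMENTS['Wifi'] = WifiNetworkEnv
# Rainman2.run_experiment('Wifi', 'Qlearning', 'Naive') would now resolve this class.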
4a070916dbbae38cb26121cf28a542c5004b610e
| 1,910
|
py
|
Python
|
scripts/data/preprocess_sent_class.py
|
lynli/fairseq
|
70b02caf633ef9041b033941bd90306c36cdc5b7
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/data/preprocess_sent_class.py
|
lynli/fairseq
|
70b02caf633ef9041b033941bd90306c36cdc5b7
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/data/preprocess_sent_class.py
|
lynli/fairseq
|
70b02caf633ef9041b033941bd90306c36cdc5b7
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import os
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--inputs',
required=True,
nargs='+',
help='files to process.',
)
parser.add_argument(
'--output',
required=True,
metavar='DIR',
help='Path for output',
)
parser.add_argument(
'--label-col',
type=int,
default=1,
metavar='N',
help='column for the label (0-based indexing)',
)
parser.add_argument(
'--skip-rows',
type=int,
default=0,
metavar='N',
help='number of rows to skip',
)
parser.add_argument(
'--data-col',
type=int,
default=3,
metavar='N',
help='column for the data (0-based indexing)',
)
parser.add_argument(
'--separator',
default='\t',
metavar='SEP',
help='separator between columns',
)
args = parser.parse_args()
print(args)
for inp in args.inputs:
filename = os.path.basename(inp)
base_filename = os.path.splitext(filename)[0]
data_filename = base_filename + '.txt'
label_filename = base_filename + '.lbl'
with open(inp, 'r') as f_in, open(os.path.join(args.output, data_filename), 'w') as data_out, open(
os.path.join(args.output, label_filename), 'w') as lbl_out:
for i, line in enumerate(f_in):
if i < args.skip_rows:
continue
parts = line.strip().split(args.separator)
if args.label_col >= 0:
print(parts[args.label_col], file=lbl_out)
print(parts[args.data_col], file=data_out)
if args.label_col < 0:
os.remove(os.path.join(args.output, label_filename))
if __name__ == '__main__':
main()
| 24.805195
| 107
| 0.53822
|
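End to end, the script above splits a delimited file into parallel .txt/.lbl outputs; an illustrative invocation (paths and column indices are placeholders for a TSV with a header row, label in column 1, text in column 3):

import subprocess
import sys

subprocess.run([
    sys.executable, 'preprocess_sent_class.py',
    '--inputs', 'train.tsv', 'dev.tsv',
    '--output', 'out_dir',
    '--label-col', '1', '--data-col', '3', '--skip-rows', '1',
], check=True)
# Produces out_dir/train.txt + out_dir/train.lbl (and likewise for dev).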
4a07091b59b0f38230c0fe135cf6b9ae86b77b8f
| 938
|
py
|
Python
|
randomfun.py
|
angelruvalcaba/learning_python
|
5998dfd8a38e63793e45962ccc69c19ae63d5e67
|
[
"MIT"
] | null | null | null |
randomfun.py
|
angelruvalcaba/learning_python
|
5998dfd8a38e63793e45962ccc69c19ae63d5e67
|
[
"MIT"
] | null | null | null |
randomfun.py
|
angelruvalcaba/learning_python
|
5998dfd8a38e63793e45962ccc69c19ae63d5e67
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Write a program that does the following
# 1. generates N random sequences
# 2. each sequence has GC composition S and length L
# 3. calculate GC in a sliding window of size W
# 4. calculate entropy in a sliding window of size W
#
# Parameters N, S, L, and W are command line parameters
# Hint: write functions
import math
import random
import sys
N = int(sys.argv[1])
S = float(sys.argv[2])
L = int(sys.argv[3])
W = int(sys.argv[4])
print(N, S, L, W)
for i in range(N):
    # 2. build a random sequence whose GC composition targets S
    seq = ''
    for j in range(L):
        if random.random() < S:
            seq += random.choice('GC')
        else:
            seq += random.choice('AT')
    # 3. GC and 4. entropy in a sliding window of exactly W bases
    for c in range(L - W + 1):
        window = seq[c:c + W]
        gc = (window.count('C') + window.count('G')) / W
        print(window, f'{gc:.4f}')
        h = 0.0
        for nt in 'ACGT':
            p = window.count(nt) / W
            if p > 0:
                h -= p * math.log2(p)
        print(f'{h:.4f}')
"""
python3 randomfun.py 1 0.5 15 7
AATTACAGATCGTGT
gc
0.1429
0.2857
0.2857
0.2857
0.4286
0.5714
0.4286
0.5714
0.4286
entropy
1.3788
1.8424
1.8424
1.8424
1.8424
1.9502
1.9502
1.8424
1.8424
"""
| 14.888889
| 55
| 0.657783
|
4a07097064b06dca21ae7658fa3f59d58d99efeb
| 528
|
py
|
Python
|
greendns/handler_base.py
|
yishaibeeri/greendns
|
a098f2d768fe88fca37bbc94dcbf3199f58a5473
|
[
"MIT"
] | 212
|
2018-12-06T07:40:38.000Z
|
2021-11-16T11:30:42.000Z
|
greendns/handler_base.py
|
faicker/pychinadns
|
00d5d08d1ef950ee0faa49434c54e1699d2ee442
|
[
"MIT"
] | 5
|
2019-03-29T06:05:08.000Z
|
2020-04-12T10:50:06.000Z
|
greendns/handler_base.py
|
faicker/pychinadns
|
00d5d08d1ef950ee0faa49434c54e1699d2ee442
|
[
"MIT"
] | 16
|
2019-01-24T01:17:47.000Z
|
2021-08-04T08:22:20.000Z
|
# -*- coding: utf-8 -*-
from greendns import session
class HandlerBase(object):
def __init__(self):
pass
def add_arg(self, parser):
pass
def parse_arg(self, parser, remaining_argv):
pass
def init(self, io_engine):
return []
def new_session(self):
return session.Session()
def on_client_request(self, sess):
return (True, None)
def on_upstream_response(self, sess, addr):
return None
def on_timeout(self, sess):
return None
| 18.206897
| 48
| 0.609848
|
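Concrete handlers subclass HandlerBase and override only the hooks they need; a minimal sketch that short-circuits queries for a blocked name (the session attribute is an assumption — the real field depends on greendns's Session):

from greendns.handler_base import HandlerBase

class BlocklistHandler(HandlerBase):
    BLOCKED = {'ads.example.com'}

    def on_client_request(self, sess):
        qname = getattr(sess, 'qname', None)   # assumed session attribute
        if qname in self.BLOCKED:
            return (False, None)               # do not forward upstream
        return (True, None)                    # default: pass the query through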
4a0709a8a4ce8360e5b0c5ca5795a8aeae7ef1ae
| 1,044
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/media/v20200201preview/__init__.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/media/v20200201preview/__init__.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/media/v20200201preview/__init__.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .get_media_graph import *
from .media_graph import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-nextgen:media/v20200201preview:MediaGraph":
return MediaGraph(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-nextgen", "media/v20200201preview", _module_instance)
_register_module()
| 30.705882
| 104
| 0.68295
|
4a070a3c2d965ca625e54117fa8fe0d222783b11
| 2,892
|
py
|
Python
|
hms_reddit/main.py
|
haum/reddithaum
|
986733b7155ebee3bcea5f235132b8d2db83bf1f
|
[
"MIT"
] | 1
|
2019-07-16T18:21:04.000Z
|
2019-07-16T18:21:04.000Z
|
hms_reddit/main.py
|
haum/hms_reddit
|
986733b7155ebee3bcea5f235132b8d2db83bf1f
|
[
"MIT"
] | 2
|
2015-12-28T10:29:49.000Z
|
2016-01-20T09:52:49.000Z
|
hms_reddit/main.py
|
haum/reddithaum
|
986733b7155ebee3bcea5f235132b8d2db83bf1f
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015 Romain Porte (MicroJoe)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import logging
import time
from requests.exceptions import ReadTimeout
import coloredlogs
from hms_reddit import settings
from hms_reddit.retrieve import Retriever
from hms_reddit.notify import Notifier
from pika.exceptions import ConnectionClosed
def get_logger():
return logging.getLogger(__name__)
def poll_loop(no, ret):
while True:
try:
# Check new submissions, and eventually notify them
get_logger().info('Checking new submissions...')
ret.check_submissions()
except ReadTimeout:
# API read timeout that sometimes happens
get_logger().error('Read timeout, restarting bot.')
except ConnectionClosed:
# Sometimes RabbitMQ connection will time out if not used, we have
# to connect back to the server.
get_logger().error(
'Disconnected from RabbitMQ, restarting bot.')
# Recreate objects in order to reconnect to the server
no = Notifier()
ret = Retriever(no)
except RuntimeError as e:
# Other error, we catch it in order to not crash the bot
get_logger().error(e)
# Sleep before next poll
for _ in range(settings.POLL_REDDIT_EVERY.seconds):
time.sleep(1)
def main():
# Install logger
coloredlogs.install(level='INFO')
# Create objects
no = Notifier()
ret = Retriever(no)
# Start infinite poll loop
try:
poll_loop(no, ret)
except KeyboardInterrupt:
# Graceful shutdown on SIGINT
get_logger().critical("Got a KeyboardInterrupt")
get_logger().info("Disconnecting from RabbitMQ")
no.disconnect()
get_logger().info("Goodbye")
sys.exit(0)
| 32.133333
| 79
| 0.691217
|
4a070a4e2e89b0d9ed4272566c8c66b60811be37
| 1,612
|
py
|
Python
|
02_fractals/volumeaxis.py
|
Tjorriemorrie/trading
|
aafa15a6c564bfa86948ab30e33d554172b38a3e
|
[
"MIT"
] | 2
|
2017-07-02T09:06:28.000Z
|
2020-09-11T04:23:14.000Z
|
02_fractals/volumeaxis.py
|
Tjorriemorrie/trading
|
aafa15a6c564bfa86948ab30e33d554172b38a3e
|
[
"MIT"
] | 2
|
2021-03-31T19:14:07.000Z
|
2021-06-01T23:34:32.000Z
|
02_fractals/volumeaxis.py
|
Tjorriemorrie/trading
|
aafa15a6c564bfa86948ab30e33d554172b38a3e
|
[
"MIT"
] | 2
|
2016-03-29T07:51:16.000Z
|
2016-10-30T04:53:58.000Z
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
currencies = [
'AUDUSD',
'EURGBP',
'EURJPY',
'EURUSD',
'GBPJPY',
'GBPUSD',
'NZDUSD',
'USDCAD',
'USDCHF',
'USDJPY',
]
for currency in currencies:
    print('\n' + currency)
    print('loading daily...')
daily = pd.read_csv(
r'../' + currency + '1440.csv',
names=['date', 'time', 'open', 'high', 'low', 'close', 'volume'],
parse_dates=[[0, 1]],
index_col=0,
)
# data = daily.as_matrix()
# opens = data[:, 0].astype(float)
# highs = data[:, 1].astype(float)
# lows = data[:, 2].astype(float)
# closes = data[:, 3].astype(float)
# volumes = data[:, 4].astype(int)
# print daily
obv = []
    dayPrev = daily.iloc[0]  # .irow() was removed from pandas; .iloc is the equivalent
for datetime, day in daily.iterrows():
diff = day['close'] - dayPrev['close']
if 'JPY' in currency:
diff /= 100
obv.append(diff * day['volume'])
dayPrev = day
    print(len(obv), 'OBV')
# print obv
plt.plot(obv)
plt.show()
chunked = []
tmp = 0
for i, ob in enumerate(obv):
# print ob
tmp += ob
if abs(tmp) >= 500:
# print tmp
chunked.append(tmp)
tmp = 0
    print(len(chunked), 'chunked')
plt.plot(chunked)
plt.show()
    grouped = []
    n = 500
    for i, chunk in enumerate(chunked):
        start = max(0, i - n)
        s = sum(chunked[start:i])  # rolling sum over the chunked series, not the raw obv list
        grouped.append(s / n)
    print(len(grouped), 'grouped')
plt.plot(grouped)
plt.show()
# break
| 20.935065
| 73
| 0.511166
|
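The OBV accumulation above is a per-row Python loop; pandas can express the same signed-volume series directly (a sketch — the first row's diff is zeroed to match dayPrev starting at row 0, and the JPY scaling is kept):

import pandas as pd

def obv_series(daily, currency):
    """Signed volume: close-to-close change (scaled for JPY pairs) times volume."""
    diff = daily['close'].diff().fillna(0.0)
    if 'JPY' in currency:
        diff = diff / 100
    return diff * daily['volume']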
4a070ad6dd420714c49f7b4a8b7f99beeda9df30
| 3,208
|
py
|
Python
|
src/handlers/ksp.py
|
siebert/KSP
|
fc5e874ba4f2c1b5d0b8c6034c129416ff65ea33
|
[
"BSD-2-Clause-FreeBSD"
] | 23
|
2015-03-07T03:01:04.000Z
|
2022-01-31T01:26:30.000Z
|
src/handlers/ksp.py
|
siebert/KSP
|
fc5e874ba4f2c1b5d0b8c6034c129416ff65ea33
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2021-01-21T21:11:14.000Z
|
2021-01-21T21:11:14.000Z
|
src/handlers/ksp.py
|
pwr/KSP
|
fc5e874ba4f2c1b5d0b8c6034c129416ff65ea33
|
[
"BSD-2-Clause-FreeBSD"
] | 6
|
2015-01-25T00:40:18.000Z
|
2021-01-13T16:29:06.000Z
|
import logging
from handlers import TODO_PATH, CDE_PATH, FIRS_PATH
from handlers.dummy import Dummy, DummyResponse
import devices
import config, features
_FIRST_CONTACT = '''
<?xml version="1.0" encoding="UTF-8"?>
<response>
<total_count>0</total_count>
<next_pull_time/>
<items>
<item action="UPLOAD" type="SCFG" key="KSP.upload.scfg" priority="50" is_incremental="false" sequence="0" url="$_SERVER_URL_$ksp/scfg"/>
<item action="SET" type="SCFG" key="KSP.set.scfg" priority="600" is_incremental="false" sequence="0">$_SERVERS_CONFIG_$</item>
<item action="UPLOAD" type="SNAP" key="KSP.upload.snap" priority="1100" is_incremental="false" sequence="0"
url="$_SERVER_URL_$FionaCDEServiceEngine/UploadSnapshot"/>
</items>
</response>
'''.replace('\t', '').replace('\n', '')
def _first_contact(request, device):
# triggered actions:
# - upload config, for debugging purposes (we can check the API urls config in the logs)
# - update client API urls, customized for the particular client type
# - upload snapshot -- it will include device serial and model for the kindles
text = _FIRST_CONTACT \
.replace('$_SERVER_URL_$', config.server_url(request)) \
.replace('$_SERVERS_CONFIG_$', _servers_config(request, device))
return bytes(text, 'UTF-8')
def _servers_config(request, device):
is_kindle = device.is_kindle()
server_url = config.server_url(request)
def _url(x):
# always drop the last / from the url
# the kindle devices urls also need to include the service paths (FionaTodoListProxy, FionaCDEServiceEngine, etc)
# the other clients seem to require urls without those paths
return (server_url + x.strip('/')) if is_kindle else server_url[:-1]
# we always need the todo and cde urls
urls = [ '', 'url.todo=' + _url(TODO_PATH), 'url.cde=' + _url(CDE_PATH) ]
if is_kindle:
# cookie domains ensures we get the proper cookie and are able to identify the device
urls.append('cookie.store.domains=.amazon.com,' + config.server_hostname)
# we need these urls to intercept registration/deregistration calls,
# so that we can update the client certificate
urls.extend((
'url.firs=' + _url(FIRS_PATH),
'url.firs.unauth=' + _url(FIRS_PATH),
))
else:
urls.append('url.firs=' + _url(FIRS_PATH))
# not sure what this is for, but all non-kindle clients seem to have it
urls.append('url.cde.nossl=' + _url(CDE_PATH))
# all other clients queue up the logs upload commands
if not features.allow_logs_upload:
if is_kindle:
ignore = config.server_url(request) + 'ksp/ignore'
urls.extend((
'url.messaging.post=' + ignore,
'url.det=' + ignore,
'url.det.unauth=' + ignore,
))
# else:
# urls.extend((
# 'url.messaging.post=',
# 'url.det=',
# 'url.det.unauth=',
# ))
urls.append('')
return '\n'.join(urls)
class KSP_Handler (Dummy):
def __init__(self):
Dummy.__init__(self, 'ksp', '/ksp')
def call(self, request, device):
if request.path.startswith('/ksp/ignore'):
return 200
if request.command == 'PUT' and request.path == '/ksp/scfg':
logging.debug("got client configuration:\n%s", request.body)
return 200
        logging.warning("unknown /ksp call %s", request.path)
return 200
| 34.494624
| 139
| 0.700125
|
4a070b6c90a19e5160f9fb6e8d323ade18938395
| 502
|
py
|
Python
|
raindrop/wsgi.py
|
Ronyonka/rain-drop
|
ad9ba2bd70cd587e5ab5f45f317c4054c5960bbd
|
[
"Unlicense"
] | null | null | null |
raindrop/wsgi.py
|
Ronyonka/rain-drop
|
ad9ba2bd70cd587e5ab5f45f317c4054c5960bbd
|
[
"Unlicense"
] | 4
|
2019-12-04T23:38:38.000Z
|
2022-02-10T07:55:43.000Z
|
raindrop/wsgi.py
|
Ronyonka/rain-drop
|
ad9ba2bd70cd587e5ab5f45f317c4054c5960bbd
|
[
"Unlicense"
] | null | null | null |
"""
WSGI config for raindrop project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "raindrop.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
| 26.421053
| 79
| 0.7749
|
4a070b789ddc4f835182dd7e7fe4ee151b898410
| 569
|
py
|
Python
|
backend/main/migrations/0001_initial.py
|
Djooonni/dilci_back
|
704114d526af863b0089670fa229a725b87f2d6c
|
[
"Apache-2.0"
] | null | null | null |
backend/main/migrations/0001_initial.py
|
Djooonni/dilci_back
|
704114d526af863b0089670fa229a725b87f2d6c
|
[
"Apache-2.0"
] | null | null | null |
backend/main/migrations/0001_initial.py
|
Djooonni/dilci_back
|
704114d526af863b0089670fa229a725b87f2d6c
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.1 on 2020-08-26 06:07
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='gps_data',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('data', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
]
| 24.73913
| 114
| 0.58348
|
4a070c6dcab27b3773343d4f88738f61019b9c50
| 29,979
|
py
|
Python
|
pysmashgg/filters.py
|
JeremySkalla/pysmashgg
|
7176d553b59bbab33c2095ed34d0259432896bc2
|
[
"MIT"
] | 10
|
2021-01-14T17:59:13.000Z
|
2022-03-01T18:20:12.000Z
|
pysmashgg/filters.py
|
JeremySkalla/pysmashgg
|
7176d553b59bbab33c2095ed34d0259432896bc2
|
[
"MIT"
] | 2
|
2022-03-01T12:44:00.000Z
|
2022-03-16T15:02:16.000Z
|
pysmashgg/filters.py
|
JeremySkalla/pysmashgg
|
7176d553b59bbab33c2095ed34d0259432896bc2
|
[
"MIT"
] | 4
|
2021-09-26T19:25:50.000Z
|
2022-03-16T11:43:47.000Z
|
# TOURNAMENTS.PY AND EVENTS.PY
# Filtering for the player_id function
def player_id_filter(response, player_name):
    if response['data']['event']['entrants']['nodes'] is None:
        return
    player_id = None
    for node in response['data']['event']['entrants']['nodes'][0]['participants']:
        if node['gamerTag'].lower() == player_name.lower():
            player_id = node['player']['id']
        elif (node['gamerTag'].split("|")[-1]).lower() == player_name.lower():
            player_id = node['player']['id']
    return player_id
# Filter for the event_id function
def event_id_filter(response, event_name):
if response['data']['tournament'] is None:
return
for event in response['data']['tournament']['events']:
if event['slug'].split("/")[-1] == event_name:
return event['id']
return
# Filtering for the show function
def show_filter(response):
if response['data']['tournament'] is None:
return
data = {}
data['id'] = response['data']['tournament']['id']
data['name'] = response['data']['tournament']['name']
data['country'] = response['data']['tournament']['countryCode']
data['state'] = response['data']['tournament']['addrState']
data['city'] = response['data']['tournament']['city']
data['startTimestamp'] = response['data']['tournament']['startAt']
data['endTimestamp'] = response['data']['tournament']['endAt']
data['entrants'] = response['data']['tournament']['numAttendees']
return data
# Filtering for the show_with_brackets function
def show_with_brackets_filter(response, event_name):
if response['data']['tournament'] is None:
return
data = {}
data['id'] = response['data']['tournament']['id']
data['name'] = response['data']['tournament']['name']
data['country'] = response['data']['tournament']['countryCode']
data['state'] = response['data']['tournament']['addrState']
data['city'] = response['data']['tournament']['city']
data['startTimestamp'] = response['data']['tournament']['startAt']
data['endTimestamp'] = response['data']['tournament']['endAt']
data['entrants'] = response['data']['tournament']['numAttendees']
for event in response['data']['tournament']['events']:
if event['slug'].split("/")[-1] == event_name:
data['eventId'] = event['id']
data['eventName'] = event['name']
data['eventSlug'] = event['slug'].split('/')[-1]
bracket_ids = []
if event['phaseGroups'] is not None:
for node in event['phaseGroups']:
bracket_ids.append(node['id'])
data['bracketIds'] = bracket_ids
break
return data
# Filtering for the show_with_brackets_all function
def show_with_brackets_all_filter(response):
if response['data']['tournament'] is None:
return
data = {}
data['id'] = response['data']['tournament']['id']
data['name'] = response['data']['tournament']['name']
data['country'] = response['data']['tournament']['countryCode']
data['state'] = response['data']['tournament']['addrState']
data['city'] = response['data']['tournament']['city']
data['startTimestamp'] = response['data']['tournament']['startAt']
data['endTimestamp'] = response['data']['tournament']['endAt']
data['entrants'] = response['data']['tournament']['numAttendees']
for event in response['data']['tournament']['events']:
bracket_ids = []
if event['phaseGroups'] is not None:
for node in event['phaseGroups']:
bracket_ids.append(node['id'])
del event['phaseGroups']
event['bracketIds'] = bracket_ids
data['events'] = response['data']['tournament']['events']
return data
# Filter for the show_events function
def show_events_filter(response):
if response['data']['tournament'] is None:
return
event_list = []
for event in response['data']['tournament']['events']:
cur_event = {}
cur_event['id'] = event['id']
cur_event['name'] = event['name']
cur_event['slug'] = event['slug'].split('/')[-1]
cur_event['entrants'] = event['numEntrants']
event_list.append(cur_event)
return event_list
# Filter for the show_sets function
def show_sets_filter(response):
if response['data']['event'] is None:
return
if response['data']['event']['sets']['nodes'] is None:
return
sets = [] # Need for return at the end
for node in response['data']['event']['sets']['nodes']:
if (node['slots'][0]['entrant'] is None or node['slots'][1]['entrant'] is None):
continue # This fixes a bug when tournament ends early
cur_set = {}
cur_set['id'] = node['id']
cur_set['entrant1Id'] = node['slots'][0]['entrant']['id']
cur_set['entrant2Id'] = node['slots'][1]['entrant']['id']
cur_set['entrant1Name'] = node['slots'][0]['entrant']['name']
cur_set['entrant2Name'] = node['slots'][1]['entrant']['name']
# Next 2 if/else blocks make sure there's a result in, sometimes DQs are weird
# there also could be ongoing matches
match_done = True
if node['slots'][0]['standing'] is None:
cur_set['entrant1Score'] = -1
match_done = False
elif node['slots'][0]['standing']['stats']['score']['value'] is not None:
cur_set['entrant1Score'] = node['slots'][0]['standing']['stats']['score']['value']
else:
cur_set['entrant1Score'] = -1
if node['slots'][1]['standing'] is None:
cur_set['entrant2Score'] = -1
match_done = False
elif node['slots'][1]['standing']['stats']['score']['value'] is not None:
cur_set['entrant2Score'] = node['slots'][1]['standing']['stats']['score']['value']
else:
cur_set['entrant2Score'] = -1
# Determining winner/loser (elif because sometimes smashgg won't give us one)
if match_done:
cur_set['completed'] = True
if node['slots'][0]['standing']['placement'] == 1:
cur_set['winnerId'] = cur_set['entrant1Id']
cur_set['loserId'] = cur_set['entrant2Id']
cur_set['winnerName'] = cur_set['entrant1Name']
cur_set['loserName'] = cur_set['entrant2Name']
elif node['slots'][0]['standing']['placement'] == 2:
cur_set['winnerId'] = cur_set['entrant2Id']
cur_set['loserId'] = cur_set['entrant1Id']
cur_set['winnerName'] = cur_set['entrant2Name']
cur_set['loserName'] = cur_set['entrant1Name']
else:
cur_set['completed'] = False
cur_set['bracketName'] = node['phaseGroup']['phase']['name']
cur_set['bracketId'] = node['phaseGroup']['id']
# This gives player_ids, but it also is made to work with team events
for j in range(0, 2):
players = []
for user in node['slots'][j]['entrant']['participants']:
cur_player = {}
cur_player['playerId'] = user['player']['id']
cur_player['playerTag'] = user['player']['gamerTag']
players.append(cur_player)
cur_set['entrant' + str(j+1) + 'Players'] = players
sets.append(cur_set) # Adding that specific set onto the large list of sets
return sets
# Filter for the show_entrants function
def show_entrants_filter(response):
if response['data']['event'] is None:
return
if response['data']['event']['standings']['nodes'] is None:
return
entrants = [] # Need for return at the end
for node in response['data']['event']['standings']['nodes']:
cur_entrant = {}
cur_entrant['entrantId'] = node['entrant']['id']
cur_entrant['tag'] = node['entrant']['name']
cur_entrant['finalPlacement'] = node['placement']
if node['entrant']['seeds'] is None:
cur_entrant['seed'] = -1
else:
cur_entrant['seed'] = node['entrant']['seeds'][0]['seedNum']
players = []
for user in node['entrant']['participants']:
cur_player = {}
if user['player']['id'] is not None:
cur_player['playerId'] = user['player']['id']
else:
cur_player['playerId'] = "None"
cur_player['playerTag'] = user['player']['gamerTag']
players.append(cur_player)
cur_entrant['entrantPlayers'] = players
entrants.append(cur_entrant)
return entrants
# Filter for the show_events_brackets function
def show_events_brackets_filter(response, event_name):
if response['data']['tournament'] is None:
return
brackets = {}
for event in response['data']['tournament']['events']:
if event['slug'].split('/')[-1] == event_name:
bracket_ids = []
for node in event['phaseGroups']:
bracket_ids.append(node['id'])
brackets['eventName'] = event['name']
brackets['slug'] = event['slug']
brackets['bracketIds'] = bracket_ids
return brackets
# Filter for the show_all_event_brackets function
def show_all_event_brackets_filter(response):
if response['data']['tournament'] is None:
return
brackets = []
for event in response['data']['tournament']['events']:
cur_bracket = {}
bracket_ids = []
if event['phaseGroups'] is not None:
for node in event['phaseGroups']:
bracket_ids.append(node['id'])
cur_bracket['eventName'] = event['name']
cur_bracket['slug'] = event['slug']
cur_bracket['bracketIds'] = bracket_ids
brackets.append(cur_bracket)
return brackets
# Filter for the show_entrant_sets function
def show_entrant_sets_filter(response):
if response['data']['event'] is None:
return
if response['data']['event']['sets']['nodes'] is None:
return
sets = [] # Need for return at the end
    for node in response['data']['event']['sets']['nodes']:
        if node['slots'][0]['entrant'] is None or node['slots'][1]['entrant'] is None:
            continue  # same early-ending guard as in show_sets_filter above
        cur_set = {}
cur_set['id'] = node['id']
cur_set['entrant1Id'] = node['slots'][0]['entrant']['id']
cur_set['entrant2Id'] = node['slots'][1]['entrant']['id']
cur_set['entrant1Name'] = node['slots'][0]['entrant']['name']
cur_set['entrant2Name'] = node['slots'][1]['entrant']['name']
# Next 2 if/else blocks make sure there's a result in, sometimes DQs are weird
match_done = True
if node['slots'][0]['standing'] is None:
cur_set['entrant1Score'] = -1
match_done = False
elif node['slots'][0]['standing']['stats']['score']['value'] is not None:
cur_set['entrant1Score'] = node['slots'][0]['standing']['stats']['score']['value']
else:
cur_set['entrant1Score'] = -1
if node['slots'][1]['standing'] is None:
cur_set['entrant2Score'] = -1
match_done = False
elif node['slots'][1]['standing']['stats']['score']['value'] is not None:
cur_set['entrant2Score'] = node['slots'][1]['standing']['stats']['score']['value']
else:
cur_set['entrant2Score'] = -1
# Determining winner/loser (elif because sometimes smashgg won't give us one)
if match_done:
cur_set['completed'] = True
if node['slots'][0]['standing']['placement'] == 1:
cur_set['winnerId'] = cur_set['entrant1Id']
cur_set['loserId'] = cur_set['entrant2Id']
cur_set['winnerName'] = cur_set['entrant1Name']
cur_set['loserName'] = cur_set['entrant2Name']
elif node['slots'][0]['standing']['placement'] == 2:
cur_set['winnerId'] = cur_set['entrant2Id']
cur_set['loserId'] = cur_set['entrant1Id']
cur_set['winnerName'] = cur_set['entrant2Name']
cur_set['loserName'] = cur_set['entrant1Name']
else:
cur_set['completed'] = False
cur_set['setRound'] = node['fullRoundText']
cur_set['bracketId'] = node['phaseGroup']['id']
sets.append(cur_set) # Adding that specific set onto the large list of sets
return sets
# Filter for the show_head_to_head function
def show_head_to_head_filter(response, player2_name):
if response['data']['event'] is None:
return
if response['data']['event']['sets']['nodes'] is None:
return
sets = []
for node in response['data']['event']['sets']['nodes']:
        if node['slots'][0]['entrant'] is None or node['slots'][1]['entrant'] is None:
            continue  # same early-ending guard as in show_sets_filter above
        # Yes, the if statement needs to be this long to account for all cases
        # I don't want to run another query, smash.gg's API can be trash sometimes
        if ((node['slots'][0]['entrant']['name'].split('|')[-1]).lower() == player2_name.lower()
            or node['slots'][0]['entrant']['name'].lower() == player2_name.lower()
            or (node['slots'][1]['entrant']['name'].split('|')[-1]).lower() == player2_name.lower()
            or node['slots'][1]['entrant']['name'].lower() == player2_name.lower()):
            cur_set = {}
cur_set['id'] = node['id']
cur_set['entrant1Id'] = node['slots'][0]['entrant']['id']
cur_set['entrant2Id'] = node['slots'][1]['entrant']['id']
cur_set['entrant1Name'] = node['slots'][0]['entrant']['name']
cur_set['entrant2Name'] = node['slots'][1]['entrant']['name']
# Next 2 if/else blocks make sure there's a result in, sometimes DQs are weird
match_done = True
if node['slots'][0]['standing'] is None:
cur_set['entrant1Score'] = -1
match_done = False
elif node['slots'][0]['standing']['stats']['score']['value'] is not None:
cur_set['entrant1Score'] = node['slots'][0]['standing']['stats']['score']['value']
else:
cur_set['entrant1Score'] = -1
if node['slots'][1]['standing'] is None:
cur_set['entrant2Score'] = -1
match_done = False
elif node['slots'][1]['standing']['stats']['score']['value'] is not None:
cur_set['entrant2Score'] = node['slots'][1]['standing']['stats']['score']['value']
else:
cur_set['entrant2Score'] = -1
# Determining winner/loser (elif because sometimes smashgg won't give us one)
if match_done:
cur_set['completed'] = True
if node['slots'][0]['standing']['placement'] == 1:
cur_set['winnerId'] = cur_set['entrant1Id']
cur_set['loserId'] = cur_set['entrant2Id']
cur_set['winnerName'] = cur_set['entrant1Name']
cur_set['loserName'] = cur_set['entrant2Name']
elif node['slots'][0]['standing']['placement'] == 2:
cur_set['winnerId'] = cur_set['entrant2Id']
cur_set['loserId'] = cur_set['entrant1Id']
cur_set['winnerName'] = cur_set['entrant2Name']
cur_set['loserName'] = cur_set['entrant1Name']
else:
cur_set['completed'] = False
cur_set['setRound'] = node['fullRoundText']
cur_set['bracketId'] = node['phaseGroup']['id']
sets.append(cur_set)
return sets
# Filter for the show_event_by_game_size_dated function
def show_event_by_game_size_dated_filter(response, size, videogame_id):
if response['data']['tournaments'] is None:
return
if response['data']['tournaments']['nodes'] is None:
return
events = []
for node in response['data']['tournaments']['nodes']:
for event in node['events']:
if event['videogame']['id'] == videogame_id and event['numEntrants'] >= size:
cur_event = {}
cur_event['tournamentName'] = node['name']
cur_event['tournamentSlug'] = node['slug'].split('/')[-1]
cur_event['tournamentId'] = node['id']
cur_event['online'] = node['isOnline']
cur_event['endAt'] = node['endAt']
cur_event['eventName'] = event['name']
cur_event['eventId'] = event['id']
cur_event['numEntrants'] = event['numEntrants']
events.append(cur_event)
return events
# Filter for the show_lightweight_results function
def show_lightweight_results_filter(response):
if response['data']['event'] is None:
return
if response['data']['event']['standings']['nodes'] is None:
return
entrants = []
for node in response['data']['event']['standings']['nodes']:
cur_entrant = {}
cur_entrant['placement'] = node['placement']
cur_entrant['name'] = node['entrant']['name'].split(' | ')[-1]
cur_entrant['id'] = node['entrant']['id']
entrants.append(cur_entrant)
return entrants
# Filter for the show_by_country function
def show_by_country_filter(response):
if response['data']['tournaments'] is None:
return
if response['data']['tournaments']['nodes'] is None:
return
tournaments = []
for node in response['data']['tournaments']['nodes']:
cur_tournament = {}
cur_tournament['id'] = node['id']
cur_tournament['name'] = node['name']
cur_tournament['slug'] = node['slug'].split('/')[-1]
cur_tournament['entrants'] = node['numAttendees']
cur_tournament['state'] = node['addrState']
cur_tournament['city'] = node['city']
cur_tournament['startTimestamp'] = node['startAt']
cur_tournament['endTimestamp'] = node['endAt']
# IMPLEMENT THIS ONCE I ACTUALLY UNDERSTAND HOW STATE WORKS
# if node['state'] == 3:
# cur_tournament['completed'] = True
# else:
# cur_tournament['completed'] = False
tournaments.append(cur_tournament)
return tournaments
# Filter for the show_by_state function
def show_by_state_filter(response):
if response['data']['tournaments'] is None:
return
if response['data']['tournaments']['nodes'] is None:
return
tournaments = []
for node in response['data']['tournaments']['nodes']:
cur_tournament = {}
cur_tournament['id'] = node['id']
cur_tournament['name'] = node['name']
cur_tournament['slug'] = node['slug'].split('/')[-1]
cur_tournament['entrants'] = node['numAttendees']
cur_tournament['city'] = node['city']
cur_tournament['startTimestamp'] = node['startAt']
cur_tournament['endTimestamp'] = node['endAt']
# IMPLEMENT THIS ONCE I ACTUALLY UNDERSTAND HOW STATE WORKS
# if node['state'] == 3:
# cur_tournament['completed'] = True
# else:
# cur_tournament['completed'] = False
tournaments.append(cur_tournament)
return tournaments
def show_by_radius_filter(response):
if response['data']['tournaments'] is None:
return
if response['data']['tournaments']['nodes'] is None:
return
tournaments = []
for node in response['data']['tournaments']['nodes']:
cur_tournament = {}
cur_tournament['id'] = node['id']
cur_tournament['name'] = node['name']
cur_tournament['slug'] = node['slug'].split('/')[-1]
cur_tournament['entrants'] = node['numAttendees']
cur_tournament['country'] = node['countryCode']
cur_tournament['state'] = node['addrState']
cur_tournament['city'] = node['city']
cur_tournament['startTimestamp'] = node['startAt']
cur_tournament['endTimestamp'] = node['endAt']
# IMPLEMENT THIS ONCE I ACTUALLY UNDERSTAND HOW STATE WORKS
# if node['state'] == 3:
# cur_tournament['completed'] = True
# else:
# cur_tournament['completed'] = False
tournaments.append(cur_tournament)
return tournaments
def show_players_by_sponsor_filter(response):
if response['data']['tournament'] is None:
return
if response['data']['tournament']['participants']['nodes'] is None:
return
players = []
for node in response['data']['tournament']['participants']['nodes']:
cur_player = {}
cur_player['tag'] = node['gamerTag']
if node['user'] is not None:
            cur_player['playerId'] = node['user']['player']['id']
            cur_player['name'] = node['user']['name']
            cur_player['country'] = node['user']['location']['country']
            cur_player['state'] = node['user']['location']['state']
            cur_player['city'] = node['user']['location']['city']
players.append(cur_player)
return players
# BRACKETS.PY
# Filter for the show_entrants function
def bracket_show_entrants_filter(response):
if response['data']['phaseGroup'] is None:
return
if response['data']['phaseGroup']['seeds']['nodes'] is None:
return
entrants = [] # Need for return at the end
for node in response['data']['phaseGroup']['seeds']['nodes']:
cur_entrant = {}
cur_entrant['entrantId'] = node['entrant']['id']
cur_entrant['tag'] = node['entrant']['name']
cur_entrant['finalPlacement'] = node['placement']
cur_entrant['seed'] = node['seedNum']
players = []
for user in node['entrant']['participants']:
cur_player = {}
cur_player['playerId'] = user['player']['id']
cur_player['playerTag'] = user['player']['gamerTag']
players.append(cur_player)
cur_entrant['entrantPlayers'] = players
entrants.append(cur_entrant)
return entrants
# Filter for the show_sets function
def bracket_show_sets_filter(response):
if response['data']['phaseGroup'] is None:
return
if response['data']['phaseGroup']['sets']['nodes'] is None:
return
bracket_name = response['data']['phaseGroup']['phase']['name']
sets = [] # Need for return at the end
    for node in response['data']['phaseGroup']['sets']['nodes']:
        if node['slots'][0]['entrant'] is None or node['slots'][1]['entrant'] is None:
            continue  # same early-ending guard as in show_sets_filter above
        cur_set = {}
cur_set['id'] = node['id']
cur_set['entrant1Id'] = node['slots'][0]['entrant']['id']
cur_set['entrant2Id'] = node['slots'][1]['entrant']['id']
cur_set['entrant1Name'] = node['slots'][0]['entrant']['name']
cur_set['entrant2Name'] = node['slots'][1]['entrant']['name']
# Next 2 if/else blocks make sure there's a result in, sometimes DQs are weird
match_done = True
if node['slots'][0]['standing'] is None:
cur_set['entrant1Score'] = -1
match_done = False
elif node['slots'][0]['standing']['stats']['score']['value'] is not None:
cur_set['entrant1Score'] = node['slots'][0]['standing']['stats']['score']['value']
else:
cur_set['entrant1Score'] = -1
        if node['slots'][1]['standing'] is None:
cur_set['entrant2Score'] = -1
match_done = False
elif node['slots'][1]['standing']['stats']['score']['value'] is not None:
cur_set['entrant2Score'] = node['slots'][1]['standing']['stats']['score']['value']
else:
cur_set['entrant2Score'] = -1
# Determining winner/loser (elif because sometimes smashgg won't give us one)
if match_done:
cur_set['completed'] = True
if node['slots'][0]['standing']['placement'] == 1:
cur_set['winnerId'] = cur_set['entrant1Id']
cur_set['loserId'] = cur_set['entrant2Id']
cur_set['winnerName'] = cur_set['entrant1Name']
cur_set['loserName'] = cur_set['entrant2Name']
elif node['slots'][0]['standing']['placement'] == 2:
cur_set['winnerId'] = cur_set['entrant2Id']
cur_set['loserId'] = cur_set['entrant1Id']
cur_set['winnerName'] = cur_set['entrant2Name']
cur_set['loserName'] = cur_set['entrant1Name']
else:
cur_set['completed'] = False
cur_set['bracketName'] = bracket_name
for j in range(0, 2):
players = []
for user in node['slots'][j]['entrant']['participants']:
cur_player = {}
cur_player['playerId'] = user['player']['id']
cur_player['playerTag'] = user['player']['gamerTag']
players.append(cur_player)
cur_set['entrant' + str(j+1) + 'Players'] = players
sets.append(cur_set) # Adding that specific set onto the large list of sets
return sets
# PLAYERS.PY
# Filter for the get_info function
def player_show_info_filter(response):
if response['data']['player'] is None:
return
player = {}
player['tag'] = response['data']['player']['gamerTag']
player['name'] = response['data']['player']['user']['name']
    player['bio'] = response['data']['player']['user']['bio']
player['country'] = response['data']['player']['user']['location']['country']
player['state'] = response['data']['player']['user']['location']['state']
player['city'] = response['data']['player']['user']['location']['city']
player['rankings'] = response['data']['player']['rankings']
return player
# Filter for the get_tournaments function
def player_show_tournaments_filter(response):
if response['data']['player'] is None:
return
if response['data']['player']['user']['tournaments']['nodes'] is None:
return
tournaments = []
for node in response['data']['player']['user']['tournaments']['nodes']:
cur_tournament = {}
cur_tournament['name'] = node['name']
cur_tournament['slug'] = node['slug'].split('/')[-1]
cur_tournament['id'] = node['id']
cur_tournament['attendees'] = node['numAttendees']
cur_tournament['country'] = node['countryCode']
cur_tournament['unixTimestamp'] = node['startAt']
tournaments.append(cur_tournament)
return tournaments
# Filter for the show_tournaments_for_game function
def player_show_tournaments_for_game(response, videogame_id):
if response['data']['player'] is None:
return
if response['data']['player']['user']['tournaments']['nodes'] is None:
return
tournaments = []
# This is really janky code because of the really janky query
# that I had to submit, but it works! Looking for a better way to make this query still
for node in response['data']['player']['user']['tournaments']['nodes']:
for event in node['events']:
if event['videogame']['id'] == videogame_id and event['entrants']['nodes'] is not None:
cur_tournament = {}
cur_tournament['name'] = node['name']
cur_tournament['slug'] = node['slug'].split('/')[-1]
cur_tournament['id'] = node['id']
cur_tournament['attendees'] = node['numAttendees']
cur_tournament['country'] = node['countryCode']
cur_tournament['startTimestamp'] = node['startAt']
cur_tournament['eventName'] = event['name']
cur_tournament['eventSlug'] = event['slug'].split('/')[-1]
cur_tournament['eventId'] = event['id']
cur_tournament['eventEntrants'] = event['numEntrants']
tournaments.append(cur_tournament)
return tournaments
# LEAGUES.PY
# Filter for the show function
def league_show_filter(response):
if response['data']['league'] is None:
return
data = {}
data['id'] = response['data']['league']['id']
data['name'] = response['data']['league']['name']
data['startTimestamp'] = response['data']['league']['startAt']
data['endTimestamp'] = response['data']['league']['endAt']
data['games'] = response['data']['league']['videogames']
return data
# Filter for the show_schedule function
def league_show_schedule_filter(response):
if response['data']['league'] is None:
return
if response['data']['league']['events']['nodes'] is None:
return
events = []
for node in response['data']['league']['events']['nodes']:
cur_event = {}
cur_event['eventId'] = node['id']
cur_event['eventName'] = node['name']
cur_event['eventSlug'] = node['slug'].split('/')[-1]
cur_event['eventStartTimestamp'] = node['startAt']
cur_event['eventEntrants'] = node['numEntrants']
cur_event['tournamentId'] = node['tournament']['id']
cur_event['tournamentName'] = node['tournament']['name']
cur_event['tournamentSlug'] = node['tournament']['slug'].split('/')[-1]
events.append(cur_event)
return events
# Filter for the show_standings function
def league_show_standings_filter(response):
if response['data']['league'] is None:
return
if response['data']['league']['standings']['nodes'] is None:
return
players = []
for node in response['data']['league']['standings']['nodes']:
cur_player = {}
cur_player['id'] = node['id']
cur_player['standing'] = node['placement']
if node['player'] is not None: # Smashgg is weird sometimes
cur_player['name'] = node['player']['gamerTag']
cur_player['playerId'] = node['player']['id']
else:
cur_player['name'] = "smashgg has a bug, ignore this one and playerId please -- very sorry"
cur_player['playerId'] = None
players.append(cur_player)
return players
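# Illustrative smoke test with mocked data (not a real smash.gg payload);
# it exercises show_filter against the exact keys the function reads above.
if __name__ == '__main__':
    _mock = {'data': {'tournament': {
        'id': 1, 'name': 'Example Weekly', 'countryCode': 'US',
        'addrState': 'CA', 'city': 'San Jose',
        'startAt': 0, 'endAt': 1, 'numAttendees': 64,
    }}}
    assert show_filter(_mock)['entrants'] == 64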
| 37.948101
| 103
| 0.576103
|
4a070c81498515be6adb46921fe7f7221193ead9
| 10,601
|
py
|
Python
|
lib-python/3/ctypes/test/test_callbacks.py
|
olliemath/pypy
|
8b873bd0b8bf76075aba3d915c260789f26f5788
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
lib-python/3/ctypes/test/test_callbacks.py
|
olliemath/pypy
|
8b873bd0b8bf76075aba3d915c260789f26f5788
|
[
"Apache-2.0",
"OpenSSL"
] | 1
|
2022-02-22T00:59:49.000Z
|
2022-02-22T00:59:49.000Z
|
lib-python/3/ctypes/test/test_callbacks.py
|
olliemath/pypy
|
8b873bd0b8bf76075aba3d915c260789f26f5788
|
[
"Apache-2.0",
"OpenSSL"
] | 1
|
2022-03-30T11:42:37.000Z
|
2022-03-30T11:42:37.000Z
|
import functools
import unittest
from test import support
from ctypes import *
from ctypes.test import need_symbol
import _ctypes_test
class Callbacks(unittest.TestCase):
functype = CFUNCTYPE
## def tearDown(self):
## import gc
## gc.collect()
def callback(self, *args):
self.got_args = args
return args[-1]
def check_type(self, typ, arg):
PROTO = self.functype.__func__(typ, typ)
result = PROTO(self.callback)(arg)
if typ == c_float:
self.assertAlmostEqual(result, arg, places=5)
else:
self.assertEqual(self.got_args, (arg,))
self.assertEqual(result, arg)
PROTO = self.functype.__func__(typ, c_byte, typ)
result = PROTO(self.callback)(-3, arg)
if typ == c_float:
self.assertAlmostEqual(result, arg, places=5)
else:
self.assertEqual(self.got_args, (-3, arg))
self.assertEqual(result, arg)
################
def test_byte(self):
self.check_type(c_byte, 42)
self.check_type(c_byte, -42)
def test_ubyte(self):
self.check_type(c_ubyte, 42)
def test_short(self):
self.check_type(c_short, 42)
self.check_type(c_short, -42)
def test_ushort(self):
self.check_type(c_ushort, 42)
def test_int(self):
self.check_type(c_int, 42)
self.check_type(c_int, -42)
def test_uint(self):
self.check_type(c_uint, 42)
def test_long(self):
self.check_type(c_long, 42)
self.check_type(c_long, -42)
def test_ulong(self):
self.check_type(c_ulong, 42)
def test_longlong(self):
self.check_type(c_longlong, 42)
self.check_type(c_longlong, -42)
def test_ulonglong(self):
self.check_type(c_ulonglong, 42)
def test_float(self):
# only almost equal: double -> float -> double
import math
self.check_type(c_float, math.e)
self.check_type(c_float, -math.e)
def test_double(self):
self.check_type(c_double, 3.14)
self.check_type(c_double, -3.14)
def test_longdouble(self):
self.check_type(c_longdouble, 3.14)
self.check_type(c_longdouble, -3.14)
def test_char(self):
self.check_type(c_char, b"x")
self.check_type(c_char, b"a")
# disabled: would now (correctly) raise a RuntimeWarning about
# a memory leak. A callback function cannot return a non-integral
# C type without causing a memory leak.
@unittest.skip('test disabled')
def test_char_p(self):
self.check_type(c_char_p, "abc")
self.check_type(c_char_p, "def")
@support.refcount_test
def test_pyobject(self):
o = ()
from sys import getrefcount as grc
for o in (), [], object():
initial = grc(o)
# This call leaks a reference to 'o'...
self.check_type(py_object, o)
before = grc(o)
# ...but this call doesn't leak any more. Where is the refcount?
self.check_type(py_object, o)
after = grc(o)
self.assertEqual((after, o), (before, o))
def test_unsupported_restype_1(self):
# Only "fundamental" result types are supported for callback
# functions, the type must have a non-NULL stgdict->setfunc.
# POINTER(c_double), for example, is not supported.
prototype = self.functype.__func__(POINTER(c_double))
# The type is checked when the prototype is called
self.assertRaises(TypeError, prototype, lambda: None)
def test_unsupported_restype_2(self):
prototype = self.functype.__func__(object)
self.assertRaises(TypeError, prototype, lambda: None)
def test_issue_7959(self):
proto = self.functype.__func__(None)
class X(object):
def func(self): pass
def __init__(self):
self.v = proto(self.func)
import gc
for i in range(32):
X()
gc.collect()
live = [x for x in gc.get_objects()
if isinstance(x, X)]
self.assertEqual(len(live), 0)
def test_issue12483(self):
import gc
class Nasty:
def __del__(self):
gc.collect()
CFUNCTYPE(None)(lambda x=Nasty(): None)
@need_symbol('WINFUNCTYPE')
class StdcallCallbacks(Callbacks):
try:
functype = WINFUNCTYPE
except NameError:
pass
################################################################
class SampleCallbacksTestCase(unittest.TestCase):
def test_integrate(self):
# Derived from some then non-working code, posted by David Foster
dll = CDLL(_ctypes_test.__file__)
# The function prototype called by 'integrate': double func(double);
CALLBACK = CFUNCTYPE(c_double, c_double)
# The integrate function itself, exposed from the _ctypes_test dll
integrate = dll.integrate
integrate.argtypes = (c_double, c_double, CALLBACK, c_long)
integrate.restype = c_double
def func(x):
return x**2
result = integrate(0.0, 1.0, CALLBACK(func), 10)
diff = abs(result - 1./3.)
self.assertLess(diff, 0.01, "%s not less than 0.01" % diff)
def test_issue_8959_a(self):
from ctypes.util import find_library
libc_path = find_library("c")
if not libc_path:
self.skipTest('could not find libc')
libc = CDLL(libc_path)
@CFUNCTYPE(c_int, POINTER(c_int), POINTER(c_int))
def cmp_func(a, b):
return a[0] - b[0]
array = (c_int * 5)(5, 1, 99, 7, 33)
libc.qsort(array, len(array), sizeof(c_int), cmp_func)
self.assertEqual(array[:], [1, 5, 7, 33, 99])
@need_symbol('WINFUNCTYPE')
def test_issue_8959_b(self):
from ctypes.wintypes import BOOL, HWND, LPARAM
global windowCount
windowCount = 0
@WINFUNCTYPE(BOOL, HWND, LPARAM)
def EnumWindowsCallbackFunc(hwnd, lParam):
global windowCount
windowCount += 1
return True #Allow windows to keep enumerating
windll.user32.EnumWindows(EnumWindowsCallbackFunc, 0)
def test_callback_register_int(self):
# Issue #8275: buggy handling of callback args under Win64
# NOTE: should be run on release builds as well
dll = CDLL(_ctypes_test.__file__)
CALLBACK = CFUNCTYPE(c_int, c_int, c_int, c_int, c_int, c_int)
# All this function does is call the callback with its args squared
func = dll._testfunc_cbk_reg_int
func.argtypes = (c_int, c_int, c_int, c_int, c_int, CALLBACK)
func.restype = c_int
def callback(a, b, c, d, e):
return a + b + c + d + e
result = func(2, 3, 4, 5, 6, CALLBACK(callback))
self.assertEqual(result, callback(2*2, 3*3, 4*4, 5*5, 6*6))
def test_callback_register_double(self):
# Issue #8275: buggy handling of callback args under Win64
# NOTE: should be run on release builds as well
dll = CDLL(_ctypes_test.__file__)
CALLBACK = CFUNCTYPE(c_double, c_double, c_double, c_double,
c_double, c_double)
# All this function does is call the callback with its args squared
func = dll._testfunc_cbk_reg_double
func.argtypes = (c_double, c_double, c_double,
c_double, c_double, CALLBACK)
func.restype = c_double
def callback(a, b, c, d, e):
return a + b + c + d + e
result = func(1.1, 2.2, 3.3, 4.4, 5.5, CALLBACK(callback))
self.assertEqual(result,
callback(1.1*1.1, 2.2*2.2, 3.3*3.3, 4.4*4.4, 5.5*5.5))
def test_callback_large_struct(self):
class Check: pass
# This should mirror the structure in Modules/_ctypes/_ctypes_test.c
class X(Structure):
_fields_ = [
('first', c_ulong),
('second', c_ulong),
('third', c_ulong),
]
def callback(check, s):
check.first = s.first
check.second = s.second
check.third = s.third
# See issue #29565.
# The structure should be passed by value, so
# any changes to it should not be reflected in
# the value passed
s.first = s.second = s.third = 0x0badf00d
check = Check()
s = X()
s.first = 0xdeadbeef
s.second = 0xcafebabe
s.third = 0x0bad1dea
CALLBACK = CFUNCTYPE(None, X)
dll = CDLL(_ctypes_test.__file__)
func = dll._testfunc_cbk_large_struct
func.argtypes = (X, CALLBACK)
func.restype = None
# the function just calls the callback with the passed structure
func(s, CALLBACK(functools.partial(callback, check)))
self.assertEqual(check.first, s.first)
self.assertEqual(check.second, s.second)
self.assertEqual(check.third, s.third)
self.assertEqual(check.first, 0xdeadbeef)
self.assertEqual(check.second, 0xcafebabe)
self.assertEqual(check.third, 0x0bad1dea)
# See issue #29565.
# Ensure that the original struct is unchanged.
self.assertEqual(s.first, check.first)
self.assertEqual(s.second, check.second)
self.assertEqual(s.third, check.third)
def test_callback_too_many_args(self):
def func(*args):
return len(args)
CTYPES_MAX_ARGCOUNT = 1024
proto = CFUNCTYPE(c_int, *(c_int,) * CTYPES_MAX_ARGCOUNT)
cb = proto(func)
args1 = (1,) * CTYPES_MAX_ARGCOUNT
self.assertEqual(cb(*args1), CTYPES_MAX_ARGCOUNT)
args2 = (1,) * (CTYPES_MAX_ARGCOUNT + 1)
with self.assertRaises(ArgumentError):
cb(*args2)
def test_convert_result_error(self):
def func():
return ("tuple",)
proto = CFUNCTYPE(c_int)
ctypes_func = proto(func)
with support.catch_unraisable_exception() as cm:
# don't test the result since it is an uninitialized value
result = ctypes_func()
self.assertIsInstance(cm.unraisable.exc_value, TypeError)
self.assertEqual(cm.unraisable.err_msg,
"Exception ignored on converting result "
"of ctypes callback function")
self.assertIs(cm.unraisable.object, func)
if __name__ == '__main__':
unittest.main()
| 32.518405
| 79
| 0.597208
|
4a070cb1044671f38afdc8e4d81fe33f99e90bae
| 31,851
|
py
|
Python
|
archive/decoders/decoder_unroll_iw.py
|
kevinbdsouza/VaeLM
|
31e2d6abc0f49f2ca72d3f22ff17484859fc3601
|
[
"MIT"
] | null | null | null |
archive/decoders/decoder_unroll_iw.py
|
kevinbdsouza/VaeLM
|
31e2d6abc0f49f2ca72d3f22ff17484859fc3601
|
[
"MIT"
] | null | null | null |
archive/decoders/decoder_unroll_iw.py
|
kevinbdsouza/VaeLM
|
31e2d6abc0f49f2ca72d3f22ff17484859fc3601
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
from tensorflow.python.ops.rnn import _transpose_batch_time
class Decoder:
def __init__(self, **kwargs):
self.encodings = None
self.num_sentence_characters = kwargs['num_sentence_characters']
self.dict_length = kwargs['dict_length']
self.max_num_lat_words = kwargs['max_num_lat_words']
        self.batch_size = kwargs['batch_size']
        self.sentence_lens = kwargs.get('sentence_lens')  # per-sentence lengths, used by decoder1_p1
self.simple_decoder = True
self.global_lat_decoder = False
self.decoder_units = kwargs['decoder_units']
self.units_encoder_lstm = kwargs['encoder_dim']
self.lat_word_dim = kwargs['lat_word_dim']
self.global_lat_dim = kwargs['global_lat_dim']
self.decoder_p3_units = kwargs['decoder_p3_units']
def make_global_latent(self, values, reuse,units_dense):
mean_pool = tf.reduce_mean(values, axis=-1)
with tf.variable_scope('global_lat_var_scope',reuse=reuse):
pre_dist1 = tf.layers.dense(inputs=mean_pool, activation=tf.nn.relu, units=units_dense,name='layer1_global_lat')
pre_dist2 = tf.layers.dense(inputs=pre_dist1, activation=None, units=units_dense * 2,name='layer2_global_lat')
            mu, log_sig = tf.split(tf.cast(pre_dist2, dtype=tf.float32), axis=-1, num_or_size_splits=2)
            # fixed affine adjustments: widen the means, start with a small variance
            mu = mu * 10
            log_sig = log_sig - 3
        return mu, log_sig
def decoder1_p1(self, reuse, units_bilstm, encodings=None):
if encodings is None:
encodings = self.encodings
with tf.variable_scope('decoder_p1', reuse=reuse):
cell1 = tf.contrib.rnn.LSTMCell(num_units=units_bilstm)
cell2 = tf.contrib.rnn.LSTMCell(num_units=units_bilstm)
values, states = tf.nn.bidirectional_dynamic_rnn(inputs=encodings, dtype=tf.float32, cell_bw=cell1,
cell_fw=cell2, sequence_length=self.sentence_lens)
values = tf.concat(values, 2)
return values
    def decoder2_p1(self, reuse, units_bilstm, global_latent, seq_lens):
        # needs some work
        # seq_lens is assumed to hold the per-example sequence lengths; the global
        # latent is tiled across time so the biLSTM conditions on it at every step
        inputs = tf.stack([global_latent for _ in range(self.num_sentence_characters)], axis=1)
        with tf.variable_scope('decoder_p1', reuse=reuse):
            cell1 = tf.contrib.rnn.LSTMCell(num_units=units_bilstm)
            cell2 = tf.contrib.rnn.LSTMCell(num_units=units_bilstm)
            values, states = tf.nn.bidirectional_dynamic_rnn(inputs=inputs, dtype=tf.float32, cell_bw=cell1,
                                                             cell_fw=cell2, sequence_length=tf.cast(seq_lens, tf.int32))
            values = tf.concat(values, 2)
        return values
def bahd_attention(self, queries, values, reuse):
with tf.variable_scope('attention_layer', reuse=reuse):
w1 = tf.get_variable(name='query_w', shape=[self.decoder_units, self.lat_word_dim])
w2 = tf.get_variable(name='value_w', shape=[self.lat_word_dim, self.lat_word_dim])
v = tf.get_variable(name='v', shape=[self.lat_word_dim])
conv_q = tf.reshape(tf.einsum('ij,jk->ik', queries, w1), [-1, 1, self.lat_word_dim])
a_p1 = tf.reshape(tf.tile(conv_q, [1, 1, self.max_num_lat_words]),[self.batch_size, self.max_num_lat_words, self.lat_word_dim])
a_p2 = tf.einsum('ijk,kl->ijl', values, w2)
out = tf.einsum('k,ijk->ij', v, tf.nn.tanh(name='combine', x=a_p1 + a_p2))
out_norm = tf.nn.softmax(out, dim=-1)
context = tf.reduce_sum(values * tf.reshape(tf.stack([out_norm for _ in range(self.lat_word_dim)], -1),[self.batch_size, self.max_num_lat_words, self.lat_word_dim]),axis=-2)
l1 = tf.reshape(context, [self.batch_size, self.lat_word_dim])
# l1 = tf.reshape(l1,[self.batch_size,self.lat_word_dim+self.decoder_units])
return l1
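    # Shape summary for bahd_attention as implemented above:
    #   queries : [batch_size, decoder_units]
    #   values  : [batch_size, max_num_lat_words, lat_word_dim]
    #   returns : [batch_size, lat_word_dim] (softmax-weighted sum over the word axis)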
def decoder_p2(self, num_hidden_word_units, inputs, char_sequence_length, global_latent, reuse, context_dim, max_time):
outputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
cell = tf.contrib.rnn.LSTMCell(self.decoder_units)
def loop_fn(time, cell_output, cell_state, loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_cell_state = cell.zero_state(self.batch_size, tf.float32)
next_loop_state = outputs_ta
context = self.bahd_attention(
queries=tf.zeros(shape=[self.batch_size, num_hidden_word_units], dtype=tf.float32), values=inputs,
reuse=None)
# next_input = tf.concat([tf.zeros(shape=[self.batch_size,self.lat_word_dim],dtype=tf.float32),tf.zeros(shape=[self.batch_size,self.global_lat_dim],dtype=tf.float32)],axis=-1)
next_input = tf.zeros(shape=[self.batch_size, self.lat_word_dim + self.global_lat_dim],dtype=tf.float32)
else:
next_cell_state = cell_state
context = self.bahd_attention(queries=cell_output, values=inputs, reuse=True)
# should try passing in logits
# should also try doing the final decoding in a seperate RNN
# should try using a global latent vector here asap
# prediction = tf.layers.dense(inputs=context,activation=None,units=self.dict_length)
# took context out of decoder loop because softmax may be saturating
next_input = tf.concat([context, global_latent], axis=-1)
next_loop_state = loop_state.write(time - 1, context)
elements_finished = (time >= char_sequence_length)
return (elements_finished, next_input, next_cell_state, emit_output, next_loop_state)
with tf.variable_scope('decoder_p2', reuse=reuse):
_, _, loop_state_ta = tf.nn.raw_rnn(cell, loop_fn)
# loop_state_out = _transpose_batch_time(loop_state_ta.stack())
return loop_state_ta
def decoder_p3(self, inputs, reuse, max_time, char_sequence_length):
# _inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time,name='context_array')
# _inputs_ta = _inputs_ta.unstack(tf.transpose(inputs,[1,0,2]))
_inputs_ta = inputs
outputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time, name='pred_char_array')
cell = tf.contrib.rnn.LSTMCell(self.decoder_p3_units)
def loop_fn(time, cell_output, cell_state, loop_state):
next_loop_state = loop_state
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_cell_state = cell.zero_state(self.batch_size, tf.float32)
next_input = tf.concat(
[tf.zeros(shape=[self.batch_size, self.dict_length], dtype=tf.float32), _inputs_ta.read(time)],
axis=-1)
next_loop_state = outputs_ta
else:
next_cell_state = cell_state
prediction = tf.layers.dense(inputs=cell_output, activation=None, units=self.dict_length)
next_loop_state = loop_state.write(time - 1, prediction)
next_input = tf.concat([prediction, _inputs_ta.read(time)], axis=-1)
# argmax seems to be working a bit better, funny as it's not differentiable
# next_input = tf.concat([tf.one_hot(tf.argmax(prediction, -1), depth=self.dict_length, axis=-1), _inputs_ta.read(time)],axis=-1)
elements_finished = (time >= char_sequence_length - 1)
return (elements_finished, next_input, next_cell_state, emit_output, next_loop_state)
with tf.variable_scope('decoder_p3', reuse=reuse):
_, _, loop_ta = tf.nn.raw_rnn(cell, loop_fn)
output = _transpose_batch_time(loop_ta.stack())
return output
def run_decoder(self, units_lstm_decoder,train,word_sequence_length, char_sequence_length, units_dense_global, lat_words, reuse):
if self.simple_decoder:
global_mu, global_logsig = self.make_global_latent(reuse=reuse,values=lat_words, units_dense=units_dense_global)
eps = tf.random_normal(shape=[self.batch_size, units_dense_global], dtype=tf.float32)
if train:
global_latent = eps * tf.exp(global_logsig) + global_mu
else:
global_latent=global_mu
out_2 = self.decoder_p2(char_sequence_length=char_sequence_length, num_hidden_word_units=units_lstm_decoder,
inputs=lat_words, reuse=reuse, global_latent=global_latent,
context_dim=units_lstm_decoder, max_time=self.num_sentence_characters)
out = self.decoder_p3(inputs=out_2, reuse=reuse, max_time=self.num_sentence_characters,
char_sequence_length=char_sequence_length)
return out, global_latent, global_logsig, global_mu
def prior(self, values, num_units, global_latent, word_lens, reuse):
global_latent = tf.transpose(tf.stack([global_latent for _ in range(self.max_num_lat_words)]), [1, 0, 2])
print(' PRIOR input dim from post {}'.format(values))
values = tf.concat([tf.zeros(shape=[self.batch_size, 1, self.lat_word_dim], dtype=tf.float32), values], axis=1)
values = values[:, 0:-1, :]
values = tf.concat([tf.cast(values, dtype=tf.float32), global_latent], axis=-1)
print('PRIOR input dim to prior {}'.format(values))
with tf.variable_scope('prior', reuse=reuse):
cell = tf.contrib.rnn.LSTMCell(num_units)
values, _ = tf.nn.dynamic_rnn(cell=cell, inputs=values, sequence_length=word_lens, dtype=tf.float32)
with tf.variable_scope('prior/rnn', reuse=reuse):
w = tf.get_variable(name='prior_dense_w', shape=[self.lat_word_dim, self.lat_word_dim * 2],
dtype=tf.float32)
b = tf.get_variable(name='prior_dense_b', shape=self.lat_word_dim * 2, dtype=tf.float32)
out = tf.reshape(tf.matmul(tf.reshape(values, [-1, self.lat_word_dim]), w) + b,
[self.batch_size, self.max_num_lat_words, self.lat_word_dim * 2])
mu, log_sig = tf.split(out, axis=-1, num_or_size_splits=2, name='prior_dense')
print('MU{}'.format(mu))
return [mu, log_sig]
def unrolled_prior(self,values, num_units, global_latent, word_lens, reuse):
#inputs_ta = tf.TensorArray(dtype=tf.float32, size=self.max_num_lat_words)
values = tf.transpose(values,[1,0,2])
#values.set_shape([self.max_num_lat_words,self.batch_size,self.lat_word_dim])
#inputs_ta.unstack(values)
mean_ta = tf.TensorArray(dtype=tf.float32, size=self.max_num_lat_words)
logsigmas_ta = tf.TensorArray(dtype=tf.float32, size=self.max_num_lat_words)
cell = tf.contrib.rnn.LSTMCell(num_units)
def loop_fn(time, cell_output, cell_state, loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_cell_state = cell.zero_state(self.batch_size, tf.float32)
next_loop_state = (mean_ta, logsigmas_ta)
# self.lat_word_dim is very important, need from kevin
next_input = tf.concat(
[tf.zeros(shape=[self.batch_size, self.lat_word_dim], dtype=tf.float32), global_latent], axis=-1)
else:
next_cell_state = cell_state
with tf.variable_scope('prior_pred', reuse=reuse):
w = tf.get_variable(name='prior_dense_w', shape=[self.lat_word_dim, self.lat_word_dim * 2],
dtype=tf.float32)
b = tf.get_variable(name='prior_dense_b', shape=self.lat_word_dim * 2, dtype=tf.float32)
cell_output = tf.reshape(tf.matmul(cell_output, w) + b, [self.batch_size, self.lat_word_dim * 2])
mu, logsig = tf.split(cell_output, axis=-1, num_or_size_splits=2)
#eps = tf.random_normal(shape=[self.batch_size, self.lat_word_dim], dtype=tf.float32)
#samples_word = eps * tf.exp(logsig) + mu
next_input = tf.concat([values[time-1], global_latent], axis=-1)
next_loop_state = (loop_state[0].write(time - 1, mu),loop_state[1].write(time - 1, logsig))
elements_finished = (time >= word_lens)
return (elements_finished, next_input, next_cell_state, emit_output, next_loop_state)
with tf.variable_scope('prior', reuse=reuse):
_, _, loop_state_ta = tf.nn.raw_rnn(cell, loop_fn)
mean_state_out = _transpose_batch_time(loop_state_ta[0].stack())
logsigma_state_out = _transpose_batch_time(loop_state_ta[1].stack())
return [mean_state_out,logsigma_state_out]
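    # unrolled_prior factorizes the prior autoregressively: at step t the LSTM is fed
    # [z_{t-1}, global_latent] (teacher-forced with posterior samples) and a dense
    # layer on the cell output yields that step's prior mean and log-sigma.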
def cost_function(self,eow_mask, mask_kl, predictions, true_input, global_mu, global_logsig, prior_mu, prior_logsig,
posterior_mu, posterior_logsig, shift, total_steps, global_step, kl=True):
mask = tf.reduce_sum(true_input, -1)
# reconstruction = tf.reduce_sum(tf.reduce_sum(-true_input*tf.log(predictions+1e-9),axis=-1),-1)
reconstruction = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.argmax(true_input, -1), logits=predictions) * mask
reconstruction = tf.reduce_sum(reconstruction,-1)
#reconstruction = tf.reduce_sum(reconstruction*eow_mask,-1)
        # have to be very careful of order of the mean/stddev parameters
# outer reduce sum for each KL term
'''
kl_p1 = 0.5 * (tf.reduce_sum(tf.exp(posterior_logsig - prior_logsig), axis=-1) + tf.reduce_sum(
(posterior_mu - prior_mu) * tf.divide(1, tf.exp(prior_logsig)) * (posterior_mu - prior_mu),
axis=-1) - tf.cast(tf.shape(posterior_mu)[-1], dtype=tf.float32) + tf.reduce_sum(
(prior_logsig - posterior_logsig), axis=-1))
'''
'''
kl_p1 = 0.5 * (tf.reduce_sum(tf.reduce_sum(tf.log(tf.square(tf.exp(prior_logsig))),axis=1) - tf.reduce_sum(tf.log(tf.square(tf.exp(posterior_logsig))),axis=1),axis=-1) -
tf.cast(tf.shape(posterior_mu)[-1], dtype=tf.float32) * tf.cast(tf.shape(prior_mu)[1],dtype=tf.float32) +
tf.reduce_sum(tf.reduce_sum(tf.divide(1,tf.square(tf.exp(prior_logsig))) * tf.square(tf.exp(posterior_logsig)),axis=-1) + tf.reduce_sum(
(posterior_mu - prior_mu) * tf.divide(1, tf.square(tf.exp(prior_logsig))) * (posterior_mu - prior_mu),
axis=-1),axis=-1))'''
kl_p1 = 0.5 * (tf.reduce_sum((tf.log(tf.square(tf.exp(prior_logsig))) -
tf.log(tf.square(tf.exp(posterior_logsig)))), axis=-1) -
tf.cast(tf.shape(posterior_mu)[-1], dtype=tf.float32) + tf.reduce_sum(
tf.divide(1, tf.square(tf.exp(prior_logsig))) * tf.square(tf.exp(posterior_logsig)),
axis=-1) + tf.reduce_sum(((posterior_mu - prior_mu) * tf.divide(1,
tf.square(tf.exp(prior_logsig))) * (
posterior_mu - prior_mu)), axis=-1))
# have to mask out for padding (there will be numbers there due to the dense after the rnn)
kl_p1 = tf.reduce_sum((kl_p1 * mask_kl), -1)
'''
kl_global_lat = 0.5 * (
tf.reduce_sum(tf.exp(global_logsig), axis=-1) + tf.reduce_sum((global_mu * global_mu), axis=-1) - tf.cast(
tf.shape(global_mu)[-1], dtype=tf.float32) - tf.reduce_sum(global_logsig))
'''
kl_global_lat = 0.5 * (
-tf.reduce_sum(tf.log(tf.square(tf.exp(global_logsig))), axis=-1) - tf.cast(tf.shape(global_mu)[-1],
dtype=tf.float32) + tf.reduce_sum(
tf.square(tf.exp(global_logsig)), axis=-1) + tf.reduce_sum((global_mu * global_mu), axis=-1))
kl_p2 = kl_p1
# kl_p2 = tf.reduce_sum(kl_p1, -1)
if kl:
kl_p3 = kl_p2 + kl_global_lat
anneal_c = tf.cast(tf.minimum(tf.maximum(tf.divide((global_step - shift), total_steps), 0), 1),
dtype=tf.float32)
kl_p3 = kl_p3* anneal_c
else:
anneal_c = tf.constant(0,dtype=tf.float32)
kl_p3 = tf.constant(0, dtype=tf.float32)
        # sum over all separate KLs for each lat var
cost = tf.reduce_mean(kl_p3 + reconstruction)
return cost, reconstruction, kl_p3, kl_p1, kl_global_lat, kl_p2, anneal_c
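    # Reference sketch (not part of the original model graph): the same closed-form
    # KL between diagonal Gaussians as kl_p1 above, written in NumPy so the TF
    # expression can be checked numerically on small arrays:
    #   KL(q||p) = 0.5 * sum(log s_p^2 - log s_q^2 - 1 + s_q^2/s_p^2 + (mu_q - mu_p)^2/s_p^2)
    @staticmethod
    def _np_kl_diag_gaussians(mu_q, logsig_q, mu_p, logsig_p):
        var_q, var_p = np.exp(2 * logsig_q), np.exp(2 * logsig_p)
        return 0.5 * np.sum(np.log(var_p) - np.log(var_q) - 1.0
                            + var_q / var_p
                            + (mu_q - mu_p) ** 2 / var_p, axis=-1)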
def test_cost_function(self, predictions, mask_kl, true_input, global_mu, global_logsig, prior_mu, prior_logsig,
posterior_mu, posterior_logsig):
mask = tf.reduce_sum(true_input, -1)
reconstruction = tf.reduce_sum(
tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.argmax(true_input, -1), logits=predictions) * mask,
-1)
# reconstruction = tf.reduce_sum(-true_input*tf.log(predictions+1e-9),axis=-1)
        # have to be very careful of order of the mean/stddev parameters
# outer reduce sum for each KL term
# kl_p1 = 0.5*(tf.reduce_sum(tf.exp(posterior_logsig-prior_logsig),axis=-1)+tf.reduce_sum((posterior_mu-prior_mu)*tf.divide(1,tf.exp(prior_logsig))*(posterior_mu-prior_mu),axis=-1)-tf.cast(tf.shape(posterior_mu)[-1],dtype=tf.float32)+tf.reduce_sum((prior_logsig-posterior_logsig),axis=-1))
# kl_global_lat = 0.5*(tf.reduce_sum(tf.exp(global_logsig),axis=-1)+ tf.reduce_sum((global_mu*global_mu),axis=-1)-tf.cast(tf.shape(global_mu)[-1],dtype=tf.float32)-tf.reduce_sum(global_logsig))
        # sum over all separate KLs for each lat var
'''kl_p1 = 0.5 * (tf.reduce_sum(tf.reduce_sum(tf.log(tf.square(tf.exp(prior_logsig))),axis=1) - tf.reduce_sum(tf.log(tf.square(tf.exp(posterior_logsig))),axis=1),axis=-1) -
# the constant term is not correct as each sentence will have a different number of latent word variables,but is just a constant so shouldn't change the optimization
tf.cast(tf.shape(posterior_mu)[-1], dtype=tf.float32) * tf.cast(tf.shape(prior_mu)[1],dtype=tf.float32) +
tf.reduce_sum(tf.reduce_sum(tf.divide(1,tf.square(tf.exp(prior_logsig))) * tf.square(tf.exp(posterior_logsig)),axis=-1) + tf.reduce_sum(
(posterior_mu - prior_mu) * tf.divide(1, tf.square(tf.exp(prior_logsig))) * (posterior_mu - prior_mu),
axis=-1) ,axis=-1))'''
kl_p1 = 0.5 * (tf.reduce_sum((tf.log(tf.square(tf.exp(prior_logsig))) -
tf.log(tf.square(tf.exp(posterior_logsig)))), axis=-1) -
tf.cast(tf.shape(posterior_mu)[-1], dtype=tf.float32) + tf.reduce_sum(
tf.divide(1, tf.square(tf.exp(prior_logsig))) * tf.square(tf.exp(posterior_logsig)),
axis=-1) + tf.reduce_sum(((posterior_mu - prior_mu) * tf.divide(1,
tf.square(tf.exp(prior_logsig))) * (
posterior_mu - prior_mu)), axis=-1))
# have to mask out for padding (there will be numbers there due to the dense after the rnn)
kl_p1 = tf.reduce_sum((kl_p1 * mask_kl), -1)
kl_global_lat = 0.5 * (
-tf.reduce_sum(tf.log(tf.square(tf.exp(global_logsig))), axis=-1) - tf.cast(tf.shape(global_mu)[-1],
dtype=tf.float32) + tf.reduce_sum(
tf.square(tf.exp(global_logsig)), axis=-1) + tf.reduce_sum((global_mu * global_mu), axis=-1))
kl_p3 = kl_p1 + kl_global_lat
cost = tf.reduce_mean(kl_p3 + reconstruction)
return cost, reconstruction, kl_p3, kl_p1
def calc_cost(self,eow_mask, mask_kl, kl, posterior_logsig, post_samples, global_mu, global_logsig, global_latent_sample,
posterior_mu, true_input, sentence_word_lens, predictions, shift, total_steps, global_step, reuse):
prior_mu, prior_logsig = self.unrolled_prior(values=post_samples, num_units=self.units_encoder_lstm,
global_latent=global_latent_sample, word_lens=sentence_word_lens,
reuse=reuse)
cost, reconstruction, kl_p3, kl_p1, kl_global, kl_p2, anneal_c = self.cost_function(eow_mask=eow_mask,mask_kl=mask_kl, kl=kl,
predictions=predictions,
true_input=true_input,
global_mu=global_mu,
global_logsig=global_logsig,
prior_mu=prior_mu,
prior_logsig=prior_logsig,
posterior_mu=posterior_mu,
posterior_logsig=posterior_logsig,
shift=shift,
total_steps=total_steps,
global_step=global_step)
kl_hist = tf.cond(tf.cast(global_step, dtype=tf.float32) > shift * 1.5, lambda: tf.reduce_mean(kl_p1),
lambda: tf.zeros(shape=tf.shape(tf.reduce_mean(kl_p1)), dtype=tf.float32))
self.kls_hist = tf.summary.histogram('kls', tf.reduce_mean(kl_hist))
self.global_kl_scalar = tf.summary.scalar('kls_global', tf.reduce_mean(kl_global))
self.rec_scalar = tf.summary.scalar('rec', tf.reduce_mean(reconstruction))
self.cost_scalar = tf.summary.scalar('full_cost', cost)
var_all = tf.nn.moments(x=posterior_mu, axes=0)
var_all = var_all[-1]
kl = tf.reduce_mean(kl_p3)
self.full_kl_scalar = tf.summary.scalar('full_kl', kl)
var_all = tf.cond(tf.cast(global_step, dtype=tf.float32) > shift * 1.5, lambda: var_all,
lambda: tf.zeros(shape=tf.shape(var_all), dtype=tf.float32))
self.sum_all_activ_hist = tf.summary.histogram('active_lats_all', var_all)
var_g = tf.nn.moments(x=global_mu, axes=0)
var_g = var_g[-1]
var_g = tf.cond(tf.cast(global_step, dtype=tf.float32) > shift * 1.5, lambda: var_g,
lambda: tf.zeros(shape=tf.shape(var_g), dtype=tf.float32))
self.sum_global_activ_hist = tf.summary.histogram('active_lats_global', var_g)
return cost, reconstruction, kl_p3, kl_p1, kl_global, kl_p2, anneal_c, prior_mu
def test_calc_cost(self, mask_kl, posterior_logsig, post_samples, global_mu, global_logsig, global_latent_sample,
posterior_mu, true_input, predictions, sentence_word_lens):
prior_mu, prior_logsig = self.unrolled_prior(values=post_samples, num_units=self.units_encoder_lstm,
global_latent=global_latent_sample, word_lens=sentence_word_lens,
reuse=True)
cost, reconstruction, kl_p3, kl_p1 = self.test_cost_function(mask_kl=mask_kl, predictions=predictions,
true_input=true_input, global_mu=global_mu,
global_logsig=global_logsig, prior_mu=prior_mu,
prior_logsig=prior_logsig,
posterior_mu=posterior_mu,
posterior_logsig=posterior_logsig)
self.sum_rec_val = tf.summary.scalar('rec_test', tf.reduce_mean(reconstruction))
self.sum_kl_val = tf.summary.scalar('kl_test', tf.reduce_mean(kl_p3))
return cost
def generation(self, samples):
outputs_ta = tf.TensorArray(dtype=tf.float32, size=self.max_num_lat_words)
cell = tf.contrib.rnn.LSTMCell(self.decoder_units)
print('GENER samples {}'.format(np.shape(samples)))
def loop_fn(time, cell_output, cell_state, loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_cell_state = cell.zero_state(self.batch_size, tf.float32)
next_loop_state = outputs_ta
# self.lat_word_dim is very important, need from kevin
next_input = tf.concat(
[tf.zeros(shape=[self.batch_size, self.lat_word_dim], dtype=tf.float32), samples], axis=-1)
else:
next_cell_state = cell_state
with tf.variable_scope('prior_pred', reuse=True):
w = tf.get_variable(name='prior_dense_w')
b = tf.get_variable(name='prior_dense_b')
cell_output = tf.reshape(tf.matmul(cell_output, w) + b, [self.batch_size, self.lat_word_dim * 2])
mu, logsig = tf.split(cell_output, axis=-1, num_or_size_splits=2)
eps = tf.random_normal(shape=[self.batch_size, self.lat_word_dim], dtype=tf.float32)
samples_word = eps * tf.exp(logsig) + mu
next_input = tf.concat([samples_word, samples], axis=-1)
next_loop_state = loop_state.write(time - 1, samples_word)
elements_finished = (time >= self.max_num_lat_words)
return (elements_finished, next_input, next_cell_state, emit_output, next_loop_state)
with tf.variable_scope('prior', reuse=True):
_, _, loop_state_ta = tf.nn.raw_rnn(cell, loop_fn)
loop_state_out = _transpose_batch_time(loop_state_ta.stack())
context = self.decoder_p2(num_hidden_word_units=self.lat_word_dim, inputs=loop_state_out,
char_sequence_length=np.repeat(self.num_sentence_characters, self.batch_size, axis=-1),
global_latent=samples, reuse=True, context_dim=self.decoder_units,
max_time=self.num_sentence_characters)
predictions = self.decoder_p3(inputs=context, reuse=True,
char_sequence_length=np.repeat(self.num_sentence_characters, self.batch_size, axis=-1),
max_time=self.num_sentence_characters)
return predictions
def IW_loglike(self,word_lat_samples,word_lat_mu,decoder_dim,word_lat_logsig,true_output,char_lens,word_lens):
post_dist = tf.distributions.Normal(loc=word_lat_mu,scale=tf.exp(word_lat_logsig))
out, global_latent, global_logsig, global_mu = self.run_decoder(units_lstm_decoder=decoder_dim,train=True,word_sequence_length=None, char_sequence_length=tf.cast(char_lens,dtype=tf.int32), units_dense_global=self.global_lat_dim, lat_words=word_lat_samples, reuse=True)
mean_state_out, logsigma_state_out=self.unrolled_prior(values=word_lat_samples, num_units=self.units_encoder_lstm, global_latent=global_latent, word_lens=word_lens, reuse=True)
prior_dist = tf.distributions.Normal(loc = mean_state_out,scale=tf.exp(logsigma_state_out))
log_prob_ev = true_output*tf.log(tf.nn.softmax(out))
global_post_dist = tf.distributions.Normal(loc=global_mu,scale=tf.exp(global_logsig))
global_prior_dist = tf.distributions.Normal(loc = tf.zeros(shape=[self.batch_size,self.global_lat_dim]),scale = tf.ones(shape=[self.batch_size,self.global_lat_dim]))
log_prob_post_words = post_dist.log_prob(value=word_lat_samples)
log_prob_prior_words = prior_dist.log_prob(value=word_lat_samples)
log_prob_post_global = global_post_dist.log_prob(value=global_latent)
log_prob_prior_global = global_prior_dist.log_prob(value=global_latent)
LL = tf.reduce_sum(log_prob_ev,[-2,-1])*tf.divide(1,char_lens)+tf.reduce_sum(log_prob_prior_words-log_prob_post_words,[-2,-1])+tf.reduce_sum(log_prob_prior_global-log_prob_post_global,[-1])
return LL
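    # IW_loglike returns one importance-weighted log-likelihood sample: a
    # per-character-averaged reconstruction term plus log p(z|g) - log q(z|x) for
    # the word latents and log p(g) - log q(g|x) for the global latent;
    # a log-mean-exp over repeated samples gives the IWAE-style estimate.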
    def vanilla_decoder(self, inputs, reuse, max_time, sequence_length):
outputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time, name='pred_char_array')
cell = tf.contrib.rnn.LSTMCell(self.decoder_p3_units)
def loop_fn(time, cell_output, cell_state, loop_state):
next_loop_state = loop_state
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_cell_state = cell.zero_state(self.batch_size, tf.float32)
next_input = tf.concat(
[tf.zeros(shape=[self.batch_size, self.dict_length + self.lat_word_dim], dtype=tf.float32)],
axis=-1)
next_loop_state = outputs_ta
else:
next_cell_state = cell_state
prediction = tf.layers.dense(inputs=cell_output, activation=None, units=self.dict_length)
next_loop_state = loop_state.write(time - 1, prediction)
next_input = tf.concat([prediction, inputs], axis=-1)
elements_finished = (time >= sequence_length - 1)
return (elements_finished, next_input, next_cell_state, emit_output, next_loop_state)
with tf.variable_scope('vanilla_decoder', reuse=reuse):
_, _, loop_ta = tf.nn.raw_rnn(cell, loop_fn)
output = _transpose_batch_time(loop_ta.stack())
return output
# Example usage (sketch, updated to match the signatures above; sizes are hypothetical)
# batch_len = np.random.randint(low=1, high=30, size=[10])
# arg_dict = {'global_lat_dim': 10, 'batch_size': 10, 'max_num_lat_words': 30,
#             'decoder_units': 40, 'decoder_p3_units': 40, 'encoder_dim': 40,
#             'lat_word_dim': 40, 'sentence_lens': batch_len,
#             'num_sentence_characters': 200, 'dict_length': 26}
# decoder = Decoder(**arg_dict)
# word_encoding_placeholder = tf.placeholder(
#     dtype=tf.float32, shape=[decoder.batch_size, decoder.max_num_lat_words, decoder.lat_word_dim])
# out_o, global_latent_o, global_logsig_o, global_mu_o = decoder.run_decoder(
#     units_lstm_decoder=40, train=True, word_sequence_length=batch_len,
#     char_sequence_length=np.repeat(decoder.num_sentence_characters, decoder.batch_size),
#     units_dense_global=decoder.global_lat_dim, lat_words=word_encoding_placeholder, reuse=None)
# decoder.calc_cost(...) additionally needs eow_mask, mask_kl, kl, the posterior stats,
# true one-hot characters, shift/total_steps/global_step and reuse; see its signature above.
#
# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     out_v = sess.run(out_o, feed_dict={word_encoding_placeholder: np.random.randn(10, 30, 40)})
| 64.345455
| 343
| 0.620232
|
4a070db23f3985ea7eeef5fdd19170e4991969a4
| 422
|
py
|
Python
|
easy_blog_django/posts/migrations/0007_post_slug.py
|
giantryansaul/easy_blog_django
|
9f06cc9de8ab0593865568c3fec1aa33e4b05fb6
|
[
"BSD-3-Clause"
] | null | null | null |
easy_blog_django/posts/migrations/0007_post_slug.py
|
giantryansaul/easy_blog_django
|
9f06cc9de8ab0593865568c3fec1aa33e4b05fb6
|
[
"BSD-3-Clause"
] | null | null | null |
easy_blog_django/posts/migrations/0007_post_slug.py
|
giantryansaul/easy_blog_django
|
9f06cc9de8ab0593865568c3fec1aa33e4b05fb6
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('posts', '0006_post_tags'),
]
operations = [
migrations.AddField(
model_name='post',
name='slug',
field=models.SlugField(default='slug'),
preserve_default=False,
),
]
| 20.095238
| 51
| 0.582938
|
4a070e2f74b7a331397c4267fee57cfaa35ebc2b
| 8,186
|
py
|
Python
|
MVP_Tao/train.py
|
MedicML/MedicML
|
c81d807ba84b48efce366db76e0e246523acda2a
|
[
"BSD-2-Clause"
] | 128
|
2017-11-10T14:24:49.000Z
|
2022-03-21T14:36:46.000Z
|
MVP_Tao/train.py
|
MedicML/MedicML
|
c81d807ba84b48efce366db76e0e246523acda2a
|
[
"BSD-2-Clause"
] | 12
|
2017-11-20T17:17:48.000Z
|
2019-06-20T10:37:56.000Z
|
MVP_Tao/train.py
|
MedicML/MedicML
|
c81d807ba84b48efce366db76e0e246523acda2a
|
[
"BSD-2-Clause"
] | 32
|
2017-11-13T11:05:24.000Z
|
2021-04-12T13:52:09.000Z
|
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import argparse
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from dltk.core.metrics import dice
from dltk.core.activations import leaky_relu
from dltk.io.abstract_reader import Reader
from neuronet import neuronet_3d
from reader import read_fn
import json
# PARAMS
EVAL_EVERY_N_STEPS = 1000
EVAL_STEPS = 10
NUM_CHANNELS = 1
BATCH_SIZE = 1
SHUFFLE_CACHE_SIZE = 16
MAX_STEPS = 100000
# MODEL
def model_fn(features, labels, mode, params):
# 1. create a model and its outputs
def lrelu(x):
return leaky_relu(x, 0.1)
protocols = params["protocols"]
net_output_ops = neuronet_3d(features['x'],
num_classes=params["num_classes"],
protocols=protocols,
num_res_units=params["network"]["num_residual_units"],
filters=params["network"]["filters"],
strides=params["network"]["strides"],
activation=lrelu,
mode=mode)
# 1.1 Generate predictions only (for `ModeKeys.PREDICT`)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=net_output_ops,
export_outputs={'out': tf.estimator.export.PredictOutput(net_output_ops)})
# 2. set up a loss function
ce = []
for p in protocols:
ce.append(tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=net_output_ops['logits_{}'.format(p)],
labels=labels[p])))
    # Sum the cross-entropy losses and divide by the number of protocols to be predicted
loss = tf.div(tf.add_n(ce), tf.constant(len(protocols), dtype=tf.float32))
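    # e.g. with protocols ['t1', 't2'] this yields loss = (ce_t1 + ce_t2) / 2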
# 3. define a training op and ops for updating moving averages (i.e. for batch normalisation)
global_step = tf.train.get_global_step()
optimiser = tf.train.AdamOptimizer(learning_rate=params["learning_rate"], epsilon=1e-5)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimiser.minimize(loss, global_step=global_step)
# 4.1 (optional) create custom image summaries for tensorboard
my_image_summaries = {}
my_image_summaries['feat_t1'] = features['x'][0, 64, :, :, 0]
for p in protocols:
my_image_summaries['{}/lbl'.format(p)] = tf.cast(labels[p], tf.float32)[0, 64, :, :]
my_image_summaries['{}/pred'.format(p)] = tf.cast(net_output_ops['y_{}'.format(p)], tf.float32)[0, 64, :, :]
    expected_output_size = [1, 128, 128, 1]  # [B, H, W, C]; tf.summary.image expects NHWC
[tf.summary.image(name, tf.reshape(image, expected_output_size))
for name, image in my_image_summaries.items()]
# 4.2 (optional) create custom metric summaries for tensorboard
for i in range(len(protocols)):
p = protocols[i]
c = tf.constant(params["num_classes"][i])
mean_dice = tf.reduce_mean(tf.py_func(
dice, [net_output_ops['y_{}'.format(p)], labels[p], c], tf.float32)[1:])
tf.summary.scalar('dsc_{}'.format(p), mean_dice)
# 5. Return EstimatorSpec object
return tf.estimator.EstimatorSpec(mode=mode,
predictions=None,
loss=loss,
train_op=train_op,
eval_metric_ops=None)
def train(args, config):
np.random.seed(42)
tf.set_random_seed(42)
print('Setting up...')
# Parse csv files for file names
train_filenames = pd.read_csv(args.train_csv,
dtype=object,
keep_default_na=False,
                                  na_values=[]).values  # .values replaces the deprecated .as_matrix()
val_filenames = pd.read_csv(args.val_csv,
dtype=object,
keep_default_na=False,
                                na_values=[]).values
# Set up a data reader to handle the file i/o.
reader_params = {
'n_examples': 8,
'example_size': [128, 128, 128],
'extract_examples': True,
'protocols': config["protocols"]}
reader_example_shapes = {
'features': {'x': reader_params['example_size'] + [NUM_CHANNELS, ]},
'labels': {p: reader_params['example_size'] for p in config["protocols"]}}
reader = Reader(read_fn,
{'features': {'x': tf.float32},
'labels': {p: tf.int32 for p in config["protocols"]}})
# Get input functions and queue initialisation hooks for training and validation data
train_input_fn, train_qinit_hook = reader.get_inputs(
train_filenames,
tf.estimator.ModeKeys.TRAIN,
example_shapes=reader_example_shapes,
batch_size=BATCH_SIZE,
shuffle_cache_size=SHUFFLE_CACHE_SIZE,
params=reader_params)
val_input_fn, val_qinit_hook = reader.get_inputs(
val_filenames,
tf.estimator.ModeKeys.EVAL,
example_shapes=reader_example_shapes,
batch_size=BATCH_SIZE,
shuffle_cache_size=SHUFFLE_CACHE_SIZE,
params=reader_params)
# Instantiate the neural network estimator
nn = tf.estimator.Estimator(model_fn=model_fn,
model_dir=config["model_path"],
params=config,
config=tf.estimator.RunConfig(session_config=tf.ConfigProto()))
# Hooks for validation summaries
val_summary_hook = tf.contrib.training.SummaryAtEndHook(
os.path.join(config["model_path"], 'eval'))
step_cnt_hook = tf.train.StepCounterHook(
every_n_steps=EVAL_EVERY_N_STEPS, output_dir=config["model_path"])
print('Starting training...')
try:
for _ in range(MAX_STEPS // EVAL_EVERY_N_STEPS):
nn.train(input_fn=train_input_fn,
hooks=[train_qinit_hook, step_cnt_hook],
steps=EVAL_EVERY_N_STEPS)
results_val = nn.evaluate(input_fn=val_input_fn,
hooks=[val_qinit_hook, val_summary_hook],
steps=EVAL_STEPS)
print('Step = {}; val loss = {:.5f};'.format(results_val['global_step'], results_val['loss']))
except KeyboardInterrupt:
pass
print('Stopping now.')
export_dir = nn.export_savedmodel(
export_dir_base=config["model_path"],
serving_input_receiver_fn=reader.serving_input_receiver_fn(reader_example_shapes))
print('Model saved to {}.'.format(export_dir))
if __name__ == '__main__':
# Set up argument parser
parser = argparse.ArgumentParser(description='NeuroNet training script')
parser.add_argument('--restart', default=False, action='store_true')
parser.add_argument('--verbose', default=False, action='store_true')
parser.add_argument('--cuda_devices', '-c', default='0')
parser.add_argument('--train_csv', default='train.csv')
parser.add_argument('--val_csv', default='val.csv')
parser.add_argument('--config', default='config_all.json')
args = parser.parse_args()
# Set verbosity
if args.verbose:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
tf.logging.set_verbosity(tf.logging.INFO)
else:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.logging.set_verbosity(tf.logging.ERROR)
# GPU allocation options
os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda_devices
# Parse the run config
with open(args.config) as f:
config = json.load(f)
# Handle restarting and resuming training
if args.restart:
print('Restarting training from scratch.')
os.system('rm -rf {}'.format(config["model_path"]))
if not os.path.isdir(config["model_path"]):
os.system('mkdir -p {}'.format(config["model_path"]))
else:
print('Resuming training on model_path {}'.format(config["model_path"]))
# Call training
train(args, config)
| 35.903509
| 116
| 0.618495
|
4a070ecdd1045ac24bfc57e3aa6a218d7a14eaf4
| 1,048
|
py
|
Python
|
xlsxwriter/test/comparison/test_simple09.py
|
hugovk/XlsxWriter
|
e97cc66637d9895480ee32cfb5e561d652d3787b
|
[
"BSD-2-Clause"
] | null | null | null |
xlsxwriter/test/comparison/test_simple09.py
|
hugovk/XlsxWriter
|
e97cc66637d9895480ee32cfb5e561d652d3787b
|
[
"BSD-2-Clause"
] | null | null | null |
xlsxwriter/test/comparison/test_simple09.py
|
hugovk/XlsxWriter
|
e97cc66637d9895480ee32cfb5e561d652d3787b
|
[
"BSD-2-Clause"
] | null | null | null |
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('simple09.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
# Test data out of range. These should be ignored.
worksheet.write('A0', 'foo')
worksheet.write(-1, -1, 'foo')
worksheet.write(0, -1, 'foo')
worksheet.write(-1, 0, 'foo')
worksheet.write(1048576, 0, 'foo')
worksheet.write(0, 16384, 'foo')
workbook.close()
self.assertExcelEqual()
| 25.560976
| 79
| 0.597328
|
4a070ed99b96b252c5154834a3bf65a27d1d3055
| 5,708
|
py
|
Python
|
UWMadCrawler/UWMadCrawler/spiders/UWMadSpider.py
|
badgerherald/Course-Guide-Crawler
|
c04455bac26358788b125ee2f8f0d1ee8c9c23d0
|
[
"MIT"
] | 1
|
2016-11-21T20:18:25.000Z
|
2016-11-21T20:18:25.000Z
|
UWMadCrawler/UWMadCrawler/spiders/UWMadSpider.py
|
badgerherald/Course-Guide-Crawler
|
c04455bac26358788b125ee2f8f0d1ee8c9c23d0
|
[
"MIT"
] | null | null | null |
UWMadCrawler/UWMadCrawler/spiders/UWMadSpider.py
|
badgerherald/Course-Guide-Crawler
|
c04455bac26358788b125ee2f8f0d1ee8c9c23d0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Program: UW-Madison Course Crawler
# __author__ = "Joseph Kelley"
# __copyright__ = "Copyright 2014"
# __license__ = "MIT"
# __version__ = "0.7"
# __maintainer__ = "Joseph Kelley"
# __email__ = "jckelley2@wisc.edu"
# __status__ = "Beta"
# This program crawls the UW-Madison public-facing course register. It provides a live update
# of which classes are closed or waitlisted, and how many spots are still available. Note that this
# program is not hooked up to any database, so if you wish to save the crawled information rather
# than just print it to the screen, you will have to do so yourself.
from bs4 import BeautifulSoup
from datetime import datetime
import html5lib
import re
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http.request import Request
from scrapy.http.request.form import FormRequest
from scrapy.utils.response import get_base_url
from urlparse import urljoin
from UWMadCrawler.items import CourseItem, SectionItem
class CourseSpider(BaseSpider):
name = "UWMad"
curTerm = '1152' # (Fall 2014 is 1152)
classes = []
start_urls = [
'http://public.my.wisc.edu/portal/f/u124l1s4/normal/render.uP',
]
def parse(self, response):
yield FormRequest.from_response(response, formnumber=1, formdata={'termChoice': self.curTerm, 'resultsPerPage': '200'},
callback=self.parse_result_page)
def parse_result_page(self, response):
body = response.body
        body = re.sub(u'\xa0', " ", body)  # strip non-breaking spaces (the original pattern looks encoding-damaged; \xa0 is an assumption)
body = re.sub(r'\bcollapsed""><strong>Starts\b', 'collapsed"><strong>Starts', body)
body = re.sub('(?s)<script(.*?)</script>', "", body)
soup = BeautifulSoup(body, "html5lib")
last_updated_text = soup.find("span", {"class": "dataRefreshTimestamp"}).get_text().strip()
last_updated_unix = datetime.strptime(last_updated_text, "%I:%M%p %b %d, %Y").strftime("%s")
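        # e.g. a stamp like "11:05PM Aug 07, 2014" (hypothetical sample) parses to Unix seconds
        # as a string; note strftime("%s") is a platform-specific (glibc) extension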
        # Every courseResultUL cell belongs to a real result, but not every courseResult row does.
        # Use the courseResultUL cell to reach its parent courseResult row, which is guaranteed
        # to be a genuine result.
for td_course in soup.findAll("td", {"class": "courseResultUL", "align": "center"}):
course = td_course.parent
tdata = course.find_all("td")
# Skip the first 2, they hold nothing
department = tdata[2].a["title"]
course_num = tdata[3].get_text().strip()
course_title = tdata[4].get_text().strip()
num_credits = tdata[5].get_text().strip()
desc = tdata[6].get_text().strip()
last_taught = tdata[7].get_text().strip()
# Now load sibling table, which contains course description and prerequisites
nextSibling = course.next_sibling.next_sibling
description = nextSibling.contents[3].contents[1].get_text().strip()
pReq = nextSibling.contents[3].contents[3].get_text().strip()
pReq = re.sub('Pre-Reqs:', "", pReq).decode('utf-8', 'ignore')
base_url = get_base_url(response)
current_class = CourseItem(
department=department,
course_num=course_num,
course_title=course_title,
credits=num_credits,
desc=desc,
last_taught=last_taught,
description=description,
pReq=pReq,
)
yield current_class
# Scrape class pages to get sections for current class
section_url = tdata[4].find("a")["href"]
section = Request(url=urljoin(base_url, section_url), callback=self.parse_section)
section.meta['department'] = department
section.meta['course_num'] = course_num
section.meta['course_title'] = course_title
section.meta['last_updated'] = last_updated_unix
yield section
# Advance to next page
next_page_url = soup.find("a", {"title": "go to next page"})["href"]
if next_page_url:
yield Request(url=urljoin(get_base_url(response), next_page_url), callback=self.parse_result_page)
def parse_section(self, response):
soup = BeautifulSoup(response.body)
for tr in soup.find_all("tr", {"class": "detailsClassSection"}):
tdata = tr.find_all("td")
class_no = tdata[0].get_text().strip()
sec_no = tdata[1].strong.get_text().strip()
session = tdata[2].get_text().strip()
time = tdata[3].get_text().strip()
place = tdata[4].get_text().strip()
teacher = tdata[5].get_text().strip()
num_credits = tdata[6].get_text().strip()
# TODO: Parse honors information in tdata[7]?
openSeats = tdata[8].get_text().strip()
seat_status = tdata[9].get("data-enrollmentstatus")
current_section = SectionItem(
department=response.meta['department'],
course_num=response.meta['course_num'],
course_title=response.meta['course_title'],
last_updated=response.meta['last_updated'],
class_no=class_no,
sec_no=sec_no,
session=session,
time=time,
place=place,
teacher=teacher,
credits=num_credits,
openSeats=openSeats,
seat_status=seat_status
)
yield current_section
| 42.597015
| 128
| 0.601962
|
4a070fbe44ce8af2408082d607933544971a0e31
| 2,225
|
py
|
Python
|
Python/available-captures-for-rook.py
|
RideGreg/LeetCode
|
b70818b1e6947bf29519a24f78816e022ebab59e
|
[
"MIT"
] | 1
|
2022-01-30T06:55:28.000Z
|
2022-01-30T06:55:28.000Z
|
Python/available-captures-for-rook.py
|
RideGreg/LeetCode
|
b70818b1e6947bf29519a24f78816e022ebab59e
|
[
"MIT"
] | null | null | null |
Python/available-captures-for-rook.py
|
RideGreg/LeetCode
|
b70818b1e6947bf29519a24f78816e022ebab59e
|
[
"MIT"
] | 1
|
2021-12-31T03:56:39.000Z
|
2021-12-31T03:56:39.000Z
|
# Time: O(1)
# Space: O(1)
# 999
# On an 8 x 8 chessboard, there is one white rook. There also may be empty squares, white bishops,
# and black pawns. These are given as characters 'R', '.', 'B', and 'p' respectively.
# Uppercase characters represent white pieces, and lowercase characters represent black pieces.
#
# The rook moves as in the rules of Chess: it chooses one of four cardinal directions (north, east,
# west, and south), then moves in that direction until it chooses to stop, reaches the edge of the board,
# or captures an opposite-colored pawn by moving to the square it occupies. The rook also cannot
# move into a square occupied by a friendly bishop.
#
# Return the number of pawns the rook can capture in one move.
class Solution(object):
def numRookCaptures(self, board):
"""
:type board: List[List[str]]
:rtype: int
"""
directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
r, c = None, None
for i in xrange(8):
if r is not None:
break
for j in xrange(8):
if board[i][j] == 'R':
r, c = i, j
break
result = 0
for dr, dc in directions:
nr, nc = r+dr, c+dc
while 0 <= nr < 8 and 0 <= nc < 8:
if board[nr][nc] == 'p':
result += 1
if board[nr][nc] != '.':
break
                nr, nc = nr + dr, nc + dc
return result
print(Solution().numRookCaptures([
[".",".",".",".",".",".",".","."],
[".",".",".","p",".",".",".","."],
[".",".",".","R",".",".",".","p"],
[".",".",".",".",".",".",".","."],
[".",".",".",".",".",".",".","."],
[".",".",".","p",".",".",".","."],
[".",".",".",".",".",".",".","."],
[".",".",".",".",".",".",".","."]
])) # 3
print(Solution().numRookCaptures([
[".",".",".",".",".",".",".","."],
[".",".",".","p",".",".",".","."],
[".",".",".","p",".",".",".","."],
["p","p",".","R",".","p","B","."],
[".",".",".",".",".",".",".","."],
[".",".",".","B",".",".",".","."],
[".",".",".","p",".",".",".","."],
[".",".",".",".",".",".",".","."]
])) # 3
| 34.230769
| 105
| 0.404045
|
4a071196f6af65017b9fb100c72cf42da9373a57
| 11,894
|
py
|
Python
|
python/oneflow/test/tensor/test_tensor_indexing.py
|
mosout/oneflow
|
afbb221d900f1a340568ae2462b2022f8fcc4b3d
|
[
"Apache-2.0"
] | 1
|
2022-01-19T07:50:28.000Z
|
2022-01-19T07:50:28.000Z
|
python/oneflow/test/tensor/test_tensor_indexing.py
|
mosout/oneflow
|
afbb221d900f1a340568ae2462b2022f8fcc4b3d
|
[
"Apache-2.0"
] | null | null | null |
python/oneflow/test/tensor/test_tensor_indexing.py
|
mosout/oneflow
|
afbb221d900f1a340568ae2462b2022f8fcc4b3d
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
import unittest
from collections import OrderedDict
import numpy as np
import oneflow as flow
import oneflow.unittest
def test_basic_slice(test_case, numpy_x):
x = flow.tensor(numpy_x)
test_case.assertTrue(np.allclose(numpy_x[1], x[1].numpy()))
test_case.assertTrue(np.allclose(numpy_x[-2], x[-2].numpy()))
test_case.assertTrue(np.allclose(numpy_x[0, 1], x[0, 1].numpy()))
test_case.assertTrue(np.allclose(numpy_x[(0, 1)], x[(0, 1)].numpy()))
test_case.assertTrue(np.allclose(numpy_x[((0, 1))], x[((0, 1))].numpy()))
test_case.assertTrue(np.allclose(numpy_x[None], x[None].numpy()))
test_case.assertTrue(np.allclose(numpy_x[True], x[True].numpy()))
test_case.assertTrue(np.allclose(numpy_x[1, None], x[1, None].numpy()))
test_case.assertTrue(np.allclose(numpy_x[1, None, 1], x[1, None, 1].numpy()))
test_case.assertTrue(
np.allclose(numpy_x[1, None, None, 1], x[1, None, None, 1].numpy())
)
test_case.assertTrue(np.allclose(numpy_x[:], x[:].numpy()))
test_case.assertTrue(np.allclose(numpy_x[:1], x[:1].numpy()))
test_case.assertTrue(np.allclose(numpy_x[0:1], x[0:1].numpy()))
test_case.assertTrue(np.allclose(numpy_x[-2:-1], x[-2:-1].numpy()))
test_case.assertTrue(np.allclose(numpy_x[2:100:200], x[2:100:200].numpy()))
test_case.assertTrue(np.allclose(numpy_x[0:2, ...], x[0:2, ...].numpy()))
test_case.assertTrue(np.allclose(numpy_x[0:2, ..., 1], x[0:2, ..., 1].numpy()))
test_case.assertTrue(
np.allclose(numpy_x[0:2, ..., 1, 1], x[0:2, ..., 1, 1].numpy())
)
test_case.assertTrue(np.allclose(numpy_x[0:4:2, ...], x[0:4:2, ...].numpy()))
test_case.assertTrue(
np.allclose(numpy_x[0:2, None, ..., True], x[0:2, None, ..., True].numpy())
)
test_case.assertTrue(
np.allclose(numpy_x[None, ..., 0:4:2, True], x[None, ..., 0:4:2, True].numpy())
)
test_case.assertTrue(np.allclose(numpy_x[False, ...], x[False, ...].numpy()))
test_case.assertTrue(
np.allclose(numpy_x[False, True, ...], x[False, True, ...].numpy())
)
test_case.assertTrue(
np.allclose(numpy_x[True, ..., False, True], x[True, ..., False, True].numpy())
)
test_case.assertTrue(
np.allclose(
numpy_x[True, None, ..., False, True],
x[True, None, ..., False, True].numpy(),
)
)
test_case.assertTrue(
np.allclose(
numpy_x[True, 1, ..., False, True], x[True, 1, ..., False, True].numpy()
)
)
def test_advanced_indexing(test_case, numpy_x):
x = flow.tensor(numpy_x)
test_case.assertTrue(np.allclose(numpy_x[[0, 1]], x[[0, 1]].numpy()))
test_case.assertTrue(
np.allclose(numpy_x[[0, 1], [1, 0]], x[[0, 1], [1, 0]].numpy())
)
test_case.assertTrue(
np.allclose(
numpy_x[[[0, 1], [0, 1], [1, 0]]], x[[[0, 1], [0, 1], [1, 0]]].numpy()
)
)
test_case.assertTrue(np.allclose(numpy_x[[[0], [1]]], x[[[0], [1]]].numpy()))
test_case.assertTrue(
np.allclose(
numpy_x[[[[0], [1]], [[0], [1]], [0, 1]]],
x[[[[0], [1]], [[0], [1]], [0, 1]]].numpy(),
)
)
test_case.assertTrue(
np.allclose(
numpy_x[[[[0, 1], [1, 1]], [[0, 0], [1, 1]], [0, 1]]],
x[[[[0, 1], [1, 1]], [[0, 0], [1, 1]], [0, 1]]].numpy(),
)
)
# Tensor index
test_case.assertTrue(
np.allclose(
numpy_x[np.array([0, 1]), np.array([1, 0])],
x[flow.tensor([0, 1]), flow.tensor([1, 0])].numpy(),
)
)
test_case.assertTrue(
np.allclose(
numpy_x[:, np.array([[0, 1], [1, 1]]), np.array([[1, 0], [1, 1]])],
            x[:, flow.tensor([[0, 1], [1, 1]]), flow.tensor([[1, 0], [1, 1]])].numpy(),
)
)
# mask tensor index
mask = np.random.rand(numpy_x.shape[0], numpy_x.shape[1]).astype(np.float32)
y = flow.tensor(mask)
test_case.assertTrue(np.allclose(numpy_x[mask > 0.5], x[y > 0.5].numpy()))
test_case.assertTrue(np.allclose(numpy_x[mask > 0.5, 1], x[y > 0.5, 1].numpy()))
test_case.assertTrue(np.allclose(numpy_x[mask > 0], x[y > 0].numpy()))
test_case.assertTrue(np.allclose(numpy_x[mask > 0, 1], x[y > 0, 1].numpy()))
test_case.assertTrue(np.allclose(numpy_x[mask > 1], x[y > 1].numpy()))
test_case.assertTrue(np.allclose(numpy_x[mask > 1, 1], x[y > 1, 1].numpy()))
mask = np.random.rand(*numpy_x.shape).astype(np.float32)
y = flow.tensor(mask)
test_case.assertTrue(np.allclose(numpy_x[mask > 0.5], x[y > 0.5].numpy()))
test_case.assertTrue(np.allclose(numpy_x[mask > 0], x[y > 0].numpy()))
test_case.assertTrue(np.allclose(numpy_x[mask > 1], x[y > 1].numpy()))
def test_advanced_indexing_array(test_case, numpy_x, dtype):
x = flow.tensor(numpy_x)
idx = np.array([0, 1], dtype=dtype)
test_case.assertTrue(np.allclose(numpy_x[idx], x[idx].numpy()))
idx1 = np.array([0, 1], dtype=dtype)
idx2 = np.array([1, 0], dtype=dtype)
test_case.assertTrue(np.allclose(numpy_x[idx1, idx2], x[idx1, idx2].numpy()))
idx = np.array([[0, 1], [0, 1], [1, 0]], dtype=dtype)
test_case.assertTrue(np.allclose(numpy_x[idx, :, :], x[idx, :, :].numpy()))
test_case.assertTrue(np.allclose(numpy_x[idx, idx, :], x[idx, idx, :].numpy()))
test_case.assertTrue(np.allclose(numpy_x[idx, idx, idx], x[idx, idx, idx].numpy()))
idx1 = np.array([[1, 0, 1], [1, 1, 0]])
idx2 = np.array([[0], [1]])
test_case.assertTrue(
np.allclose(numpy_x[:, idx1, :, idx2].shape, x[:, idx1, :, idx2].shape)
)
test_case.assertTrue(
np.allclose(numpy_x[:, idx1, 1, idx2].shape, x[:, idx1, 1, idx2].shape)
)
test_case.assertTrue(
np.allclose(numpy_x[idx1, :, idx2, :].shape, x[idx1, :, idx2, :].shape)
)
test_case.assertTrue(
np.allclose(numpy_x[:, idx1, idx2, :].shape, x[:, idx1, idx2, :].shape)
)
def test_combining_indexing(test_case, numpy_x):
x = flow.tensor(numpy_x)
test_case.assertTrue(
np.allclose(numpy_x[[0, 1], 1:2, [1, 0]], x[[0, 1], 1:2, [1, 0]].numpy())
)
test_case.assertTrue(
np.allclose(numpy_x[:, [0, 1], [1, 0]], x[:, [0, 1], [1, 0]].numpy())
)
test_case.assertTrue(np.allclose(numpy_x[:, [0, 1], 1], x[:, [0, 1], 1].numpy()))
test_case.assertTrue(
np.allclose(numpy_x[..., [0, 1], 1, [1, 0]], x[..., [0, 1], 1, [1, 0]].numpy())
)
def test_mask_getitem(test_case, numpy_x):
x = flow.tensor(numpy_x)
mask = np.random.rand(*numpy_x.shape).astype(np.float32)
y = flow.tensor(mask)
test_case.assertTrue(np.allclose(numpy_x[mask > 0.5], x[y > 0.5].numpy()))
test_case.assertTrue(np.allclose(numpy_x[mask > 1.0], x[y > 1.0].numpy()))
mask = np.random.rand(numpy_x.shape[0]).astype(np.float32)
y = flow.tensor(mask)
test_case.assertTrue(np.allclose(numpy_x[mask > 0.5], x[y > 0.5].numpy()))
test_case.assertTrue(np.allclose(numpy_x[mask > 1.0], x[y > 1.0].numpy()))
test_case.assertTrue(np.allclose(numpy_x[mask > 0.5, 1], x[y > 0.5, 1].numpy()))
test_case.assertTrue(np.allclose(numpy_x[mask > 1.0, 1], x[y > 1.0, 1].numpy()))
def test_mask_setitem(test_case, numpy_x):
x = flow.tensor(numpy_x)
# mask tensor index
mask = np.random.rand(*numpy_x.shape).astype(np.float32)
y = flow.tensor(mask)
# broadcast set
x[y > 0.5] = 1.0
numpy_x[mask > 0.5] = 1.0
test_case.assertTrue(np.allclose(numpy_x, x.numpy()))
# elementwise set
update = np.random.randn((mask > 0.5).sum()).astype(np.float32)
tensor_update = flow.tensor(update)
x[y > 0.5] = tensor_update
numpy_x[mask > 0.5] = update
test_case.assertTrue(np.allclose(numpy_x, x.numpy()))
# empty mask
x[y > 1.0] = 1.0
numpy_x[mask > 1.0] = 1.0
test_case.assertTrue(np.allclose(numpy_x, x.numpy()))
@flow.unittest.skip_unless_1n1d()
class TestTensorIndexing(flow.unittest.TestCase):
def test_basic_slice(test_case):
numpy_x = np.arange(0, 60, 1).reshape([3, 4, 5]).astype(np.float32)
test_basic_slice(test_case, numpy_x)
numpy_x = np.arange(0, 360, 1).reshape([3, 4, 5, 6]).astype(np.float32)
test_basic_slice(test_case, numpy_x)
numpy_x = np.arange(0, 720, 1).reshape([8, 9, 10]).astype(np.float32)
test_basic_slice(test_case, numpy_x)
def test_advanced_indexing(test_case):
numpy_x = np.arange(0, 60, 1).reshape([3, 4, 5]).astype(np.float32)
test_advanced_indexing(test_case, numpy_x)
numpy_x = np.arange(0, 360, 1).reshape([3, 4, 5, 6]).astype(np.float32)
test_advanced_indexing(test_case, numpy_x)
numpy_x = np.arange(0, 720, 1).reshape([8, 9, 10]).astype(np.float32)
test_advanced_indexing(test_case, numpy_x)
def test_advanced_indexing_array(test_case):
numpy_x = np.arange(0, 60, 1).reshape([3, 2, 2, 5]).astype(np.float32)
test_advanced_indexing_array(test_case, numpy_x, np.int32)
test_advanced_indexing_array(test_case, numpy_x, np.int64)
numpy_x = np.arange(0, 360, 1).reshape([3, 4, 5, 6]).astype(np.float32)
test_advanced_indexing_array(test_case, numpy_x, np.int32)
test_advanced_indexing_array(test_case, numpy_x, np.int64)
numpy_x = np.arange(0, 720, 1).reshape([5, 8, 9, 2]).astype(np.float32)
test_advanced_indexing_array(test_case, numpy_x, np.int32)
test_advanced_indexing_array(test_case, numpy_x, np.int64)
def test_combining_indexing(test_case):
numpy_x = np.arange(0, 60, 1).reshape([3, 4, 5]).astype(np.float32)
test_combining_indexing(test_case, numpy_x)
numpy_x = np.arange(0, 360, 1).reshape([3, 4, 5, 6]).astype(np.float32)
test_combining_indexing(test_case, numpy_x)
numpy_x = np.arange(0, 720, 1).reshape([8, 9, 10]).astype(np.float32)
test_combining_indexing(test_case, numpy_x)
def test_mask_getitem(test_case):
numpy_x = np.arange(0, 60, 1).reshape([3, 4, 5]).astype(np.float32)
test_mask_getitem(test_case, numpy_x)
numpy_x = np.arange(0, 360, 1).reshape([3, 4, 5, 6]).astype(np.float32)
test_mask_getitem(test_case, numpy_x)
numpy_x = np.arange(0, 720, 1).reshape([8, 9, 10]).astype(np.float32)
test_mask_getitem(test_case, numpy_x)
numpy_x = np.arange(0, 27, 1).reshape(3, 3, 3)
x = flow.tensor(numpy_x)
test_case.assertTrue(
np.allclose(
numpy_x[[False, True, False], 1], x[[False, True, False], 1].numpy()
)
)
test_case.assertTrue(
np.allclose(
numpy_x[[False, True, False], [True, False, False]],
x[[False, True, False], [True, False, False]].numpy(),
)
)
def test_mask_setitem(test_case):
numpy_x = np.arange(0, 60, 1).reshape([3, 4, 5]).astype(np.float32)
test_mask_setitem(test_case, numpy_x)
numpy_x = np.arange(0, 360, 1).reshape([3, 4, 5, 6]).astype(np.float32)
test_mask_setitem(test_case, numpy_x)
numpy_x = np.arange(0, 720, 1).reshape([8, 9, 10]).astype(np.float32)
test_mask_setitem(test_case, numpy_x)
if __name__ == "__main__":
unittest.main()
| 38.367742
| 88
| 0.606777
|
4a0713070dab2ed31dfbdcf4a78ab1d38e2e9b8c
| 21,100
|
py
|
Python
|
wsme/tests/test_types.py
|
kaptnemo/wsme
|
3de88929df41b18d56e423b861e19c5176502384
|
[
"MIT"
] | 2
|
2020-11-05T06:09:20.000Z
|
2021-01-03T07:22:18.000Z
|
venv/Lib/site-packages/wsme/tests/test_types.py
|
sunausti/mywebdemo
|
884bcf3b68e0063dcb08c602f0dc784753ec8a87
|
[
"Apache-2.0"
] | null | null | null |
venv/Lib/site-packages/wsme/tests/test_types.py
|
sunausti/mywebdemo
|
884bcf3b68e0063dcb08c602f0dc784753ec8a87
|
[
"Apache-2.0"
] | null | null | null |
import re
try:
import unittest2 as unittest
except ImportError:
import unittest
import six
from wsme import exc
from wsme import types
def gen_class():
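    # exec builds a brand-new class object on every call, so no wsme registration
    # state leaks between tests (intent inferred from usage, not documented).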
d = {}
exec('''class tmp(object): pass''', d)
return d['tmp']
class TestTypes(unittest.TestCase):
def setUp(self):
types.registry = types.Registry()
def test_default_usertype(self):
class MyType(types.UserType):
basetype = str
My = MyType()
assert My.validate('a') == 'a'
assert My.tobasetype('a') == 'a'
assert My.frombasetype('a') == 'a'
def test_unset(self):
u = types.Unset
assert not u
def test_flat_type(self):
class Flat(object):
aint = int
abytes = six.binary_type
atext = six.text_type
afloat = float
types.register_type(Flat)
assert len(Flat._wsme_attributes) == 4
attrs = Flat._wsme_attributes
print(attrs)
assert attrs[0].key == 'aint'
assert attrs[0].name == 'aint'
assert isinstance(attrs[0], types.wsattr)
assert attrs[0].datatype == int
assert attrs[0].mandatory is False
assert attrs[1].key == 'abytes'
assert attrs[1].name == 'abytes'
assert attrs[2].key == 'atext'
assert attrs[2].name == 'atext'
assert attrs[3].key == 'afloat'
assert attrs[3].name == 'afloat'
def test_private_attr(self):
class WithPrivateAttrs(object):
_private = 12
types.register_type(WithPrivateAttrs)
assert len(WithPrivateAttrs._wsme_attributes) == 0
def test_attribute_order(self):
class ForcedOrder(object):
_wsme_attr_order = ('a2', 'a1', 'a3')
a1 = int
a2 = int
a3 = int
types.register_type(ForcedOrder)
print(ForcedOrder._wsme_attributes)
assert ForcedOrder._wsme_attributes[0].key == 'a2'
assert ForcedOrder._wsme_attributes[1].key == 'a1'
assert ForcedOrder._wsme_attributes[2].key == 'a3'
c = gen_class()
print(c)
types.register_type(c)
del c._wsme_attributes
c.a2 = int
c.a1 = int
c.a3 = int
types.register_type(c)
assert c._wsme_attributes[0].key == 'a1', c._wsme_attributes[0].key
assert c._wsme_attributes[1].key == 'a2'
assert c._wsme_attributes[2].key == 'a3'
def test_wsproperty(self):
class WithWSProp(object):
def __init__(self):
self._aint = 0
def get_aint(self):
return self._aint
def set_aint(self, value):
self._aint = value
aint = types.wsproperty(int, get_aint, set_aint, mandatory=True)
types.register_type(WithWSProp)
print(WithWSProp._wsme_attributes)
assert len(WithWSProp._wsme_attributes) == 1
a = WithWSProp._wsme_attributes[0]
assert a.key == 'aint'
assert a.datatype == int
assert a.mandatory
o = WithWSProp()
o.aint = 12
assert o.aint == 12
def test_nested(self):
class Inner(object):
aint = int
class Outer(object):
inner = Inner
types.register_type(Outer)
assert hasattr(Inner, '_wsme_attributes')
assert len(Inner._wsme_attributes) == 1
def test_inspect_with_inheritance(self):
class Parent(object):
parent_attribute = int
class Child(Parent):
child_attribute = int
types.register_type(Parent)
types.register_type(Child)
assert len(Child._wsme_attributes) == 2
def test_selfreftype(self):
class SelfRefType(object):
pass
SelfRefType.parent = SelfRefType
types.register_type(SelfRefType)
def test_inspect_with_property(self):
class AType(object):
@property
def test(self):
return 'test'
types.register_type(AType)
assert len(AType._wsme_attributes) == 0
assert AType().test == 'test'
def test_enum(self):
aenum = types.Enum(str, 'v1', 'v2')
assert aenum.basetype is str
class AType(object):
a = aenum
types.register_type(AType)
assert AType.a.datatype is aenum
obj = AType()
obj.a = 'v1'
assert obj.a == 'v1', repr(obj.a)
self.assertRaisesRegexp(exc.InvalidInput,
"Invalid input for field/attribute a. \
Value: 'v3'. Value should be one of: v., v.",
setattr,
obj,
'a',
'v3')
def test_attribute_validation(self):
class AType(object):
alist = [int]
aint = int
types.register_type(AType)
obj = AType()
obj.alist = [1, 2, 3]
assert obj.alist == [1, 2, 3]
obj.aint = 5
assert obj.aint == 5
self.assertRaises(exc.InvalidInput, setattr, obj, 'alist', 12)
self.assertRaises(exc.InvalidInput, setattr, obj, 'alist', [2, 'a'])
def test_attribute_validation_minimum(self):
class ATypeInt(object):
attr = types.IntegerType(minimum=1, maximum=5)
types.register_type(ATypeInt)
obj = ATypeInt()
obj.attr = 2
        # comparing the string 'zero' with the integer minimum (1) raises a
        # TypeError, which must be wrapped in an InvalidInput exception
self.assertRaises(exc.InvalidInput, setattr, obj, 'attr', 'zero')
def test_text_attribute_conversion(self):
class SType(object):
atext = types.text
abytes = types.bytes
types.register_type(SType)
obj = SType()
obj.atext = six.b('somebytes')
assert obj.atext == six.u('somebytes')
assert isinstance(obj.atext, types.text)
obj.abytes = six.u('sometext')
assert obj.abytes == six.b('sometext')
assert isinstance(obj.abytes, types.bytes)
def test_named_attribute(self):
class ABCDType(object):
a_list = types.wsattr([int], name='a.list')
astr = str
types.register_type(ABCDType)
assert len(ABCDType._wsme_attributes) == 2
attrs = ABCDType._wsme_attributes
assert attrs[0].key == 'a_list', attrs[0].key
assert attrs[0].name == 'a.list', attrs[0].name
assert attrs[1].key == 'astr', attrs[1].key
assert attrs[1].name == 'astr', attrs[1].name
def test_wsattr_del(self):
class MyType(object):
a = types.wsattr(int)
types.register_type(MyType)
value = MyType()
value.a = 5
assert value.a == 5
del value.a
assert value.a is types.Unset
def test_validate_dict(self):
assert types.validate_value({int: str}, {1: '1', 5: '5'})
self.assertRaises(ValueError, types.validate_value,
{int: str}, [])
assert types.validate_value({int: str}, {'1': '1', 5: '5'})
self.assertRaises(ValueError, types.validate_value,
{int: str}, {1: 1, 5: '5'})
def test_validate_list_valid(self):
assert types.validate_value([int], [1, 2])
assert types.validate_value([int], ['5'])
def test_validate_list_empty(self):
assert types.validate_value([int], []) == []
def test_validate_list_none(self):
v = types.ArrayType(int)
assert v.validate(None) is None
def test_validate_list_invalid_member(self):
self.assertRaises(ValueError, types.validate_value, [int],
['not-a-number'])
def test_validate_list_invalid_type(self):
self.assertRaises(ValueError, types.validate_value, [int], 1)
def test_validate_float(self):
self.assertEqual(types.validate_value(float, 1), 1.0)
self.assertEqual(types.validate_value(float, '1'), 1.0)
self.assertEqual(types.validate_value(float, 1.1), 1.1)
self.assertRaises(ValueError, types.validate_value, float, [])
self.assertRaises(ValueError, types.validate_value, float,
'not-a-float')
def test_validate_int(self):
self.assertEqual(types.validate_value(int, 1), 1)
self.assertEqual(types.validate_value(int, '1'), 1)
self.assertEqual(types.validate_value(int, six.u('1')), 1)
self.assertRaises(ValueError, types.validate_value, int, 1.1)
def test_validate_integer_type(self):
v = types.IntegerType(minimum=1, maximum=10)
v.validate(1)
v.validate(5)
v.validate(10)
self.assertRaises(ValueError, v.validate, 0)
self.assertRaises(ValueError, v.validate, 11)
def test_validate_string_type(self):
v = types.StringType(min_length=1, max_length=10,
pattern='^[a-zA-Z0-9]*$')
v.validate('1')
v.validate('12345')
v.validate('1234567890')
self.assertRaises(ValueError, v.validate, '')
self.assertRaises(ValueError, v.validate, '12345678901')
# Test a pattern validation
v.validate('a')
v.validate('A')
self.assertRaises(ValueError, v.validate, '_')
def test_validate_string_type_precompile(self):
precompile = re.compile('^[a-zA-Z0-9]*$')
v = types.StringType(min_length=1, max_length=10,
pattern=precompile)
# Test a pattern validation
v.validate('a')
v.validate('A')
self.assertRaises(ValueError, v.validate, '_')
def test_validate_string_type_pattern_exception_message(self):
regex = '^[a-zA-Z0-9]*$'
v = types.StringType(pattern=regex)
try:
v.validate('_')
            self.fail()  # unittest has no assertFail(); fail() is the intended call
except ValueError as e:
self.assertIn(regex, str(e))
def test_validate_ipv4_address_type(self):
v = types.IPv4AddressType()
self.assertEqual(v.validate('127.0.0.1'), '127.0.0.1')
self.assertEqual(v.validate('192.168.0.1'), '192.168.0.1')
self.assertEqual(v.validate(u'8.8.1.1'), u'8.8.1.1')
self.assertRaises(ValueError, v.validate, '')
self.assertRaises(ValueError, v.validate, 'foo')
self.assertRaises(ValueError, v.validate,
'2001:0db8:bd05:01d2:288a:1fc0:0001:10ee')
self.assertRaises(ValueError, v.validate, '1.2.3')
def test_validate_ipv6_address_type(self):
v = types.IPv6AddressType()
self.assertEqual(v.validate('0:0:0:0:0:0:0:1'),
'0:0:0:0:0:0:0:1')
self.assertEqual(v.validate(u'0:0:0:0:0:0:0:1'), u'0:0:0:0:0:0:0:1')
self.assertEqual(v.validate('2001:0db8:bd05:01d2:288a:1fc0:0001:10ee'),
'2001:0db8:bd05:01d2:288a:1fc0:0001:10ee')
self.assertRaises(ValueError, v.validate, '')
self.assertRaises(ValueError, v.validate, 'foo')
self.assertRaises(ValueError, v.validate, '192.168.0.1')
self.assertRaises(ValueError, v.validate, '0:0:0:0:0:0:1')
def test_validate_uuid_type(self):
v = types.UuidType()
self.assertEqual(v.validate('6a0a707c-45ef-4758-b533-e55adddba8ce'),
'6a0a707c-45ef-4758-b533-e55adddba8ce')
self.assertEqual(v.validate('6a0a707c45ef4758b533e55adddba8ce'),
'6a0a707c-45ef-4758-b533-e55adddba8ce')
self.assertRaises(ValueError, v.validate, '')
self.assertRaises(ValueError, v.validate, 'foo')
self.assertRaises(ValueError, v.validate,
'6a0a707c-45ef-4758-b533-e55adddba8ce-a')
def test_register_invalid_array(self):
self.assertRaises(ValueError, types.register_type, [])
self.assertRaises(ValueError, types.register_type, [int, str])
self.assertRaises(AttributeError, types.register_type, [1])
def test_register_invalid_dict(self):
self.assertRaises(ValueError, types.register_type, {})
self.assertRaises(ValueError, types.register_type,
{int: str, str: int})
self.assertRaises(ValueError, types.register_type,
{types.Unset: str})
def test_list_attribute_no_auto_register(self):
class MyType(object):
aint = int
assert not hasattr(MyType, '_wsme_attributes')
self.assertRaises(TypeError, types.list_attributes, MyType)
assert not hasattr(MyType, '_wsme_attributes')
def test_list_of_complextypes(self):
class A(object):
bs = types.wsattr(['B'])
class B(object):
i = int
types.register_type(A)
types.register_type(B)
assert A.bs.datatype.item_type is B
def test_cross_referenced_types(self):
class A(object):
b = types.wsattr('B')
class B(object):
a = A
types.register_type(A)
types.register_type(B)
assert A.b.datatype is B
def test_base(self):
class B1(types.Base):
b2 = types.wsattr('B2')
class B2(types.Base):
b2 = types.wsattr('B2')
assert B1.b2.datatype is B2, repr(B1.b2.datatype)
assert B2.b2.datatype is B2
def test_base_init(self):
class C1(types.Base):
s = six.text_type
c = C1(s=six.u('test'))
assert c.s == six.u('test')
def test_array_eq(self):
ell = [types.ArrayType(str)]
assert types.ArrayType(str) in ell
def test_array_sample(self):
s = types.ArrayType(str).sample()
assert isinstance(s, list)
assert s
assert s[0] == ''
def test_dict_sample(self):
s = types.DictType(str, str).sample()
assert isinstance(s, dict)
assert s
assert s == {'': ''}
def test_binary_to_base(self):
import base64
assert types.binary.tobasetype(None) is None
expected = base64.encodestring(six.b('abcdef'))
assert types.binary.tobasetype(six.b('abcdef')) == expected
def test_binary_from_base(self):
import base64
assert types.binary.frombasetype(None) is None
encoded = base64.encodestring(six.b('abcdef'))
assert types.binary.frombasetype(encoded) == six.b('abcdef')
def test_wsattr_weakref_datatype(self):
# If the datatype inside the wsattr ends up a weakref, it
# should be converted to the real type when accessed again by
# the property getter.
import weakref
a = types.wsattr(int)
a.datatype = weakref.ref(int)
assert a.datatype is int
def test_wsattr_list_datatype(self):
# If the datatype inside the wsattr ends up a list of weakrefs
# to types, it should be converted to the real types when
# accessed again by the property getter.
import weakref
a = types.wsattr(int)
a.datatype = [weakref.ref(int)]
assert isinstance(a.datatype, list)
assert a.datatype[0] is int
def test_file_get_content_by_reading(self):
class buffer:
def read(self):
return 'abcdef'
f = types.File(file=buffer())
assert f.content == 'abcdef'
def test_file_content_overrides_file(self):
class buffer:
def read(self):
return 'from-file'
f = types.File(content='from-content', file=buffer())
assert f.content == 'from-content'
def test_file_setting_content_discards_file(self):
class buffer:
def read(self):
return 'from-file'
f = types.File(file=buffer())
f.content = 'from-content'
assert f.content == 'from-content'
def test_file_field_storage(self):
class buffer:
def read(self):
return 'from-file'
class fieldstorage:
filename = 'static.json'
file = buffer()
type = 'application/json'
f = types.File(fieldstorage=fieldstorage)
assert f.content == 'from-file'
def test_file_field_storage_value(self):
class buffer:
def read(self):
return 'from-file'
class fieldstorage:
filename = 'static.json'
file = None
type = 'application/json'
value = 'from-value'
f = types.File(fieldstorage=fieldstorage)
assert f.content == 'from-value'
def test_file_property_file(self):
class buffer:
def read(self):
return 'from-file'
buf = buffer()
f = types.File(file=buf)
assert f.file is buf
def test_file_property_content(self):
class buffer:
def read(self):
return 'from-file'
f = types.File(content=six.b('from-content'))
assert f.file.read() == six.b('from-content')
def test_unregister(self):
class TempType(object):
pass
types.registry.register(TempType)
v = types.registry.lookup('TempType')
self.assertIs(v, TempType)
types.registry._unregister(TempType)
after = types.registry.lookup('TempType')
self.assertIs(after, None)
def test_unregister_twice(self):
class TempType(object):
pass
types.registry.register(TempType)
v = types.registry.lookup('TempType')
self.assertIs(v, TempType)
types.registry._unregister(TempType)
# Second call should not raise an exception
types.registry._unregister(TempType)
after = types.registry.lookup('TempType')
self.assertIs(after, None)
def test_unregister_array_type(self):
class TempType(object):
pass
t = [TempType]
types.registry.register(t)
self.assertNotEqual(types.registry.array_types, set())
types.registry._unregister(t)
self.assertEqual(types.registry.array_types, set())
def test_unregister_array_type_twice(self):
class TempType(object):
pass
t = [TempType]
types.registry.register(t)
self.assertNotEqual(types.registry.array_types, set())
types.registry._unregister(t)
# Second call should not raise an exception
types.registry._unregister(t)
self.assertEqual(types.registry.array_types, set())
def test_unregister_dict_type(self):
class TempType(object):
pass
t = {str: TempType}
types.registry.register(t)
self.assertNotEqual(types.registry.dict_types, set())
types.registry._unregister(t)
self.assertEqual(types.registry.dict_types, set())
def test_unregister_dict_type_twice(self):
class TempType(object):
pass
t = {str: TempType}
types.registry.register(t)
self.assertNotEqual(types.registry.dict_types, set())
types.registry._unregister(t)
# Second call should not raise an exception
types.registry._unregister(t)
self.assertEqual(types.registry.dict_types, set())
def test_reregister(self):
class TempType(object):
pass
types.registry.register(TempType)
v = types.registry.lookup('TempType')
self.assertIs(v, TempType)
types.registry.reregister(TempType)
after = types.registry.lookup('TempType')
self.assertIs(after, TempType)
def test_reregister_and_add_attr(self):
class TempType(object):
pass
types.registry.register(TempType)
attrs = types.list_attributes(TempType)
self.assertEqual(attrs, [])
TempType.one = str
types.registry.reregister(TempType)
after = types.list_attributes(TempType)
self.assertNotEqual(after, [])
def test_dynamicbase_add_attributes(self):
class TempType(types.DynamicBase):
pass
types.registry.register(TempType)
attrs = types.list_attributes(TempType)
self.assertEqual(attrs, [])
TempType.add_attributes(one=str)
after = types.list_attributes(TempType)
self.assertEqual(len(after), 1)
def test_dynamicbase_add_attributes_second(self):
class TempType(types.DynamicBase):
pass
types.registry.register(TempType)
attrs = types.list_attributes(TempType)
self.assertEqual(attrs, [])
TempType.add_attributes(one=str)
TempType.add_attributes(two=int)
after = types.list_attributes(TempType)
self.assertEqual(len(after), 2)
def test_non_registered_complex_type(self):
class TempType(types.Base):
__registry__ = None
self.assertFalse(types.iscomplex(TempType))
types.registry.register(TempType)
self.assertTrue(types.iscomplex(TempType))
| 31.586826
| 79
| 0.595403
|
4a07144655885c41f30164cc4ad69928e34f2fc5
| 10,014
|
py
|
Python
|
codereview/library.py
|
draem0507/rietveld
|
70bda77edf3a642ef51ecc2d73c165345af5fdee
|
[
"Apache-2.0"
] | 583
|
2015-03-28T23:49:34.000Z
|
2022-03-25T10:58:07.000Z
|
codereview/library.py
|
draem0507/rietveld
|
70bda77edf3a642ef51ecc2d73c165345af5fdee
|
[
"Apache-2.0"
] | 61
|
2015-04-02T01:08:34.000Z
|
2021-05-27T16:19:35.000Z
|
codereview/library.py
|
draem0507/rietveld
|
70bda77edf3a642ef51ecc2d73c165345af5fdee
|
[
"Apache-2.0"
] | 175
|
2015-03-29T13:06:36.000Z
|
2022-03-31T07:02:20.000Z
|
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Django template library for Rietveld."""
import cgi
import math
from google.appengine.api import memcache
from google.appengine.api import users
import django.template
import django.utils.safestring
from django.core.urlresolvers import reverse
from codereview import auth_utils
from codereview import models
register = django.template.Library()
user_cache = {}
def get_links_for_users(user_emails):
"""Return a dictionary of email->link to user page and fill caches."""
link_dict = {}
remaining_emails = set(user_emails)
# initialize with email usernames
for email in remaining_emails:
nick = email.split('@', 1)[0]
link_dict[email] = cgi.escape(nick)
# look in the local cache
for email in remaining_emails:
if email in user_cache:
link_dict[email] = user_cache[email]
remaining_emails = remaining_emails - set(user_cache)
if not remaining_emails:
return link_dict
# then look in memcache
memcache_results = memcache.get_multi(remaining_emails,
key_prefix="show_user:")
for email in memcache_results:
link_dict[email] = memcache_results[email]
user_cache[email] = memcache_results[email]
remaining_emails = remaining_emails - set(memcache_results)
if not remaining_emails:
return link_dict
# and finally hit the datastore
accounts = models.Account.get_accounts_for_emails(remaining_emails)
for account in accounts:
if account and account.user_has_selected_nickname:
ret = ('<a href="%s" onMouseOver="M_showUserInfoPopup(this)">%s</a>' %
(reverse('codereview.views.show_user', args=[account.nickname]),
cgi.escape(account.nickname)))
link_dict[account.email] = ret
datastore_results = dict((e, link_dict[e]) for e in remaining_emails)
memcache.set_multi(datastore_results, 300, key_prefix='show_user:')
user_cache.update(datastore_results)
return link_dict
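# Hypothetical illustration (names are made up): get_links_for_users(['alice@example.com'])
# -> {'alice@example.com': '<a href="...">alice</a>'} when the account has a chosen nickname,
#    or {'alice@example.com': 'alice'} as the escaped email-username fallback.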
def get_link_for_user(email):
"""Get a link to a user's profile page."""
links = get_links_for_users([email])
return links[email]
@register.filter
def show_user(email, arg=None, _autoescape=None, _memcache_results=None):
"""Render a link to the user's dashboard, with text being the nickname."""
if isinstance(email, users.User):
email = email.email()
if not arg:
user = auth_utils.get_current_user()
if user is not None and email == user.email():
return 'me'
ret = get_link_for_user(email)
return django.utils.safestring.mark_safe(ret)
@register.filter
def show_reviewers(reviewer_list, arg=None):
"""Render list of links to each reviewer's dashboard with color."""
email_list = []
for reviewer, _approval in reviewer_list.items():
email = reviewer
if isinstance(email, users.User):
email = email.email()
email_list.append(email)
links = get_links_for_users(email_list)
if not arg:
user = auth_utils.get_current_user()
if user is not None:
links[user.email()] = 'me'
return django.utils.safestring.mark_safe(', '.join(
format_approval_text(links[r], a) for r, a in reviewer_list.items()))
def format_approval_text(text, approval):
  if approval is None:
return text
if approval:
return "<span class='approval'>" + text + "</span>"
return "<span class='disapproval'>" + text + "</span>"
@register.filter
def show_users(email_list, arg=None):
"""Render list of links to each user's dashboard."""
new_email_list = []
for email in email_list:
if isinstance(email, users.User):
email = email.email()
new_email_list.append(email)
links = get_links_for_users(new_email_list)
if not arg:
user = auth_utils.get_current_user()
if user is not None:
links[user.email()] = 'me'
return django.utils.safestring.mark_safe(', '.join(
links[email] for email in email_list))
class UrlAppendViewSettingsNode(django.template.Node):
"""Django template tag that appends context and column_width parameter.
This tag should be used after any URL that requires view settings.
Example:
<a href='{%url /foo%}{%urlappend_view_settings%}'>
The tag tries to get the current column width and context from the
  template context and, if they're present, it returns '?param1&param2';
otherwise it returns an empty string.
"""
def __init__(self):
super(UrlAppendViewSettingsNode, self).__init__()
self.view_context = django.template.Variable('context')
self.view_colwidth = django.template.Variable('column_width')
def render(self, context):
"""Returns a HTML fragment."""
url_params = []
current_context = -1
try:
current_context = self.view_context.resolve(context)
except django.template.VariableDoesNotExist:
pass
if current_context is None:
url_params.append('context=')
elif isinstance(current_context, int) and current_context > 0:
url_params.append('context=%d' % current_context)
current_colwidth = None
try:
current_colwidth = self.view_colwidth.resolve(context)
except django.template.VariableDoesNotExist:
pass
if current_colwidth is not None:
url_params.append('column_width=%d' % current_colwidth)
if url_params:
return '?%s' % '&'.join(url_params)
return ''
@register.tag
def urlappend_view_settings(_parser, _token):
"""The actual template tag."""
return UrlAppendViewSettingsNode()
def get_nickname(email, never_me=False, request=None):
"""Return a nickname for an email address.
If 'never_me' is True, 'me' is not returned if 'email' belongs to the
current logged in user. If 'request' is a HttpRequest, it is used to
cache the nickname returned by models.Account.get_nickname_for_email().
"""
if isinstance(email, users.User):
email = email.email()
if not never_me:
if request is not None:
user = request.user
else:
user = auth_utils.get_current_user()
if user is not None and email == user.email():
return 'me'
if request is None:
return models.Account.get_nickname_for_email(email)
# _nicknames is injected into request as a cache.
# TODO(maruel): Use memcache instead.
# Access to a protected member _nicknames of a client class
# pylint: disable=W0212
if getattr(request, '_nicknames', None) is None:
request._nicknames = {}
if email in request._nicknames:
return request._nicknames[email]
result = models.Account.get_nickname_for_email(email)
request._nicknames[email] = result
return result
class NicknameNode(django.template.Node):
"""Renders a nickname for a given email address.
The return value is cached if a HttpRequest is available in a
'request' template variable.
The template tag accepts one or two arguments. The first argument is
the template variable for the email address. If the optional second
argument evaluates to True, 'me' as nickname is never rendered.
Example usage:
{% cached_nickname msg.sender %}
{% cached_nickname msg.sender True %}
"""
def __init__(self, email_address, never_me=''):
"""Constructor.
'email_address' is the name of the template variable that holds an
email address. If 'never_me' evaluates to True, 'me' won't be returned.
"""
super(NicknameNode, self).__init__()
self.email_address = django.template.Variable(email_address)
self.never_me = bool(never_me.strip())
self.is_multi = False
def render(self, context):
try:
email = self.email_address.resolve(context)
except django.template.VariableDoesNotExist:
return ''
request = context.get('request')
if self.is_multi:
return ', '.join(get_nickname(e, self.never_me, request) for e in email)
return get_nickname(email, self.never_me, request)
@register.tag
def nickname(_parser, token):
"""Almost the same as nickname filter but the result is cached."""
try:
_, email_address, never_me = token.split_contents()
except ValueError:
try:
_, email_address = token.split_contents()
never_me = ''
except ValueError:
raise django.template.TemplateSyntaxError(
"%r requires exactly one or two arguments" % token.contents.split()[0])
return NicknameNode(email_address, never_me)
@register.tag
def nicknames(parser, token):
"""Wrapper for nickname tag with is_multi flag enabled."""
node = nickname(parser, token)
node.is_multi = True
return node
@register.filter
def num_drafts(issue, user):
"""Returns number of drafts for given user.
:param issue: an Issue instance.
:param user: an User instance or None.
:returns: Drafts for given object.
"""
return issue.get_num_drafts(user)
@register.filter
def format_duration(seconds):
"""Convert a number of seconds into human readable compact string."""
if not seconds:
return seconds
seconds = int(seconds)
prefix = ''
if seconds < 0:
prefix = '-'
seconds = -seconds
minutes = math.floor(seconds / 60)
seconds -= minutes * 60
hours = math.floor(minutes / 60)
minutes -= hours * 60
days = math.floor(hours / 24)
hours -= days * 24
out = []
if days > 0:
out.append('%dd' % days)
if hours > 0 or days > 0:
out.append('%02dh' % hours)
if minutes > 0 or hours > 0 or days > 0:
out.append('%02dm' % minutes)
if seconds > 0 and not out:
# Skip seconds unless there's only seconds.
out.append('%02ds' % seconds)
return prefix + ''.join(out).lstrip('0')
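# Worked examples of format_duration (computed from the logic above):
#   format_duration(45)    -> '45s'
#   format_duration(3900)  -> '1h05m'
#   format_duration(90061) -> '1d01h01m'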
| 29.982036
| 79
| 0.707509
|
4a071532a6373b7ad2e1d682cba4de8434257672
| 8,655
|
py
|
Python
|
airflow/gcp/operators/datastore.py
|
AntonVolDev/airflow
|
b297b59593f6da6610a65cc0fabcafa5ea5992a7
|
[
"Apache-2.0"
] | 1
|
2019-09-06T09:55:18.000Z
|
2019-09-06T09:55:18.000Z
|
airflow/gcp/operators/datastore.py
|
AntonVolDev/airflow
|
b297b59593f6da6610a65cc0fabcafa5ea5992a7
|
[
"Apache-2.0"
] | null | null | null |
airflow/gcp/operators/datastore.py
|
AntonVolDev/airflow
|
b297b59593f6da6610a65cc0fabcafa5ea5992a7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains Google Datastore operators.
"""
from airflow.gcp.hooks.datastore import DatastoreHook
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class DatastoreExportOperator(BaseOperator):
"""
Export entities from Google Cloud Datastore to Cloud Storage
:param bucket: name of the cloud storage bucket to backup data
:type bucket: str
:param namespace: optional namespace path in the specified Cloud Storage bucket
to backup data. If this namespace does not exist in GCS, it will be created.
:type namespace: str
:param datastore_conn_id: the name of the Datastore connection id to use
:type datastore_conn_id: str
    :param cloud_storage_conn_id: the name of the Cloud Storage connection id used
        to empty the backup location before export when ``overwrite_existing`` is set
    :type cloud_storage_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have domain-wide
delegation enabled.
:type delegate_to: str
:param entity_filter: description of what data from the project is included in the
export, refer to
https://cloud.google.com/datastore/docs/reference/rest/Shared.Types/EntityFilter
:type entity_filter: dict
:param labels: client-assigned labels for cloud storage
:type labels: dict
:param polling_interval_in_seconds: number of seconds to wait before polling for
execution status again
:type polling_interval_in_seconds: int
:param overwrite_existing: if the storage bucket + namespace is not empty, it will be
emptied prior to exports. This enables overwriting existing backups.
:type overwrite_existing: bool
"""
template_fields = ['bucket', 'namespace', 'entity_filter', 'labels']
@apply_defaults
def __init__(self, # pylint:disable=too-many-arguments
bucket,
namespace=None,
datastore_conn_id='google_cloud_default',
cloud_storage_conn_id='google_cloud_default',
delegate_to=None,
entity_filter=None,
labels=None,
polling_interval_in_seconds=10,
overwrite_existing=False,
project_id=None,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.datastore_conn_id = datastore_conn_id
self.cloud_storage_conn_id = cloud_storage_conn_id
self.delegate_to = delegate_to
self.bucket = bucket
self.namespace = namespace
self.entity_filter = entity_filter
self.labels = labels
self.polling_interval_in_seconds = polling_interval_in_seconds
self.overwrite_existing = overwrite_existing
self.project_id = project_id
if kwargs.get('xcom_push') is not None:
raise AirflowException("'xcom_push' was deprecated, use 'BaseOperator.do_xcom_push' instead")
def execute(self, context):
self.log.info('Exporting data to Cloud Storage bucket %s', self.bucket)
if self.overwrite_existing and self.namespace:
gcs_hook = GoogleCloudStorageHook(self.cloud_storage_conn_id)
objects = gcs_hook.list(self.bucket, prefix=self.namespace)
for obj in objects:
gcs_hook.delete(self.bucket, obj)
ds_hook = DatastoreHook(self.datastore_conn_id, self.delegate_to)
result = ds_hook.export_to_storage_bucket(bucket=self.bucket,
namespace=self.namespace,
entity_filter=self.entity_filter,
labels=self.labels,
project_id=self.project_id
)
operation_name = result['name']
result = ds_hook.poll_operation_until_done(operation_name,
self.polling_interval_in_seconds)
state = result['metadata']['common']['state']
if state != 'SUCCESSFUL':
raise AirflowException('Operation failed: result={}'.format(result))
return result
class DatastoreImportOperator(BaseOperator):
"""
Import entities from Cloud Storage to Google Cloud Datastore
:param bucket: container in Cloud Storage to store data
:type bucket: str
:param file: path of the backup metadata file in the specified Cloud Storage bucket.
It should have the extension .overall_export_metadata
:type file: str
:param namespace: optional namespace of the backup metadata file in
the specified Cloud Storage bucket.
:type namespace: str
    :param entity_filter: description of what data from the project is included in
        the import, refer to
https://cloud.google.com/datastore/docs/reference/rest/Shared.Types/EntityFilter
:type entity_filter: dict
:param labels: client-assigned labels for cloud storage
:type labels: dict
:param datastore_conn_id: the name of the connection id to use
:type datastore_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have domain-wide
delegation enabled.
:type delegate_to: str
:param polling_interval_in_seconds: number of seconds to wait before polling for
execution status again
:type polling_interval_in_seconds: int
"""
template_fields = ['bucket', 'file', 'namespace', 'entity_filter', 'labels']
@apply_defaults
def __init__(self,
bucket,
file,
namespace=None,
entity_filter=None,
labels=None,
datastore_conn_id='google_cloud_default',
delegate_to=None,
polling_interval_in_seconds=10,
project_id=None,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.datastore_conn_id = datastore_conn_id
self.delegate_to = delegate_to
self.bucket = bucket
self.file = file
self.namespace = namespace
self.entity_filter = entity_filter
self.labels = labels
self.polling_interval_in_seconds = polling_interval_in_seconds
self.project_id = project_id
if kwargs.get('xcom_push') is not None:
raise AirflowException("'xcom_push' was deprecated, use 'BaseOperator.do_xcom_push' instead")
def execute(self, context):
self.log.info('Importing data from Cloud Storage bucket %s', self.bucket)
ds_hook = DatastoreHook(self.datastore_conn_id, self.delegate_to)
result = ds_hook.import_from_storage_bucket(bucket=self.bucket,
file=self.file,
namespace=self.namespace,
entity_filter=self.entity_filter,
labels=self.labels,
project_id=self.project_id
)
operation_name = result['name']
result = ds_hook.poll_operation_until_done(operation_name,
self.polling_interval_in_seconds)
state = result['metadata']['common']['state']
if state != 'SUCCESSFUL':
raise AirflowException('Operation failed: result={}'.format(result))
return result
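# Hedged usage sketch (the bucket name, task ids and file path below are
# illustrative assumptions, not values from this module):
#
# from airflow import DAG
# from airflow.utils.dates import days_ago
#
# with DAG('datastore_backup', schedule_interval=None,
#          start_date=days_ago(1)) as dag:
#     export_entities = DatastoreExportOperator(
#         task_id='export_entities',
#         bucket='my-backup-bucket',
#         overwrite_existing=True,
#     )
#     import_entities = DatastoreImportOperator(
#         task_id='import_entities',
#         bucket='my-backup-bucket',
#         file='export/my_export.overall_export_metadata',
#     )
#     export_entities >> import_entities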
| 44.613402
| 105
| 0.64067
|
4a0715804f88ea83e9b9702e740d712cac990b63
| 950
|
py
|
Python
|
test_schedule.py
|
leonardozcm/ASFM-Net-Review
|
2584d2d098c760e559d3f632b72b9ad9881c59d5
|
[
"MIT"
] | 1
|
2022-02-21T11:40:34.000Z
|
2022-02-21T11:40:34.000Z
|
test_schedule.py
|
leonardozcm/ASFM-Net-Pytorch
|
2584d2d098c760e559d3f632b72b9ad9881c59d5
|
[
"MIT"
] | 2
|
2021-12-30T17:01:19.000Z
|
2022-01-01T08:35:49.000Z
|
test_schedule.py
|
leonardozcm/ASFM-Net-Review
|
2584d2d098c760e559d3f632b72b9ad9881c59d5
|
[
"MIT"
] | null | null | null |
def getalpha(schedule, step):
    # Piecewise-constant lookup: 'schedule' is a list of (step_threshold, alpha)
    # pairs sorted by threshold. Return the alpha of the last threshold that
    # 'step' has reached, or 0.0 before the first threshold.
    alpha = 0.0
    for (point, alpha_) in schedule:
        if step >= point:
            alpha = alpha_
        else:
            break
    return alpha
step_stage_0 = 0
step_stage_1 = 5e4
step_stage_2 = 7e4
step_stage_3 = 1e5
step_stage_4 = 2.5e5
step_stages = [step_stage_0, step_stage_1,
step_stage_2, step_stage_3, step_stage_4]
schedule = [[1., 0., 0., 0., 0.],
[0., 1., 1., 0., 0.],
[0., 0.1, 0.5, 1.0, 0.9]]
schedule_new = []
for ls in schedule:
ls_new = []
for i, a in enumerate(ls):
ls_new.append((step_stages[i], a))
schedule_new.append(ls_new)
step_0 = 2e4
step_1 = 6e4
step_2 = 9e4
step_3 = 2e5
step_4 = 3e5
steps_test = [step_0, step_1, step_2, step_3, step_4]
for i in steps_test:
print("="*10, " ", i, " ", "="*10)
result = []
for index in range(3):
result.append(getalpha(schedule_new[index], i))
print(result)
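# Sanity check, worked by hand from the schedules above: step 2e4 precedes the
# 5e4 threshold, giving [1.0, 0.0, 0.0]; step 6e4 sits between the 5e4 and 7e4
# thresholds, giving [0.0, 1.0, 0.1].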
| 21.111111
| 56
| 0.578947
|
4a0715a377fe4815b555f9e7e19c294fb90d7371
| 86
|
py
|
Python
|
lmctl/config/exceptions.py
|
manojn97/lmctl
|
844925cb414722351efac90cb97f10c1185eef7a
|
[
"Apache-2.0"
] | 3
|
2021-07-19T09:46:01.000Z
|
2022-03-07T13:51:25.000Z
|
lmctl/config/exceptions.py
|
manojn97/lmctl
|
844925cb414722351efac90cb97f10c1185eef7a
|
[
"Apache-2.0"
] | 43
|
2019-08-27T12:36:29.000Z
|
2020-08-27T14:50:40.000Z
|
lmctl/config/exceptions.py
|
manojn97/lmctl
|
844925cb414722351efac90cb97f10c1185eef7a
|
[
"Apache-2.0"
] | 7
|
2020-09-22T20:32:17.000Z
|
2022-03-29T12:25:51.000Z
|
class ConfigError(Exception):
pass
class ConfigParserError(ConfigError):
pass
| 17.2
| 37
| 0.767442
|
4a0715f441b9ba1d582090ed7424f2ab9771707c
| 512
|
py
|
Python
|
alipay/aop/api/response/KoubeiMarketingCampaignItemMerchantactivityModifyResponse.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/response/KoubeiMarketingCampaignItemMerchantactivityModifyResponse.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/response/KoubeiMarketingCampaignItemMerchantactivityModifyResponse.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class KoubeiMarketingCampaignItemMerchantactivityModifyResponse(AlipayResponse):
def __init__(self):
super(KoubeiMarketingCampaignItemMerchantactivityModifyResponse, self).__init__()
def parse_response_content(self, response_content):
        response = super(KoubeiMarketingCampaignItemMerchantactivityModifyResponse, self).parse_response_content(response_content)
        # Return the parsed dict so callers see the payload; the original
        # generated code computed it and fell off the end, returning None.
        return response
| 32
| 130
| 0.810547
|
4a07163506fcd21ee0f141e4098158388ca3c1d7
| 306
|
py
|
Python
|
util.py
|
Oyekunle-Mark/roaming-serpent
|
c9433234d42e4fc7ab2a36e6186a962e201ce1c1
|
[
"MIT"
] | 1
|
2020-06-04T08:13:47.000Z
|
2020-06-04T08:13:47.000Z
|
util.py
|
Oyekunle-Mark/roaming-serpent
|
c9433234d42e4fc7ab2a36e6186a962e201ce1c1
|
[
"MIT"
] | null | null | null |
util.py
|
Oyekunle-Mark/roaming-serpent
|
c9433234d42e4fc7ab2a36e6186a962e201ce1c1
|
[
"MIT"
] | 1
|
2019-11-25T22:38:58.000Z
|
2019-11-25T22:38:58.000Z
|
class Queue():
    """A minimal FIFO queue backed by a Python list."""
    def __init__(self):
        self.queue = []
    def enqueue(self, value):
        # Add to the back of the queue.
        self.queue.append(value)
    def dequeue(self):
        # Remove and return the front item, or None if the queue is empty.
        # Note that list.pop(0) is O(n); fine for small queues.
        if self.size() > 0:
            return self.queue.pop(0)
        else:
            return None
    def size(self):
        return len(self.queue)
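# Usage sketch (illustrative, not in the original file):
#   q = Queue()
#   q.enqueue('a'); q.enqueue('b')
#   q.dequeue()        # -> 'a' (FIFO order)
#   q.size()           # -> 1
#   Queue().dequeue()  # -> None on an empty queue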
| 19.125
| 36
| 0.522876
|
4a07165c05f58647f0cdd062b29386bb73e9cc45
| 10,717
|
py
|
Python
|
apps/projects/migrations/0011_auto__add_field_project_allow_participation__add_field_project_allow_s.py
|
Mozilla-GitHub-Standards/93f18f14efcf5fdfc0e04f9bf247f66baf46663f37b1d2087ab8d850abc90803
|
4e374b4d52dfb9039ebe543e7f27682189022307
|
[
"BSD-3-Clause"
] | 2
|
2015-04-06T15:20:29.000Z
|
2016-12-30T12:25:11.000Z
|
apps/projects/migrations/0011_auto__add_field_project_allow_participation__add_field_project_allow_s.py
|
Mozilla-GitHub-Standards/93f18f14efcf5fdfc0e04f9bf247f66baf46663f37b1d2087ab8d850abc90803
|
4e374b4d52dfb9039ebe543e7f27682189022307
|
[
"BSD-3-Clause"
] | 2
|
2019-02-17T17:38:02.000Z
|
2019-03-28T03:49:16.000Z
|
apps/projects/migrations/0011_auto__add_field_project_allow_participation__add_field_project_allow_s.py
|
Mozilla-GitHub-Standards/93f18f14efcf5fdfc0e04f9bf247f66baf46663f37b1d2087ab8d850abc90803
|
4e374b4d52dfb9039ebe543e7f27682189022307
|
[
"BSD-3-Clause"
] | 1
|
2019-03-28T03:49:18.000Z
|
2019-03-28T03:49:18.000Z
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Project.allow_participation'
db.add_column('projects_project', 'allow_participation', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
# Adding field 'Project.allow_sub_projects'
db.add_column('projects_project', 'allow_sub_projects', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
# Adding field 'Project.parent_project_id'
db.add_column('projects_project', 'parent_project_id', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True), keep_default=False)
# Adding field 'Project.sub_project_label'
db.add_column('projects_project', 'sub_project_label', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Project.allow_participation'
db.delete_column('projects_project', 'allow_participation')
# Deleting field 'Project.allow_sub_projects'
db.delete_column('projects_project', 'allow_sub_projects')
# Deleting field 'Project.parent_project_id'
db.delete_column('projects_project', 'parent_project_id')
# Deleting field 'Project.sub_project_label'
db.delete_column('projects_project', 'sub_project_label')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'projects.link': {
'Meta': {'object_name': 'Link'},
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Project']", 'null': 'True', 'blank': 'True'}),
'subscribe': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['subscriber.Subscription']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'projects.project': {
'Meta': {'object_name': 'Project'},
'allow_participation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'allow_sub_projects': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'featured_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'projects_following'", 'symmetrical': 'False', 'to': "orm['users.Profile']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'long_description': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent_project_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'sub_project_label': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'team_members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['users.Profile']", 'symmetrical': 'False'}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['topics.Topic']", 'symmetrical': 'False'})
},
'subscriber.subscription': {
'Meta': {'object_name': 'Subscription'},
'hub': ('django.db.models.fields.URLField', [], {'max_length': '1023'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lease_expiration': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'topic': ('django.db.models.fields.URLField', [], {'max_length': '1023'}),
'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'verify_token': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
},
'topics.topic': {
'Meta': {'object_name': 'Topic'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'draft': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'long_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'users.profile': {
'Meta': {'object_name': 'Profile'},
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'featured_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['projects']
| 71.446667
| 182
| 0.579453
|
4a0716c7e5a1249fa4d12f1c2fd322823644a410
| 6,951
|
py
|
Python
|
safety_gym/test/test_bench.py
|
zhangdongkun98/safety-gym
|
21c91a9d0e7460b3b37ba6bb3e082b680d98f333
|
[
"MIT"
] | 327
|
2019-11-21T16:50:41.000Z
|
2022-03-31T16:23:38.000Z
|
safety_gym/test/test_bench.py
|
zhangdongkun98/safety-gym
|
21c91a9d0e7460b3b37ba6bb3e082b680d98f333
|
[
"MIT"
] | 13
|
2019-11-26T19:40:26.000Z
|
2022-03-30T07:17:13.000Z
|
safety_gym/test/test_bench.py
|
zhangdongkun98/safety-gym
|
21c91a9d0e7460b3b37ba6bb3e082b680d98f333
|
[
"MIT"
] | 81
|
2019-11-22T01:16:36.000Z
|
2022-03-14T10:34:21.000Z
|
#!/usr/bin/env python
import re
import unittest
import numpy as np
import gym
import gym.spaces
from safety_gym.envs.engine import Engine
class TestBench(unittest.TestCase):
def test_goal(self):
''' Point should run into and get a goal '''
config = {
'robot_base': 'xmls/point.xml',
'goal_size': 0.5,
'goal_placements': [(0, -.5, 5, .5)],
'reward_goal': 1.0,
'reward_distance': 1.0,
'robot_locations': [(0, 0)],
'robot_rot': 0,
'_seed': 0,
}
env = Engine(config)
env.reset()
goal_met = False
for _ in range(999):
act = np.zeros(env.action_space.shape)
act[0] = 1
_, reward, done, info = env.step(act)
self.assertFalse(done)
# If we have not yet got the goal
if not goal_met:
# Reward should be positive, since we're moving towards it.
self.assertGreater(reward, 0)
# Update if we got the goal
if 'goal_met' in info:
goal_met = info['goal_met']
                    # Reward includes the 1.0 goal bonus on top of the
                    # positive distance reward, so it exceeds 1
                    self.assertGreater(reward, 1)
# env.render() # Uncomment to visualize test
self.assertTrue(goal_met)
def test_hazards(self):
''' Point should run into and get a hazard '''
config = {
'robot_base': 'xmls/point.xml',
'goal_size': 0.5,
'goal_placements': [(5, -.5, 10, .5)],
'reward_goal': 1.0,
'reward_distance': 1.0,
'constrain_indicator': True,
'constrain_hazards': True,
'hazards_num': 1,
'hazards_size': 0.5,
'hazards_locations': [(2, 0)],
'hazards_cost': 1.0,
'robot_locations': [(0, 0)],
'robot_rot': 0,
'_seed': 0,
}
env = Engine(config)
env.reset()
goal_met = False
hazard_found = False
for _ in range(999):
act = np.zeros(env.action_space.shape)
act[0] = 1
_, reward, done, info = env.step(act)
if not hazard_found:
if info['cost']:
hazard_found = True
self.assertEqual(info['cost'], 1.0) # Sparse costs
self.assertGreater(info['cost_hazards'], 0.0) # Nonzero hazard cost
if 'goal_met' in info:
goal_met = info['goal_met']
# env.render() # Uncomment to visualize test
self.assertTrue(hazard_found)
self.assertTrue(goal_met)
def test_vases(self):
''' Point should run into and past a vase, pushing it out of the way '''
config = {
'robot_base': 'xmls/point.xml',
'goal_size': 0.5,
'goal_placements': [(5, -.5, 10, .5)],
'reward_goal': 1.0,
'reward_distance': 1.0,
'constrain_indicator': True,
'constrain_vases': True,
'vases_num': 1,
'vases_locations': [(2, 0)],
'vases_contact_cost': 1.0,
'vases_displace_cost': 1.0,
'vases_velocity_cost': 1.0,
'robot_locations': [(0, 0)],
'robot_rot': 0,
'_seed': 0,
}
env = Engine(config)
env.reset()
goal_met = False
vase_found = False
for _ in range(999):
act = np.zeros(env.action_space.shape)
act[0] = 1
_, reward, done, info = env.step(act)
if not vase_found:
if info['cost']:
vase_found = True
self.assertEqual(info['cost'], 1.0) # Sparse costs
self.assertGreater(info['cost_vases_contact'], 0.0) # Nonzero vase cost
self.assertGreater(info['cost_vases_velocity'], 0.0) # Nonzero vase cost
else:
# We've already found the vase (and hit it), ensure displace cost
self.assertEqual(info['cost'], 1.0) # Sparse costs
self.assertGreater(info['cost_vases_displace'], 0.0) # Nonzero vase cost
if 'goal_met' in info:
goal_met = info['goal_met']
# env.render() # Uncomment to visualize test
self.assertTrue(vase_found)
self.assertTrue(goal_met)
def check_correct_lidar(self, env_name):
''' Check that a benchmark env has the right lidar obs for the objects in scene '''
env = gym.make(env_name)
env.reset()
physics = env.unwrapped
world = physics.world
obs_space_dict = physics.obs_space_dict
task = physics.task
lidar_count = sum('lidar' in o.lower() for o in obs_space_dict.keys())
# Goal based lidar
if task == 'x':
self.assertEqual(lidar_count, 0)
elif task == 'circle':
self.assertEqual(lidar_count, 1)
self.assertIn('circle_lidar', obs_space_dict)
elif task == 'goal':
self.assertIn('goal_lidar', obs_space_dict)
elif task == 'push':
self.assertIn('goal_lidar', obs_space_dict)
self.assertIn('box_lidar', obs_space_dict)
elif task == 'button':
self.assertIn('goal_lidar', obs_space_dict)
self.assertIn('buttons_lidar', obs_space_dict)
if physics.constrain_hazards or physics.hazards_num > 0:
self.assertIn('hazards_lidar', obs_space_dict)
self.assertGreater(physics.hazards_num, 0)
if physics.constrain_vases or physics.vases_num > 0:
self.assertIn('vases_lidar', obs_space_dict)
self.assertGreater(physics.vases_num, 0)
if physics.constrain_pillars or physics.pillars_num > 0:
self.assertIn('pillars_lidar', obs_space_dict)
self.assertGreater(physics.pillars_num, 0)
if physics.constrain_buttons or physics.buttons_num > 0:
self.assertIn('buttons_lidar', obs_space_dict)
self.assertGreater(physics.buttons_num, 0)
if physics.constrain_gremlins or physics.gremlins_num > 0:
self.assertIn('gremlins_lidar', obs_space_dict)
self.assertGreater(physics.gremlins_num, 0)
def test_correct_lidar(self):
''' We should have lidar for every object in the env '''
matched = []
for env_spec in gym.envs.registry.all():
#if re.match(r'Safexp-.*-v0', env_spec.id) is not None:
if 'Safexp' in env_spec.id and not('Vision' in env_spec.id):
matched.append(env_spec.id)
assert matched, 'Failed to match any environments!'
for env_name in matched:
print(env_name)
self.check_correct_lidar(env_name)
if __name__ == '__main__':
unittest.main()
| 38.616667
| 93
| 0.546972
|
4a071775d6e2cf956930f6b56360d6f915bf6932
| 193
|
py
|
Python
|
src/playground/count.py
|
meclav/whistle
|
b228cea165a12a5d0a4c0046c4d26440e4e733d8
|
[
"MIT"
] | null | null | null |
src/playground/count.py
|
meclav/whistle
|
b228cea165a12a5d0a4c0046c4d26440e4e733d8
|
[
"MIT"
] | null | null | null |
src/playground/count.py
|
meclav/whistle
|
b228cea165a12a5d0a4c0046c4d26440e4e733d8
|
[
"MIT"
] | null | null | null |
class Box:
    # Mutable wrapper; unused by countWithBox below, but see the Box-based
    # variant after this snippet.
    def __init__(self, x):
        self.value = x
def countWithBox():
    # Closure-based counter: 'box' lives in the enclosing scope and is
    # rebound via 'nonlocal' on each call.
    box = 0
    def count():
        nonlocal box
        box += 1
        return box
    return count
c = countWithBox()
print(c())  # 1
print(c())  # 2
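# For comparison (added sketch): the same counter without 'nonlocal', using
# the Box wrapper above as a mutable cell captured by the closure.
def countWithBoxObject():
    box = Box(0)
    def count():
        box.value += 1
        return box.value
    return count
b = countWithBoxObject()
print(b())  # 1
print(b())  # 2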
| 10.157895
| 23
| 0.626943
|
4a0717f90400c682e6c352f46e2e6c299915aa20
| 2,951
|
py
|
Python
|
src/app/controllers/auth.py
|
leeuw12/Comment-Cloud
|
bcdf4d9258e0d097736e23892833fa84c4e2bf68
|
[
"MIT"
] | null | null | null |
src/app/controllers/auth.py
|
leeuw12/Comment-Cloud
|
bcdf4d9258e0d097736e23892833fa84c4e2bf68
|
[
"MIT"
] | 1
|
2020-04-19T23:59:17.000Z
|
2020-04-19T23:59:17.000Z
|
src/app/controllers/auth.py
|
leeuw12/Comment-Cloud
|
bcdf4d9258e0d097736e23892833fa84c4e2bf68
|
[
"MIT"
] | null | null | null |
import datetime
import logging
from functools import wraps
import jwt
from flask import make_response
from flask import request, url_for, redirect, jsonify, Blueprint, render_template
from flask_login import current_user, login_required
from flask_login import login_user, logout_user
from werkzeug.security import check_password_hash, generate_password_hash
from .users import User
from .. import app, services
auth = Blueprint('auth', __name__)
logger = logging.getLogger(__name__)
@auth.route('/login', methods=['GET'])
def login():
    # current_user has no 'id' for anonymous visitors, so an AttributeError
    # here means the user is not logged in yet.
    try:
        current_user.id
        return redirect(url_for("main.admin"))
    except AttributeError:
        return render_template('login.html', error="")
@auth.route("/logout")
@login_required
def logout():
logout_user()
return redirect(url_for("main.index"))
@auth.route('/register', methods=['GET'])
def register():
    # Same anonymous-user check as login() above.
    try:
        current_user.id
        return redirect(url_for("main.admin"))
    except AttributeError:
        return render_template('register.html')
@auth.route('/login', methods=['POST'])
def login_post():
email = request.form.get('username')
password = request.form.get('password')
site_admin = services.get_site_admin_by_email(email)
if site_admin:
if check_password_hash(site_admin.Passwdhash, password):
login_user(User(site_admin))
return redirect(url_for('main.admin'))
return "ERROR"
@auth.route('/register', methods=['POST'])
def register_post():
username = request.form.get('username')
email = request.form.get('email')
password = request.form.get('password')
site_admin = services.get_site_admin_by_email(email)
if site_admin:
return redirect(url_for('auth.login'))
try:
services.add_site_admin(username, email, generate_password_hash(password, method='sha256'))
return redirect(url_for('auth.login'))
except Exception as ex:
logger.warning(ex)
return redirect(url_for('auth.register'))
# Token generator
def get_token():
expiration_date = datetime.datetime.utcnow() + \
datetime.timedelta(seconds=36000)
token = jwt.encode({'exp': expiration_date}, app.secret_key, algorithm='HS256')
# token = token[2:-1]
services.set_token(token, current_user.id)
logger.info(token)
return token
# Token decorator
# When called you need to pass a valid
# token under the variable token
def token_required(f):
@wraps(f)
def wrapper(*args, **kwargs):
token_value = request.args.get('token')
try:
logger.info('token_required: ' + str(token_value))
# str(jwt.decode(token_value, app.secret_key))
token = services.get_token_by_token(token_value)
logger.info(str(token.Status))
            if token.Status:
return f(*args, **kwargs)
        except Exception:
            # Any lookup or validation failure means the token is unacceptable.
            return make_response(jsonify({'error': 'Need a valid Token'}), 401)
return wrapper
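# Hedged usage sketch (the route below is an assumption, not part of this
# file): protect an endpoint with token_required and call it as
# /data?token=<value returned by get_token()>.
#
# @auth.route('/data')
# @token_required
# def data():
#     return make_response(jsonify({'ok': True}), 200)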
| 29.51
| 99
| 0.678753
|
4a0718c5495d489ec69fdfbe880de9a1ebf2dab9
| 1,021
|
py
|
Python
|
auth.py
|
ellygaytor/trasheddit
|
b8008f098a234125d4fb19c671ea3243d25c44a5
|
[
"MIT"
] | 3
|
2021-10-15T16:52:43.000Z
|
2022-01-05T00:33:01.000Z
|
auth.py
|
ellygaytor/trasheddit
|
b8008f098a234125d4fb19c671ea3243d25c44a5
|
[
"MIT"
] | 2
|
2022-01-15T21:25:48.000Z
|
2022-02-16T16:22:00.000Z
|
auth.py
|
ellygaytor/trasheddit
|
b8008f098a234125d4fb19c671ea3243d25c44a5
|
[
"MIT"
] | 1
|
2022-01-15T18:40:28.000Z
|
2022-01-15T18:40:28.000Z
|
import praw
import keyring
import json
def auth(username):
account = keyring.get_password("trasheddit", username)
if account is None:
print(
"No configuration found for '{}'. Please enter the PRAW configuration details now:".format(
username
)
)
client_id = str(input("client_id: "))
client_secret = str(input("client_secret: "))
username = username
password = str(input("password: "))
config = {
client_id: client_id,
client_secret: client_secret,
username: username,
password: password,
}
keyring.set_password("trasheddit", username, json.dumps(config))
config = json.loads(account)
reddit = praw.Reddit(
client_id=config.get("client_id"),
client_secret=config.get("client_secret"),
user_agent="trasheddit",
username=config.get("username"),
password=config.get("password"),
)
return reddit
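# Hedged usage sketch (the username is an assumption): returns an
# authenticated PRAW client, prompting for credentials on first run.
#
# reddit = auth('some_reddit_username')
# print(reddit.user.me())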
| 27.594595
| 103
| 0.593536
|
4a0718d5f54dd1772f6c808f8943eb0f5cfbf649
| 4,623
|
py
|
Python
|
sktime/transformations/panel/dictionary_based/_paa.py
|
marcio55afr/sktime
|
25ba2f470f037366ca6b0e529137d3d0a6191e2e
|
[
"BSD-3-Clause"
] | 5,349
|
2019-03-21T14:56:50.000Z
|
2022-03-31T11:25:30.000Z
|
sktime/transformations/panel/dictionary_based/_paa.py
|
marcio55afr/sktime
|
25ba2f470f037366ca6b0e529137d3d0a6191e2e
|
[
"BSD-3-Clause"
] | 1,803
|
2019-03-26T13:33:53.000Z
|
2022-03-31T23:58:10.000Z
|
sktime/transformations/panel/dictionary_based/_paa.py
|
marcio55afr/sktime
|
25ba2f470f037366ca6b0e529137d3d0a6191e2e
|
[
"BSD-3-Clause"
] | 911
|
2019-03-25T01:21:30.000Z
|
2022-03-31T04:45:51.000Z
|
# -*- coding: utf-8 -*-
import pandas as pd
from sktime.transformations.base import _PanelToPanelTransformer
from sktime.datatypes._panel._convert import from_nested_to_2d_array
from sktime.utils.validation.panel import check_X
__author__ = "Matthew Middlehurst"
class PAA(_PanelToPanelTransformer):
"""
(PAA) Piecewise Aggregate Approximation Transformer, as described in
Eamonn Keogh, Kaushik Chakrabarti, Michael Pazzani, and Sharad Mehrotra.
Dimensionality reduction for fast similarity search in large time series
databases.
Knowledge and information Systems, 3(3), 263-286, 2001.
For each series reduce the dimensionality to num_intervals, where each
value is the mean of values in
the interval.
TO DO: pythonise it to make it more efficient. Maybe check vs this version
http://vigne.sh/posts/piecewise-aggregate-approx/
Could have: Tune the interval size in fit somehow?
Parameters
----------
num_intervals : int, dimension of the transformed data (default 8)
"""
def __init__(self, num_intervals=8):
self.num_intervals = num_intervals
super(PAA, self).__init__()
def set_num_intervals(self, n):
self.num_intervals = n
def transform(self, X, y=None):
"""
Parameters
----------
X : nested pandas DataFrame of shape [n_instances, n_dims]
Nested dataframe with multivariate time-series in cells.
Returns
-------
dims: Pandas data frame with first dimension in column zero,
second in column one etc.
"""
# Check the data
self.check_is_fitted()
X = check_X(X, enforce_univariate=False, coerce_to_pandas=True)
# Get information about the dataframe
num_atts = len(X.iloc[0, 0])
col_names = X.columns
# Check the parameters are appropriate
self._check_parameters(num_atts)
# On each dimension, perform PAA
dataFrames = []
for x in col_names:
dataFrames.append(self._perform_paa_along_dim(pd.DataFrame(X[x])))
# Combine the dimensions together
result = pd.concat(dataFrames, axis=1, sort=False)
result.columns = col_names
return result
def _perform_paa_along_dim(self, X):
X = from_nested_to_2d_array(X, return_numpy=True)
num_atts = X.shape[1]
num_insts = X.shape[0]
dims = pd.DataFrame()
data = []
for i in range(num_insts):
series = X[i, :]
frames = []
current_frame = 0
current_frame_size = 0
frame_length = num_atts / self.num_intervals
frame_sum = 0
for n in range(num_atts):
remaining = frame_length - current_frame_size
if remaining > 1:
frame_sum += series[n]
current_frame_size += 1
else:
frame_sum += remaining * series[n]
current_frame_size += remaining
if current_frame_size == frame_length:
frames.append(frame_sum / frame_length)
current_frame += 1
frame_sum = (1 - remaining) * series[n]
current_frame_size = 1 - remaining
# if the last frame was lost due to double imprecision
if current_frame == self.num_intervals - 1:
frames.append(frame_sum / frame_length)
data.append(pd.Series(frames))
dims[0] = data
return dims
def _check_parameters(self, num_atts):
"""
Function for checking the values of parameters inserted into PAA.
For example, the number of subsequences cannot be larger than the
time series length.
Throws
------
ValueError or TypeError if a parameters input is invalid.
"""
if isinstance(self.num_intervals, int):
if self.num_intervals <= 0:
raise ValueError(
"num_intervals must have the \
value of at least 1"
)
if self.num_intervals > num_atts:
raise ValueError(
"num_intervals cannot be higher \
than the time series length."
)
else:
raise TypeError(
"num_intervals must be an 'int'. Found '"
+ type(self.num_intervals).__name__
+ "' instead."
)
| 32.104167
| 78
| 0.58209
|
4a071966998cc02da81c9fe12c651b84e3551c9b
| 2,613
|
py
|
Python
|
tortoise/tests/test__models__.py
|
trilleplay/tortoise-orm
|
456e6893f202e8225bf4087072f58843f67dc65c
|
[
"Apache-2.0"
] | null | null | null |
tortoise/tests/test__models__.py
|
trilleplay/tortoise-orm
|
456e6893f202e8225bf4087072f58843f67dc65c
|
[
"Apache-2.0"
] | 5
|
2020-03-24T17:23:14.000Z
|
2021-12-13T20:12:49.000Z
|
tortoise/tests/test__models__.py
|
lth1994/123
|
378c336c22a460849f8730ca2ec064307b0f2e41
|
[
"Apache-2.0"
] | null | null | null |
"""
Tests for __models__
"""
import re
from asynctest.mock import CoroutineMock, patch
from tortoise import Tortoise
from tortoise.contrib import test
from tortoise.exceptions import ConfigurationError
from tortoise.utils import generate_post_table_sql, get_schema_sql
class TestGenerateSchema(test.SimpleTestCase):
async def setUp(self):
try:
Tortoise.apps = {}
Tortoise._connections = {}
Tortoise._inited = False
except ConfigurationError:
pass
Tortoise._inited = False
self.sqls = ""
self.engine = test.getDBConfig(app_label="models", modules=[])["connections"]["models"][
"engine"
]
async def tearDown(self):
Tortoise._connections = {}
await Tortoise._reset_apps()
async def init_for(self, module: str, safe=False) -> None:
if self.engine != "tortoise.backends.sqlite":
raise test.SkipTest("sqlite only")
with patch(
"tortoise.backends.sqlite.client.SqliteClient.create_connection", new=CoroutineMock()
):
await Tortoise.init(
{
"connections": {
"default": {
"engine": "tortoise.backends.sqlite",
"credentials": {"file_path": ":memory:"},
}
},
"apps": {"models": {"models": [module], "default_connection": "default"}},
}
)
self.sqls = get_schema_sql(Tortoise._connections["default"], safe).split("; ")
self.post_sqls = generate_post_table_sql(Tortoise._connections["default"], safe).split(
"; "
)
def get_sql(self, text: str) -> str:
return re.sub(r"[ \t\n\r]+", " ", [sql for sql in self.sqls if text in sql][0])
def get_post_sql(self, text: str) -> str:
return re.sub(r"[ \t\n\r]+", " ", [sql for sql in self.post_sqls if text in sql][0])
async def test_good(self):
await self.init_for("tortoise.tests.test__models__good")
self.assertIn("goodtournament", "; ".join(self.sqls))
self.assertIn("inaclasstournament", "; ".join(self.sqls))
self.assertNotIn("badtournament", "; ".join(self.sqls))
async def test_bad(self):
await self.init_for("tortoise.tests.test__models__bad")
self.assertNotIn("goodtournament", "; ".join(self.sqls))
self.assertNotIn("inaclasstournament", "; ".join(self.sqls))
self.assertIn("badtournament", "; ".join(self.sqls))
| 36.291667
| 99
| 0.578263
|
4a0719bb2ea2907585b32082a989be0909a2353d
| 19,825
|
py
|
Python
|
ml2p/core.py
|
prodigyfinance/ml2p
|
a6df55e9ce81e68619fd2f2891981a39a9186651
|
[
"0BSD"
] | 3
|
2021-10-11T05:35:45.000Z
|
2022-02-21T09:54:16.000Z
|
ml2p/core.py
|
hodgestar/ml2p
|
3f82e7fbf79345cead67ee18de88589a1ae82b97
|
[
"0BSD"
] | 7
|
2020-09-16T13:55:16.000Z
|
2021-06-11T08:38:03.000Z
|
ml2p/core.py
|
hodgestar/ml2p
|
3f82e7fbf79345cead67ee18de88589a1ae82b97
|
[
"0BSD"
] | 3
|
2020-09-15T14:38:25.000Z
|
2020-09-17T20:35:35.000Z
|
# -*- coding: utf-8 -*-
""" ML2P core utilities.
"""
import datetime
import enum
import importlib
import json
import os
import pathlib
import shutil
import tarfile
import urllib.parse
import uuid
import warnings
import boto3
import yaml
from . import __version__ as ml2p_version
from . import hyperparameters
from .errors import LocalEnvError
class ModellingProject:
""" Object for holding CLI context. """
def __init__(self, cfg):
with open(cfg) as f:
self.cfg = yaml.safe_load(f)
self.project = self.cfg["project"]
self.s3 = S3URL(self.cfg["s3folder"])
self.train = ModellingSubCfg(self.cfg, "train")
self.deploy = ModellingSubCfg(self.cfg, "deploy")
self.notebook = ModellingSubCfg(self.cfg, "notebook")
self.models = ModellingSubCfg(self.cfg, "models", defaults="models")
def full_job_name(self, job_name):
return "{}-{}".format(self.project, job_name)
def tags(self):
return [{"Key": "ml2p-project", "Value": self.cfg["project"]}]
class ModellingSubCfg:
""" Holder for training or deployment config. """
def __init__(self, cfg, section, defaults="defaults"):
self._cfg = cfg
self._defaults = cfg.get(defaults, {})
self._section = cfg.get(section, {})
def __getattr__(self, name):
if name in self._section:
return self._section[name]
return self._defaults[name]
def __getitem__(self, name):
if name in self._section:
return self._section[name]
return self._defaults[name]
def __setitem__(self, name, value):
self._section[name] = value
def keys(self):
keys = set(self._section.keys())
keys.update(self._defaults.keys())
return sorted(keys)
def get(self, name, default=None):
if name in self._section:
return self._section[name]
return self._defaults.get(name, default)
class S3URL:
""" A friendly interface to an S3 URL. """
def __init__(self, s3folder):
self._s3url = urllib.parse.urlparse(s3folder)
self._s3root = self._s3url.path.strip("/")
def bucket(self):
""" Return the bucket of the S3 URL.
:rtype: str
:returns:
The bucket of the S3 URL.
"""
return self._s3url.netloc
def path(self, suffix):
""" Return the base path of the S3 URL followed by a '/' and the
given suffix.
:param str suffix:
The suffix to append.
:rtype: str
:returns:
The path with the suffix appended.
"""
path = self._s3root + "/" + suffix.lstrip("/")
return path.lstrip("/") # handles empty s3root
def url(self, suffix=""):
""" Return S3 URL followed by a '/' and the given suffix.
:param str suffix:
The suffix to append. Default: "".
:rtype: str
:returns:
The URL with the suffix appended.
"""
return "s3://{}/{}".format(self._s3url.netloc, self.path(suffix))
class SageMakerEnvType(enum.Enum):
""" The type of SageMakerEnvironment.
"""
TRAIN = "train"
SERVE = "serve"
LOCAL = "local"
class SageMakerEnv:
""" An interface to the SageMaker docker environment.
Attributes that are expected to be available in both training and serving
environments:
* `env_type` - Whether this is a training, serving or local environment
(type: ml2p.core.SageMakerEnvType).
* `project` - The ML2P project name (type: str).
* `model_cls` - The fulled dotted Python name of the ml2p.core.Model class to
be used for training and prediction (type: str). This may be None if the
docker image itself specifies the name with `ml2p-docker --model ...`.
* `s3` - The URL of the project S3 bucket (type: ml2p.core.S3URL).
Attributes that are only expected to be available while training (and that will
be None when serving the model):
* `training_job_name` - The full job name of the training job (type: str).
Attributes that are only expected to be available while serving the model (and
that will be None when serving the model):
* `model_version` - The full job name of the deployed model, or None
during training (type: str).
* `record_invokes` - Whether to store a record of each invocation of the
endpoint in S3 (type: bool).
In the training environment settings are loaded from hyperparameters stored by
ML2P when the training job is created.
In the serving environment settings are loaded from environment variables stored
by ML2P when the model is created.
"""
TRAIN = SageMakerEnvType.TRAIN
SERVE = SageMakerEnvType.SERVE
LOCAL = SageMakerEnvType.LOCAL
def __init__(self, ml_folder, environ=None):
self._ml_folder = pathlib.Path(ml_folder)
if environ is None:
if "TRAINING_JOB_NAME" in os.environ:
# this is a training job instance
environ = self._train_environ()
else:
# this is a serving instance
environ = self._serve_environ()
self.env_type = environ["env_type"]
self.training_job_name = environ["training_job_name"]
self.model_version = environ["model_version"]
self.record_invokes = environ["record_invokes"]
self.project = environ["project"]
self.model_cls = environ["model_cls"]
self.s3 = None
if environ["s3_url"]:
self.s3 = S3URL(environ["s3_url"])
def _train_environ(self):
environ = self.hyperparameters().get("ML2P_ENV", {})
return {
"env_type": self.TRAIN,
"training_job_name": os.environ.get("TRAINING_JOB_NAME", None),
"model_version": None,
"record_invokes": None,
"project": environ.get("ML2P_PROJECT", None),
"model_cls": environ.get("ML2P_MODEL_CLS", None),
"s3_url": environ.get("ML2P_S3_URL", None),
}
def _serve_environ(self):
environ = os.environ
return {
"env_type": self.SERVE,
"training_job_name": None,
"model_version": environ.get("ML2P_MODEL_VERSION", None),
"record_invokes": environ.get("ML2P_RECORD_INVOKES", "false") == "true",
"project": environ.get("ML2P_PROJECT", None),
"model_cls": environ.get("ML2P_MODEL_CLS", None),
"s3_url": environ.get("ML2P_S3_URL", None),
}
def hyperparameters(self):
hp_path = self._ml_folder / "input" / "config" / "hyperparameters.json"
if not hp_path.exists():
return {}
with hp_path.open() as f:
return hyperparameters.decode(json.load(f))
def resourceconfig(self):
rc_path = self._ml_folder / "input" / "config" / "resourceconfig.json"
if not rc_path.exists():
return {}
with rc_path.open() as f:
return json.load(f)
def dataset_folder(self, dataset=None):
if dataset is None:
dataset = "training"
else:
warnings.warn(
"Passing a dataset name to dataset_folder method(...) is deprecated."
" If you wish to access the ML2P training dataset, do not pass any"
" parameters. If you wish to access data for a specific channel, please"
" use data_channel_folder(...) instead, which matches the terminology"
" used by AWS SageMaker more accurately.",
DeprecationWarning,
)
return self._ml_folder / "input" / "data" / dataset
def data_channel_folder(self, channel):
return self._ml_folder / "input" / "data" / channel
def model_folder(self):
return self._ml_folder / "model"
def write_failure(self, text):
with open(self._ml_folder / "output" / "failure", "w") as f:
f.write(text)
class LocalEnv(SageMakerEnv):
""" An interface to a local dummy of the SageMaker environment.
:param str ml_folder:
The directory the environments files are stored in. An
error is raised if this directory does not exist. Files
and folders are created within this directory as needed.
:param str cfg:
The path to an ml2p.yml configuration file.
:param boto3.session.Session session:
A boto3 session object. Maybe be None if downloading files from
S3 is not required.
Attributes that are expected to be available in the local environment:
* `env_type` - Whether this is a training, serving or local environment
(type: ml2p.core.SageMakerEnvType).
* `project` - The ML2P project name (type: str).
* `s3` - The URL of the project S3 bucket (type: ml2p.core.S3URL).
* `model_version` - The fixed value "local" (type: str).
In the local environment settings are loaded directly from the ML2P
configuration file.
"""
def __init__(self, ml_folder, cfg, session=None):
self._session = session
self._prj = ModellingProject(cfg)
super().__init__(ml_folder, environ=self._local_environ())
if not self._ml_folder.is_dir():
raise LocalEnvError(f"Local environment folder {ml_folder} does not exist.")
self.model_folder().mkdir(exist_ok=True)
def _local_environ(self):
return {
"env_type": self.LOCAL,
"training_job_name": None,
"model_version": "local",
"record_invokes": False,
"project": self._prj.project,
"model_cls": None,
"s3_url": self._prj.s3.url(),
}
def clean_model_folder(self):
""" Remove and recreate the model folder.
This is useful to run before training a model if one wants to ensure
that the model folder is empty beforehand.
"""
model_folder = self.model_folder()
shutil.rmtree(model_folder)
model_folder.mkdir()
def download_dataset(self, dataset):
""" Download the given dataset from S3 into the local environment.
:param str dataset:
The name of the dataset in S3 to download.
"""
if self._session is None:
raise LocalEnvError("Downloading datasets requires a boto session.")
client = self._session.resource("s3")
bucket = client.Bucket(self.s3.bucket())
local_dataset = self.dataset_folder()
local_dataset.mkdir(parents=True, exist_ok=True)
s3_dataset = self.s3.path("datasets") + "/" + dataset
len_prefix = len(s3_dataset)
for s3_object in bucket.objects.filter(Prefix=s3_dataset):
if s3_object.key.endswith("/"):
# keys that end in a / are probably folders, so skip downloading them
continue
local_object = local_dataset / (s3_object.key[len_prefix:].lstrip("/"))
local_object.parent.mkdir(parents=True, exist_ok=True)
with local_object.open("wb") as f:
bucket.download_fileobj(s3_object.key, f)
def download_model(self, training_job):
""" Download the given trained model from S3 and unpack it into the local environment.
:param str training_job:
The name of the training job whose model should be downloaded.
"""
if self._session is None:
raise LocalEnvError("Downloading models requires a boto session.")
client = self._session.resource("s3")
bucket = client.Bucket(self.s3.bucket())
local_model_tgz = self.model_folder() / "model.tar.gz"
local_model_tgz.parent.mkdir(parents=True, exist_ok=True)
s3_model_tgz = (
self.s3.path("/models")
+ "/"
+ self._prj.full_job_name(training_job)
+ "/output/model.tar.gz"
)
with local_model_tgz.open("wb") as f:
bucket.download_fileobj(s3_model_tgz, f)
tf = tarfile.open(local_model_tgz)
tf.extractall(self.model_folder())
def import_string(name):
""" Import a class given its absolute name.
:param str name:
The name of the model, e.g. mypackage.submodule.ModelTrainerClass.
"""
modname, _, classname = name.rpartition(".")
mod = importlib.import_module(modname)
return getattr(mod, classname)
class ModelTrainer:
""" An interface that allows ml2p-docker to train models within SageMaker.
"""
def __init__(self, env):
self.env = env
def train(self):
""" Train the model.
This method should:
* Read training data (using self.env to determine where to read data from).
* Train the model.
* Write the model out (using self.env to determine where to write the model
to).
* Write out any validation or model analysis alongside the model.
"""
raise NotImplementedError("Sub-classes should implement .train()")
class ModelPredictor:
""" An interface that allows ml2p-docker to make predictions from a model within
SageMaker.
"""
def __init__(self, env):
self.env = env
self.s3_client = boto3.client("s3")
def setup(self):
""" Called once before any calls to .predict(...) are made.
This method should:
* Load the model (using self.env to determine where to read the model from).
* Allocate any other resources needed in order to make predictions.
"""
pass
def teardown(self):
""" Called once after all calls to .predict(...) have ended.
This method should:
* Cleanup any resources acquired in .setup().
"""
pass
def invoke(self, data):
""" Invokes the model and returns the full result.
:param dict data:
The input data the model is being invoked with.
:rtype: dict
:returns:
The result as a dictionary.
        By default this method returns a dictionary containing:
* metadata: The result of calling .metadata().
* result: The result of calling .result(data).
"""
prediction = {"metadata": self.metadata(), "result": self.result(data)}
if self.env.record_invokes:
self.record_invoke(data, prediction)
return prediction
def metadata(self):
""" Return metadata for a prediction that is about to be made.
:rtype: dict
:returns:
The metadata as a dictionary.
By default this method returns a dictionary containing:
        * model_version: The ML2P_MODEL_VERSION (str).
        * ml2p_version: The version of the ml2p library itself (str).
        * timestamp: The UTC POSIX timestamp in seconds (float).
"""
return {
"model_version": self.env.model_version,
"ml2p_version": ml2p_version,
"timestamp": datetime.datetime.utcnow().timestamp(),
}
def result(self, data):
""" Make a prediction given the input data.
:param dict data:
The input data to make a prediction from.
:rtype: dict
:returns:
The prediction result as a dictionary.
"""
raise NotImplementedError("Sub-classes should implement .result(...)")
def batch_invoke(self, data):
""" Invokes the model on a batch of input data and returns the full result for
each instance.
:param dict data:
The batch of input data the model is being invoked with.
:rtype: list
:returns:
The result as a list of dictionaries.
        By default this method returns a list of dictionaries containing:
* metadata: The result of calling .metadata().
* result: The result of calling .batch_result(data).
"""
metadata = self.metadata()
results = self.batch_result(data)
predictions = [{"metadata": metadata, "result": result} for result in results]
if self.env.record_invokes:
for datum, prediction in zip(data, predictions):
self.record_invoke(datum, prediction)
return {"predictions": predictions}
def batch_result(self, data):
""" Make a batch prediction given a batch of input data.
:param dict data:
The batch of input data to make a prediction from.
:rtype: list
:returns:
            The list of predictions made for each instance of the input data.
        This method can be overridden by sub-classes in order to improve
        the performance of batch predictions.
"""
return [self.result(datum) for datum in data]
def record_invoke_id(self, datum, prediction):
""" Return an id for an invocation record.
:param dict datum:
The dictionary of input values passed when invoking the endpoint.
:param dict result:
The prediction returned for datum by this predictor.
:returns dict:
Returns an *ordered* dictionary of key-value pairs that make up
the unique identifier for the invocation request.
By default this method returns a dictionary containing the following:
* "ts": an ISO8601 formatted UTC timestamp.
* "uuid": a UUID4 unique identifier.
Sub-classes may override this method to return their own identifiers,
but including these default identifiers is recommended.
The name of the record in S3 is determined by combining the key value pairs
with a dash ("-") and then separating each pair with a double dash ("--").
"""
return {"ts": datetime.datetime.utcnow().isoformat(), "uuid": str(uuid.uuid4())}
def record_invoke(self, datum, prediction):
""" Store an invocation of the endpoint in the ML2P project S3 bucket.
:param dict datum:
The dictionary of input values passed when invoking the endpoint.
:param dict result:
The prediction returned for datum by this predictor.
"""
invoke_id = self.record_invoke_id(datum, prediction)
record_filename = (
"--".join(["{}-{}".format(k, v) for k, v in invoke_id.items()]) + ".json"
)
record = {"input": datum, "result": prediction}
record_bytes = json.dumps(record).encode("utf-8")
s3_key = self.env.s3.path(
"/predictions/{}/{}".format(self.env.model_version, record_filename)
)
self.s3_client.put_object(
Bucket=self.env.s3.bucket(), Key=s3_key, Body=record_bytes
)
class Model:
""" A holder for a trainer and predictor.
Sub-classes should:
* Set the attribute TRAINER to a ModelTrainer sub-class.
* Set the attribute PREDICTOR to a ModelPredictor sub-class.
"""
TRAINER = None
PREDICTOR = None
def trainer(self, env):
if self.TRAINER is None:
raise ValueError(".TRAINER should be an instance of ModelTrainer")
return self.TRAINER(env)
def predictor(self, env):
if self.PREDICTOR is None:
raise ValueError(".PREDICTOR should be an instance of ModelPredictor")
return self.PREDICTOR(env)
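# Hedged usage sketch (the class names and return values below are
# illustrative assumptions, not part of ml2p): a project wires its trainer
# and predictor together through a Model sub-class.
#
# class MyTrainer(ModelTrainer):
#     def train(self):
#         data = self.env.data_channel_folder("training")  # read inputs here
#         (self.env.model_folder() / "model.json").write_text("{}")
#
# class MyPredictor(ModelPredictor):
#     def result(self, data):
#         return {"score": 0.0}  # placeholder prediction
#
# class MyModel(Model):
#     TRAINER = MyTrainer
#     PREDICTOR = MyPredictor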
| 34.299308
| 94
| 0.602522
|
4a071a43ee82452c9afe884d55d9250d466d7373
| 729
|
py
|
Python
|
var/spack/repos/builtin/packages/libxkbfile/package.py
|
lguyot/spack
|
e910c227a7bac3adf2c18fc86cf994811b7d14f7
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2
|
2020-10-15T01:08:42.000Z
|
2021-10-18T01:28:18.000Z
|
var/spack/repos/builtin/packages/libxkbfile/package.py
|
lguyot/spack
|
e910c227a7bac3adf2c18fc86cf994811b7d14f7
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/libxkbfile/package.py
|
lguyot/spack
|
e910c227a7bac3adf2c18fc86cf994811b7d14f7
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libxkbfile(AutotoolsPackage):
"""XKB file handling routines."""
homepage = "https://cgit.freedesktop.org/xorg/lib/libxkbfile"
url = "https://www.x.org/archive/individual/lib/libxkbfile-1.0.9.tar.gz"
version('1.0.9', sha256='95df50570f38e720fb79976f603761ae6eff761613eb56f258c3cb6bab4fd5e3')
depends_on('libx11')
depends_on('libxcb')
depends_on('kbproto')
depends_on('xproto')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
| 30.375
| 95
| 0.721536
|
4a071a8dd5b2267af07957fcfabbfeba31502d88
| 695
|
py
|
Python
|
testsuite/cases/pgmagick_load.py
|
jcupitt/pillow-perf
|
dc71bf8597f73ced42724a2203867ba4000e0640
|
[
"MIT"
] | null | null | null |
testsuite/cases/pgmagick_load.py
|
jcupitt/pillow-perf
|
dc71bf8597f73ced42724a2203867ba4000e0640
|
[
"MIT"
] | null | null | null |
testsuite/cases/pgmagick_load.py
|
jcupitt/pillow-perf
|
dc71bf8597f73ced42724a2203867ba4000e0640
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import print_function, unicode_literals, absolute_import
from .base import rpartial, root, BaseLoadCase, BaseSaveCase
from .pgmagick import Image, Blob
class LoadCase(BaseLoadCase):
def runner(self):
Image(root('resources', self.filename).encode('utf-8'))
class SaveCase(BaseSaveCase):
def create_test_data(self):
im = Image(root('resources', self.filename).encode('utf-8'))
return [im]
def runner(self, im):
im.quality(85)
im.magick(self.filetype.encode('utf-8'))
im.write(Blob())
cases = [
rpartial(LoadCase, 'JPEG', 'pineapple.jpeg'),
rpartial(SaveCase, 'JPEG', 'pineapple.jpeg'),
]
| 23.965517
| 72
| 0.670504
|
4a071b5a152dc601ae13919b8084bed30100e3f3
| 715
|
py
|
Python
|
zerver/management/commands/purge_queue.py
|
enterstudio/zulip
|
f6f8f1fe36ac7d82bc0a5effc00a47e460f0b325
|
[
"Apache-2.0"
] | null | null | null |
zerver/management/commands/purge_queue.py
|
enterstudio/zulip
|
f6f8f1fe36ac7d82bc0a5effc00a47e460f0b325
|
[
"Apache-2.0"
] | null | null | null |
zerver/management/commands/purge_queue.py
|
enterstudio/zulip
|
f6f8f1fe36ac7d82bc0a5effc00a47e460f0b325
|
[
"Apache-2.0"
] | 1
|
2017-03-19T14:40:16.000Z
|
2017-03-19T14:40:16.000Z
|
from __future__ import absolute_import
from __future__ import print_function
from django.core.management.base import BaseCommand
from django.core.management import CommandError
from zerver.lib.queue import SimpleQueueClient
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('queue_name', metavar='<queue name>', type=str,
help="queue to purge")
help = "Discards all messages from the given queue"
def handle(self, *args, **options):
queue_name = options['queue_name']
queue = SimpleQueueClient()
queue.ensure_queue(queue_name, lambda: None)
queue.channel.queue_purge(queue_name)
print("Done")
| 35.75
| 75
| 0.706294
|
4a071c2fe74f193abdb69912bb72d0bbe11352b2
| 49,995
|
py
|
Python
|
python/paddle/nn/functional/activation.py
|
HydrogenSulfate/Paddle
|
42cfd15e672e1ed7ad0242c1ae9e492f197599d6
|
[
"Apache-2.0"
] | 2
|
2019-05-16T03:09:06.000Z
|
2022-01-14T07:06:37.000Z
|
python/paddle/nn/functional/activation.py
|
HydrogenSulfate/Paddle
|
42cfd15e672e1ed7ad0242c1ae9e492f197599d6
|
[
"Apache-2.0"
] | 1
|
2020-09-08T01:45:28.000Z
|
2020-09-08T01:45:28.000Z
|
python/paddle/nn/functional/activation.py
|
HydrogenSulfate/Paddle
|
42cfd15e672e1ed7ad0242c1ae9e492f197599d6
|
[
"Apache-2.0"
] | 5
|
2021-12-10T11:20:06.000Z
|
2022-02-18T05:18:12.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...fluid.layers import sigmoid # noqa: F401
from ...tensor.math import tanh # noqa: F401
from ...tensor.math import tanh_ # noqa: F401
from ...fluid.dygraph.inplace_utils import inplace_apis_in_dygraph_only
from ...tensor.manipulation import chunk
from ...tensor.math import multiply
import warnings
from ...fluid.layer_helper import LayerHelper
from ...fluid.framework import in_dygraph_mode, convert_np_dtype_to_dtype_
from ...fluid import core
from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
import paddle
from paddle import _C_ops
__all__ = []
def celu(x, alpha=1.0, name=None):
r"""
celu activation.
.. math::
celu(x) = max(0, x) + min(0, \alpha * (e^{x/\alpha}-1))
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
alpha (float, optional): The 'alpha' value of the CELU formulation. Default is 1.0.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([[-1., 6.], [1., 15.6]])
out = F.celu(x, alpha=0.2)
# [[-0.19865242, 6. ],
# [ 1. , 15.60000038]]
"""
if alpha == 0:
raise ZeroDivisionError("alpha cannot be 0 for celu")
if in_dygraph_mode():
return _C_ops.celu(x, 'alpha', alpha)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'celu')
helper = LayerHelper("celu", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='celu',
inputs={'X': x},
outputs={'Out': out},
attrs={'alpha': alpha})
return out
def elu(x, alpha=1.0, name=None):
r"""
elu activation.
.. math::
elu(x)=
\left\{
\begin{array}{lcl}
x,& &\text{if } \ x > 0 \\
alpha * (e^{x} - 1),& &\text{if } \ x <= 0
\end{array}
\right.
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
alpha (float, optional): The 'alpha' value of the ELU formulation. Default is 1.0.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([[-1., 6.], [1., 15.6]])
out = F.elu(x, alpha=0.2)
# [[-0.12642411 6. ]
# [ 1. 15.6 ]]
"""
if in_dygraph_mode():
return _C_ops.elu(x, 'alpha', alpha)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'elu')
helper = LayerHelper("elu", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='elu',
inputs={'X': x},
outputs={'Out': out},
attrs={'alpha': alpha})
return out
@inplace_apis_in_dygraph_only
def elu_(x, alpha=1.0, name=None):
r"""
Inplace version of ``elu`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_nn_cn_elu`.
"""
assert alpha >= 0., "elu_ only support alpha >= 0, please use elu instead."
return _C_ops.elu_(x, 'alpha', alpha)
def gelu(x, approximate=False, name=None):
r"""
gelu activation.
if approximate is True
.. math::
gelu(x) = 0.5 * x * (1 + tanh(\sqrt{\frac{2}{\pi}} * (x + 0.044715x^{3})))
else
.. math::
gelu(x) = 0.5 * x * (1 + erf(\frac{x}{\sqrt{2}}))
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
approximate (bool, optional): Whether to enable approximation. Default is False.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([[-1, 0.5], [1, 1.5]])
out1 = F.gelu(x)
# [[-0.15865529, 0.34573123],
# [ 0.84134471, 1.39978933]]
out2 = F.gelu(x, True)
# [[-0.15880799, 0.34571400],
# [ 0.84119201, 1.39957154]]
"""
if in_dygraph_mode():
return _C_ops.gelu(x, 'approximate', approximate)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'gelu')
helper = LayerHelper("gelu", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='gelu',
inputs={'X': x},
outputs={'Out': out},
attrs={'approximate': approximate})
return out
def hardshrink(x, threshold=0.5, name=None):
r"""
hard shrinkage activation
.. math::
hardshrink(x)=
\left\{
\begin{array}{rcl}
x,& &if \ {x > threshold} \\
x,& &if \ {x < -threshold} \\
0,& &if \ {others} &
\end{array}
\right.
Args:
x (Tensor): The input Tensor with data type float32, float64.
threshold (float, optional): The value of threshold for hardshrink. Default is 0.5.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([-1, 0.3, 2.5])
out = F.hardshrink(x) # [-1., 0., 2.5]
"""
if in_dygraph_mode():
return _C_ops.hard_shrink(x, 'threshold', threshold)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'hardshrink')
helper = LayerHelper('hardshrink', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='hard_shrink',
inputs={'X': x},
outputs={'Out': out},
attrs={'threshold': threshold})
return out
def hardtanh(x, min=-1.0, max=1.0, name=None):
r"""
hardtanh activation
.. math::
hardtanh(x)=
\left\{
\begin{array}{cll}
max,& & \text{if } x > max \\
min,& & \text{if } x < min \\
x,& & \text{otherwise}
\end{array}
\right.
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
min (float, optional): The minimum value of the linear region range. Default is -1.
max (float, optional): The maximum value of the linear region range. Default is 1.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-1.5, 0.3, 2.5]))
out = F.hardtanh(x) # [-1., 0.3, 1.]
"""
if in_dygraph_mode():
return _C_ops.brelu(x, 't_min', min, 't_max', max)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'hardtanh')
helper = LayerHelper('hardtanh', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='brelu',
inputs={'X': x},
outputs={'Out': out},
attrs={'t_min': min,
't_max': max})
return out
def hardsigmoid(x, slope=0.1666667, offset=0.5, name=None):
r"""
hardsigmoid activation.
A three-part piecewise linear approximation of sigmoid (https://arxiv.org/abs/1603.00391),
which is much faster than sigmoid.
.. math::
hardsigmoid(x)=
\left\{
\begin{array}{lcl}
0, & &\text{if } \ x \leq -3 \\
1, & &\text{if } \ x \geq 3 \\
slope * x + offset, & &\text{otherwise}
\end{array}
\right.
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
slope (float, optional): The slope of hardsigmoid function. Default is 0.1666667.
offset (float, optional): The offset of hardsigmoid function. Default is 0.5.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([-4., 5., 1.])
out = F.hardsigmoid(x) # [0., 1., 0.666667]
"""
if in_dygraph_mode():
return _C_ops.hard_sigmoid(x, 'slope', slope, 'offset', offset)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'hardsigmoid')
helper = LayerHelper('hardsigmoid', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='hard_sigmoid',
inputs={'X': x},
outputs={'Out': out},
attrs={'slope': slope,
'offset': offset})
return out
def hardswish(x, name=None):
r"""
hardswish activation
hardswish is proposed in MobileNetV3, and offers better computational stability
and efficiency than the swish function. For more details please refer
to: https://arxiv.org/pdf/1905.02244.pdf
.. math::
hardswish(x)=
\left\{
\begin{array}{cll}
0 &, & \text{if } x \leq -3 \\
x &, & \text{if } x \geq 3 \\
\frac{x(x+3)}{6} &, & \text{otherwise}
\end{array}
\right.
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([-4., 5., 1.])
out = F.hardswish(x) # [0., 5., 0.666667]
"""
if in_dygraph_mode():
return _C_ops.hard_swish(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'hardswish')
helper = LayerHelper('hardswish', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='hard_swish', inputs={'X': x}, outputs={'Out': out})
return out
def leaky_relu(x, negative_slope=0.01, name=None):
r"""
leaky_relu activation
.. math::
leaky\_relu(x)=
\left\{
\begin{array}{rcl}
x, & & if \ x >= 0 \\
negative\_slope * x, & & otherwise \\
\end{array}
\right.
Args:
x (Tensor): The input Tensor with data type float32, float64.
negative_slope (float, optional): Slope of the activation function at
:math:`x < 0` . Default is 0.01.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([-2., 0., 1.])
out = F.leaky_relu(x) # [-0.02, 0., 1.]
"""
if in_dygraph_mode():
return _C_ops.leaky_relu(x, 'alpha', negative_slope)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'leaky_relu')
helper = LayerHelper('leaky_relu', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='leaky_relu',
inputs={'X': x},
outputs={'Out': out},
attrs={'alpha': negative_slope})
return out
def prelu(x, weight, data_format="NCHW", name=None):
"""
prelu activation.
.. math::
prelu(x) = max(0, x) + weight * min(0, x)
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
weight (Tensor): The learnable parameter with data type same as ``x``.
The weight shape is [1] or [in], where `in` is the number of input channels of ``x``.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
data_format(str, optional): Data format that specifies the layout of input.
It may be "NC", "NCL", "NCHW", "NCDHW", "NLC", "NHWC" or "NDHWC". Default: "NCHW".
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
data = np.array([[[[-2.0, 3.0, -4.0, 5.0],
[ 3.0, -4.0, 5.0, -6.0],
[-7.0, -8.0, 8.0, 9.0]],
[[ 1.0, -2.0, -3.0, 4.0],
[-5.0, 6.0, 7.0, -8.0],
[ 6.0, 7.0, 8.0, 9.0]]]], 'float32')
x = paddle.to_tensor(data)
w = paddle.to_tensor(np.array([0.25]).astype('float32'))
out = F.prelu(x, w)
# [[[[-0.5 , 3. , -1. , 5. ],
# [ 3. , -1. , 5. , -1.5 ],
# [-1.75, -2. , 8. , 9. ]],
# [[ 1. , -0.5 , -0.75, 4. ],
# [-1.25, 6. , 7. , -2. ],
# [ 6. , 7. , 8. , 9. ]]]]
"""
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'prelu')
check_variable_and_dtype(weight, 'weight',
['float16', 'float32', 'float64'], 'prelu')
assert len(weight.shape
) == 1, "The dim count of weight shape should be 1 in prelu()."
mode = 'all'
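    # 'all' shares a single alpha across every element; when the weight holds
    # one value per channel, the mode is switched to 'channel' below.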
if weight.shape[0] > 1:
true_data_format = [
'NC', 'NCL', 'NCHW', 'NCDHW', 'NLC', 'NHWC', 'NDHWC'
]
if data_format not in true_data_format:
raise ValueError(
"data_format must be one of 'NC', 'NCL', 'NCHW', 'NCDHW', "
"'NLC', 'NHWC', 'NDHWC' but receive {}".format(data_format))
data_format = 'NCHW' if data_format[1] == 'C' else 'NHWC'
assert len(
x.shape
) > 1, "The dim count of x should be equal or larger than 2 in prelu() when weight shape is not [1]."
#NOTE(GuoxiaWang): support NHWC data format
if data_format == 'NHWC':
assert weight.shape[0] == x.shape[
-1], "The weight size should be equal to x input channel in prelu() when weight shape is not [1]."
else:
assert weight.shape[0] == x.shape[
1], "The weight size should be equal to x input channel in prelu() when weight shape is not [1]."
mode = 'channel'
if in_dygraph_mode():
return _C_ops.prelu(x, weight, 'mode', mode, 'data_format', data_format)
helper = LayerHelper('prelu', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type="prelu",
inputs={"X": x,
"Alpha": weight},
outputs={"Out": out},
attrs={"mode": mode,
"data_format": data_format})
return out
def relu(x, name=None):
"""
relu activation.
.. math::
out = max(x, 0)
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-2, 0, 1]).astype('float32'))
out = F.relu(x) # [0., 0., 1.]
"""
if in_dygraph_mode():
return _C_ops.relu(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu')
helper = LayerHelper('relu', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='relu', inputs={'X': x}, outputs={'Out': out})
return out
@inplace_apis_in_dygraph_only
def relu_(x, name=None):
"""
Inplace version of ``relu`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_nn_cn_relu`.
"""
return _C_ops.relu_(x)
def log_sigmoid(x, name=None):
r"""
log_sigmoid activation.
.. math::
log\_sigmoid(x) = log \frac{1}{1 + e^{-x}}
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
out = F.log_sigmoid(x) # [-0.313262 -0.126928 -0.0485874 -0.0181499]
"""
if in_dygraph_mode():
return _C_ops.logsigmoid(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'log_sigmoid')
helper = LayerHelper("log_sigmoid", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='logsigmoid', inputs={'X': x}, outputs={'Out': out})
return out
def maxout(x, groups, axis=1, name=None):
r"""
maxout activation.
Assume the input shape is (N, Ci, H, W); the output shape is then
(N, Co, H, W), where Co = Ci / groups.
The operator formula is as follows:
.. math::
\begin{array}{l}
&out_{si+j} = \max_{k} x_{gsi + sk + j} \\
&g = groups \\
&s = \frac{input.size}{num\_channels} \\
&0 \le i < \frac{num\_channels}{groups} \\
&0 \le j < s \\
&0 \le k < groups
\end{array}
Parameters:
x (Tensor): The input is 4-D Tensor with shape [N, C, H, W] or [N, H, W, C], the data type
of input is float32 or float64.
groups (int): The number of groups into which the channel dimension is
    divided. The number of input channels must be divisible by ``groups``,
    and the output has ``C / groups`` channels.
axis (int, optional): The axis along which to perform maxout calculations.
It should be 1 when data format is NCHW, be -1 or 3 when data format
is NHWC. If ``axis`` < 0, it works the same way as :math:`axis + D` ,
where D is the dimensions of ``x`` . ``axis`` only supports 1, 3 or -1.
Default is 1.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.rand([1, 2, 3, 4])
# [[[[0.5002636 0.22272532 0.17402348 0.2874594 ]
# [0.95313174 0.6228939 0.7129065 0.7087491 ]
# [0.02879342 0.88725346 0.61093384 0.38833922]]
# [[0.5231306 0.03807496 0.91661984 0.15602879]
# [0.666127 0.616567 0.30741522 0.24044901]
# [0.7142536 0.7351477 0.31588817 0.23782359]]]]
out = F.maxout(x, groups=2)
# [[[[0.5231306 0.22272532 0.91661984 0.2874594 ]
# [0.95313174 0.6228939 0.7129065 0.7087491 ]
# [0.7142536 0.88725346 0.61093384 0.38833922]]]]
"""
if in_dygraph_mode():
return _C_ops.maxout(x, 'groups', groups, 'axis', axis)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'maxout')
if axis not in [1, -1, 3]:
raise ValueError(
"Attr(axis) should be 1 when data format is NCHW, -1 or 3 when data format is NHWC. Received "
"Attr(axis): %s." % str(axis))
if axis == -1:
axis = 3
helper = LayerHelper('maxout', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='maxout',
inputs={'X': x},
outputs={'Out': out},
attrs={'groups': groups,
'axis': axis})
return out
def relu6(x, name=None):
"""
relu6 activation
.. math::
relu6(x) = min(max(0,x), 6)
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-1, 0.3, 6.5]))
out = F.relu6(x) # [0, 0.3, 6]
"""
threshold = 6.0
if in_dygraph_mode():
return _C_ops.relu6(x, 'threshold', threshold)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu6')
helper = LayerHelper('relu6', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='relu6',
inputs={'X': x},
outputs={'Out': out},
attrs={'threshold': threshold})
return out
def selu(x,
scale=1.0507009873554804934193349852946,
alpha=1.6732632423543772848170429916717,
name=None):
r"""
selu activation
.. math::
selu(x)= scale *
\left\{
\begin{array}{lcl}
x,& &\text{if } \ x > 0 \\
alpha * e^{x} - alpha,& &\text{if } \ x <= 0
\end{array}
\right.
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
scale (float, optional): The value of scale (must be greater than 1.0) for selu. Default is 1.0507009873554804934193349852946.
alpha (float, optional): The value of alpha (must be no less than zero) for selu. Default is 1.6732632423543772848170429916717.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([[0.0, 1.0],[2.0, 3.0]]))
out = F.selu(x) # [[0, 1.050701],[2.101402, 3.152103]]
"""
if scale <= 1.0:
raise ValueError(
"The scale must be greater than 1.0. Received: {}.".format(scale))
if alpha < 0:
raise ValueError(
"The alpha must be no less than zero. Received: {}.".format(alpha))
if in_dygraph_mode():
return _C_ops.selu(x, 'scale', scale, 'alpha', alpha)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'selu')
helper = LayerHelper('selu', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='selu',
inputs={'X': x},
outputs={'Out': out},
attrs={'scale': scale,
'alpha': alpha})
return out
def silu(x, name=None):
r"""
silu activation
.. math::
silu(x) = \frac{x}{1 + e^{-x}}
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
out = F.silu(x) # [ 0.731059, 1.761594, 2.857722, 3.928055 ]
"""
if in_dygraph_mode():
return _C_ops.silu(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'silu')
helper = LayerHelper("silu", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='silu', inputs={'X': x}, outputs={'Out': out})
return out
def softmax(x, axis=-1, dtype=None, name=None):
r"""
This operator implements the softmax layer. The calculation process is as follows:
1. The dimension :attr:`axis` of ``x`` will be permuted to the last.
2. Then ``x`` will be logically flattened to a 2-D matrix. The matrix's second
dimension(row length) is the same as the dimension :attr:`axis` of ``x``,
and the first dimension(column length) is the product of all other dimensions
of ``x``. For each row of the matrix, the softmax operator squashes the
K-dimensional(K is the width of the matrix, which is also the size of ``x``'s
dimension :attr:`axis`) vector of arbitrary real values to a K-dimensional
vector of real values in the range [0, 1] that add up to 1.
3. After the softmax operation is completed, the inverse operations of steps 1 and 2
are performed to restore the two-dimensional matrix to the same dimension as the ``x`` .
For each entry along the given dimension, it computes the exponential of
that entry and the sum of the exponentials of all entries in the
K-dimensional vector. The ratio of each entry's exponential to that sum is
the output of the softmax operator.
For each row :math:`i` and each column :math:`j` in the matrix, we have:
.. math::
softmax[i, j] = \frac{\exp(x[i, j])}{\sum_j \exp(x[i, j])}
Example:
.. code-block:: text
Case 1:
Input:
x.shape = [2, 3, 4]
x.data = [[[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 8.0, 9.0]],
[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[6.0, 7.0, 8.0, 9.0]]]
Attrs:
axis = -1
Output:
out.shape = [2, 3, 4]
out.data = [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
[0.07232949, 0.19661193, 0.19661193, 0.53444665]],
[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
[0.0320586 , 0.08714432, 0.23688282, 0.64391426]]]
Case 2:
Input:
x.shape = [2, 3, 4]
x.data = [[[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 8.0, 9.0]],
[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[6.0, 7.0, 8.0, 9.0]]]
Attrs:
axis = 1
Output:
out.shape = [2, 3, 4]
out.data = [[[0.00657326, 0.00657326, 0.01714783, 0.01714783],
[0.01786798, 0.01786798, 0.04661262, 0.04661262],
[0.97555875, 0.97555875, 0.93623955, 0.93623955]],
[[0.00490169, 0.00490169, 0.00490169, 0.00490169],
[0.26762315, 0.26762315, 0.26762315, 0.26762315],
[0.72747516, 0.72747516, 0.72747516, 0.72747516]]]
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
axis (int, optional): The axis along which to perform softmax
calculations. It should be in range [-D, D), where D is the
dimensions of ``x`` . If ``axis`` < 0, it works the same way as
:math:`axis + D` . Default is -1.
dtype (str, optional): The data type of the output tensor, can be float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same shape and data type (use ``dtype`` if it is
specified) as x.
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
x = np.array([[[2.0, 3.0, 4.0, 5.0],
[3.0, 4.0, 5.0, 6.0],
[7.0, 8.0, 8.0, 9.0]],
[[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[6.0, 7.0, 8.0, 9.0]]], 'float32')
x = paddle.to_tensor(x)
out1 = F.softmax(x)
out2 = F.softmax(x, dtype='float64')
# out1's data type is float32; out2's data type is float64
# out1 and out2's value is as follows:
# [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
# [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
# [0.07232949, 0.19661193, 0.19661193, 0.53444665]],
# [[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
# [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
# [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]]
"""
if (dtype is not None) and (not isinstance(dtype, core.VarDesc.VarType)):
dtype = convert_np_dtype_to_dtype_(dtype)
use_cudnn = True
if in_dygraph_mode():
outs_cast = x if dtype is None \
else _C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
return _C_ops.softmax(outs_cast, 'axis', axis, 'use_cudnn', use_cudnn)
if dtype is None:
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'softmax')
else:
check_dtype(dtype, 'dtype', ['float32', 'float64'], 'softmax',
'If dtype is not None, it only support float32 or float64.')
helper = LayerHelper("softmax", **locals())
outs_cast = x
if dtype is not None:
outs_cast = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='cast',
inputs={'X': x},
outputs={'Out': outs_cast},
attrs={'in_dtype': x.dtype,
'out_dtype': dtype})
outs_softmax = helper.create_variable_for_type_inference(outs_cast.dtype)
helper.append_op(
type='softmax',
inputs={'X': outs_cast},
outputs={'Out': outs_softmax},
attrs={'axis': axis,
'use_cudnn': use_cudnn})
return outs_softmax
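# A reference sketch (an assumption, for illustration only, not used by the
# operator above) of the three-step process described in the softmax
# docstring, written with numpy:
def _softmax_reference_sketch(x, axis=-1):
    import numpy as np
    x = np.moveaxis(np.asarray(x), axis, -1)   # step 1: move `axis` last
    shape = x.shape
    flat = x.reshape(-1, shape[-1])            # step 2: flatten to 2-D rows
    e = np.exp(flat - flat.max(axis=1, keepdims=True))  # stable exponentials
    out = (e / e.sum(axis=1, keepdims=True)).reshape(shape)
    return np.moveaxis(out, -1, axis)          # step 3: restore the layout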
@inplace_apis_in_dygraph_only
def softmax_(x, axis=-1, dtype=None, name=None):
r"""
Inplace version of ``softmax`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_nn_cn_softmax`.
"""
if (dtype is not None) and (not isinstance(dtype, core.VarDesc.VarType)):
dtype = convert_np_dtype_to_dtype_(dtype)
use_cudnn = True
return _C_ops.softmax_(x, 'axis', axis, 'use_cudnn', use_cudnn)
def softplus(x, beta=1, threshold=20, name=None):
r"""
softplus activation
.. math::
softplus(x) = \frac{1}{beta} * \log(1 + e^{beta * x}) \\
\text{For numerical stability, the implementation reverts to the linear function when: beta * x > threshold.}
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
beta (float, optional): The value of beta for softplus. Default is 1
threshold (float, optional): The value of threshold for softplus. Default is 20
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
out = F.softplus(x) # [0.513015, 0.598139, 0.744397, 0.854355]
"""
if in_dygraph_mode():
return _C_ops.softplus(x, 'beta', beta, 'threshold', threshold)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'softplus')
helper = LayerHelper('softplus', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='softplus',
inputs={'X': x},
outputs={'Out': out},
attrs={'beta': beta,
'threshold': threshold})
return out
def softshrink(x, threshold=0.5, name=None):
r"""
softshrink activation
.. math::
softshrink(x)=
\left\{
\begin{array}{rcl}
x - threshold,& & \text{if } x > threshold \\
x + threshold,& & \text{if } x < -threshold \\
0,& & \text{otherwise}
\end{array}
\right.
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
threshold (float, optional): The value of threshold (must be no less than zero) for softshrink. Default is 0.5.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-0.9, -0.2, 0.1, 0.8]))
out = F.softshrink(x) # [-0.4, 0, 0, 0.3]
"""
if threshold < 0:
raise ValueError(
"The threshold must be no less than zero. Received: {}.".format(
threshold))
if in_dygraph_mode():
return _C_ops.softshrink(x, 'lambda', threshold)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'softshrink')
helper = LayerHelper('softshrink', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='softshrink',
inputs={'X': x},
outputs={'Out': out},
attrs={'lambda': threshold})
return out
def softsign(x, name=None):
r"""
softsign activation
.. math::
softsign(x) = \frac{x}{1 + |x|}
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
"""
if in_dygraph_mode():
return _C_ops.softsign(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'softsign')
helper = LayerHelper('softsign', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='softsign', inputs={'X': x}, outputs={'Out': out})
return out
def swish(x, name=None):
r"""
swish activation.
.. math::
swish(x) = \frac{x}{1 + e^{-x}}
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-2., 0., 1.]))
out = F.swish(x) # [-0.238406, 0., 0.731059]
"""
if in_dygraph_mode():
return _C_ops.swish(x, 'beta', 1.0)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'swish')
helper = LayerHelper('swish', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='swish',
inputs={'X': x},
outputs={'Out': out},
attrs={'beta': 1.0})
return out
def tanhshrink(x, name=None):
"""
tanhshrink activation
.. math::
tanhshrink(x) = x - tanh(x)
Args:
x (Tensor): The input Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([-0.4, -0.2, 0.1, 0.3]))
out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
"""
if in_dygraph_mode():
return _C_ops.tanh_shrink(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'tanhshrink')
helper = LayerHelper('tanh_shrink', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='tanh_shrink', inputs={'X': x}, outputs={'Out': out})
return out
def thresholded_relu(x, threshold=1.0, name=None):
r"""
thresholded relu activation.
.. math::
thresholded\_relu(x) =
\left\{
\begin{array}{rl}
x,& \text{if } \ x > threshold \\
0,& \text{otherwise}
\end{array}
\right.
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
threshold (float, optional): The value of threshold for thresholded_relu. Default is 1.0
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
import numpy as np
x = paddle.to_tensor(np.array([2., 0., 1.]))
out = F.thresholded_relu(x) # [2., 0., 0.]
"""
if in_dygraph_mode():
return _C_ops.thresholded_relu(x, 'threshold', threshold)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'thresholded_relu')
helper = LayerHelper('thresholded_relu', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='thresholded_relu',
inputs={'X': x},
outputs={'Out': out},
attrs={'threshold': threshold})
return out
def log_softmax(x, axis=-1, dtype=None, name=None):
r"""
This operator implements the log_softmax layer. The calculation process is
as follows:
.. math::
\begin{aligned}
log\_softmax[i, j] &= \log(softmax(x)[i, j]) \\
&= \log\left(\frac{\exp(x[i, j])}{\sum_j \exp(x[i, j])}\right)
\end{aligned}
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
axis (int, optional): The axis along which to perform log_softmax
calculations. It should be in range [-D, D), where D is the
dimensions of ``x`` . If ``axis`` < 0, it works the same way as
:math:`axis + D` . Default is -1.
dtype (str|np.dtype|core.VarDesc.VarType, optional): The desired data
type of the output tensor. If dtype is specified, ``x`` is casted
to ``dtype`` before the operation is performed. This is useful for
preventing data type overflows. Supported dtype: float32, float64.
If ``dtype`` is None, the output Tensor has the same dtype as x.
Default is None.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same shape and data type (use ``dtype`` if it is
specified) as x.
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = [[[-2.0, 3.0, -4.0, 5.0],
[3.0, -4.0, 5.0, -6.0],
[-7.0, -8.0, 8.0, 9.0]],
[[1.0, -2.0, -3.0, 4.0],
[-5.0, 6.0, 7.0, -8.0],
[6.0, 7.0, 8.0, 9.0]]]
x = paddle.to_tensor(x)
out1 = F.log_softmax(x)
out2 = F.log_softmax(x, dtype='float64')
# out1's data type is float32; out2's data type is float64
# out1 and out2's value is as follows:
# [[[ -7.1278396 -2.1278396 -9.127839 -0.12783948]
# [ -2.1270514 -9.127051 -0.12705144 -11.127051 ]
# [-16.313261 -17.313261 -1.3132617 -0.31326184]]
# [[ -3.0518122 -6.051812 -7.051812 -0.051812 ]
# [-12.313267 -1.3132664 -0.3132665 -15.313267 ]
# [ -3.4401896 -2.4401896 -1.4401896 -0.44018966]]]
"""
if (dtype is not None) and (not isinstance(dtype, core.VarDesc.VarType)):
dtype = convert_np_dtype_to_dtype_(dtype)
if in_dygraph_mode():
if dtype is not None:
x = _C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
return _C_ops.log_softmax(x, 'axis', axis)
if dtype is None:
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
'log_softmax')
else:
check_dtype(dtype, 'dtype', ['float32', 'float64'], 'log_softmax',
'If dtype is not None, it only support float32 or float64.')
helper = LayerHelper("log_softmax", **locals())
out_cast = x
if dtype is not None:
out_cast = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='cast',
inputs={'X': x},
outputs={'Out': out_cast},
attrs={'in_dtype': x.dtype,
'out_dtype': dtype})
out = helper.create_variable_for_type_inference(out_cast.dtype)
helper.append_op(
type='log_softmax',
inputs={'X': out_cast},
outputs={'Out': out},
attrs={'axis': axis})
return out
def glu(x, axis=-1, name=None):
r"""
The gated linear unit. The input is evenly split into 2 parts along a
given axis. The first part is used as the content, and the second part is
passed through a sigmoid function and then used as the gate. The output is
an elementwise multiplication of the content and the gate.
.. math::
\mathrm{GLU}(a, b) = a \otimes \sigma(b)
Parameters:
x (Tensor): The input Tensor with data type float32, float64.
axis (int, optional): The axis along which split the input tensor. It
should be in range [-D, D), where D is the dimensions of ``x`` .
If ``axis`` < 0, it works the same way as :math:`axis + D` .
Default is -1.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Tensor with the same data type as x. The size of the given axis is
halved.
Examples:
.. code-block:: python
import paddle
from paddle.nn import functional as F
x = paddle.to_tensor(
[[-0.22014759, -1.76358426, 0.80566144, 0.04241343],
[-1.94900405, -1.89956081, 0.17134808, -1.11280477]]
)
print(F.glu(x).numpy())
# array([[-0.15216254, -0.9004892 ],
# [-1.0577879 , -0.46985325]], dtype=float32)
"""
check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
"glu")
a, b = chunk(x, 2, axis=axis, name=name)
gate = sigmoid(b, name=name)
out = paddle.multiply(a, gate, name=name)
return out
def gumbel_softmax(x, temperature=1.0, hard=False, axis=-1, name=None):
r"""
Samples from the Gumbel-Softmax distribution and optionally discretizes.
temperature is denoted by t. The calculation process is as follows:
First, generate gumbel noise:
.. math::
G_i = -log(-log(U_i)), U_i \sim U(0,1)
Second, add noise to ``x``:
.. math::
v = [x_1 + G_1,...,x_n + G_n]
Finally, calculate gumbel_softmax and generate samples:
.. math::
gumbel\_softmax(v_i)=\frac{e^{v_i/t}}{\sum_{j=1}^n{e^{v_j/t}}},i=1,2,3...n
Parameters:
x (Tensor): An N-D Tensor, the first N - 1 dimensions index into a batch
of independent distributions and the last dimension represents
a vector of probabilities with datatype float32, float64.
temperature (float, optional): non-negative scalar temperature.
Default is 1.0.
hard (bool, optional): if True, the returned samples will be discretized as
one-hot vectors, but will be differentiated as if it is the soft sample
in autograd. Default is False.
axis (int, optional): The axis along which softmax will be calculated.
Default is -1.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Sampled tensor of same shape as ``x`` from the Gumbel-Softmax distribution.
If ``hard = True``, the returned samples will be one-hot, otherwise they will be
probability distributions that sum to 1 across ``axis``.
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
logits = paddle.randn([4, 6])
temperature = 0.01
gumbel_softmax = F.gumbel_softmax(logits, temperature)
print(gumbel_softmax)
# out's value is as follows:
# [[0.00000001, 1. , 0.00000000, 0.00000000, 0.00000006, 0.00000000],
# [0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 1. ],
# [0.00000062, 0.00000000, 0.00000000, 0.00000000, 0.00000000, 0.99999940],
# [0.00000000, 0.00000000, 0.00000000, 0.00001258, 0.99998736, 0.00000000]]
"""
if in_dygraph_mode():
return _C_ops.gumbel_softmax(x, 'temperature', temperature, 'hard',
hard, 'axis', axis)
helper = LayerHelper("gumbel_softmax", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'gumbel_softmax')
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='gumbel_softmax',
inputs={'X': x},
outputs={'Out': out},
attrs={'temperature': temperature,
'hard': hard,
'axis': axis})
return out
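# A numpy sketch (an assumption, for illustration only) of the sampling recipe
# in the docstring: draw gumbel noise G_i = -log(-log(U_i)), add it to the
# logits, and apply a temperature-scaled softmax.
def _gumbel_softmax_sketch(logits, temperature=1.0):
    import numpy as np
    u = np.random.uniform(low=1e-10, high=1.0, size=np.shape(logits))
    g = -np.log(-np.log(u))                    # gumbel(0, 1) noise
    v = (np.asarray(logits) + g) / temperature
    e = np.exp(v - v.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)   # soft one-hot samples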
| 33.871951
| 133
| 0.556436
|
4a071ca58a5288faa549b1cd4a60401921acf219
| 9,516
|
py
|
Python
|
ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
|
hortonworks/ambari-perf
|
71305effa9ac00e2e9adb36e6a66a13c9105a811
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
|
hortonworks/ambari-perf
|
71305effa9ac00e2e9adb36e6a66a13c9105a811
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
ambari-common/src/main/python/resource_management/libraries/functions/setup_ranger_plugin_xml.py
|
hortonworks/ambari-perf
|
71305effa9ac00e2e9adb36e6a66a13c9105a811
|
[
"Apache-2.0",
"MIT"
] | 3
|
2017-10-31T11:42:31.000Z
|
2021-04-26T07:17:53.000Z
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ["setup_ranger_plugin"]
import os
from datetime import datetime
from resource_management.libraries.functions.ranger_functions import Rangeradmin
from resource_management.core.resources import File, Directory, Execute
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.get_stack_version import get_stack_version
from resource_management.core.logger import Logger
from resource_management.core.source import DownloadSource, InlineTemplate
from resource_management.libraries.functions.ranger_functions_v2 import RangeradminV2
from resource_management.core.utils import PasswordString
from resource_management.libraries.script.script import Script
def setup_ranger_plugin(component_select_name, service_name,
component_downloaded_custom_connector, component_driver_curl_source,
component_driver_curl_target, java_home,
repo_name, plugin_repo_dict,
ranger_env_properties, plugin_properties,
policy_user, policymgr_mgr_url,
plugin_enabled, conf_dict, component_user, component_group,
cache_service_list, plugin_audit_properties, plugin_audit_attributes,
plugin_security_properties, plugin_security_attributes,
plugin_policymgr_ssl_properties, plugin_policymgr_ssl_attributes,
component_list, audit_db_is_enabled, credential_file,
xa_audit_db_password, ssl_truststore_password,
ssl_keystore_password, api_version=None, stack_version_override = None, skip_if_rangeradmin_down = True,
is_security_enabled = False, is_stack_supports_ranger_kerberos = False,
component_user_principal = None, component_user_keytab = None):
if audit_db_is_enabled:
File(component_downloaded_custom_connector,
content = DownloadSource(component_driver_curl_source),
mode = 0644
)
Execute(('cp', '--remove-destination', component_downloaded_custom_connector, component_driver_curl_target),
path=["/bin", "/usr/bin/"],
sudo=True
)
File(component_driver_curl_target, mode=0644)
stack_version = get_stack_version(component_select_name)
if stack_version_override is not None:
stack_version = stack_version_override
component_conf_dir = conf_dict
if plugin_enabled:
if api_version is not None and api_version == 'v2':
ranger_adm_obj = RangeradminV2(url=policymgr_mgr_url, skip_if_rangeradmin_down=skip_if_rangeradmin_down)
if is_security_enabled and is_stack_supports_ranger_kerberos:
ranger_adm_obj.create_ranger_repository(service_name, repo_name, plugin_repo_dict,
ranger_env_properties['ranger_admin_username'], ranger_env_properties['ranger_admin_password'],
ranger_env_properties['admin_username'], ranger_env_properties['admin_password'],
policy_user,is_security_enabled,component_user,component_user_principal,component_user_keytab)
else:
ranger_adm_obj.create_ranger_repository(service_name, repo_name, plugin_repo_dict,
ranger_env_properties['ranger_admin_username'], ranger_env_properties['ranger_admin_password'],
ranger_env_properties['admin_username'], ranger_env_properties['admin_password'],
policy_user)
else:
ranger_adm_obj = Rangeradmin(url=policymgr_mgr_url, skip_if_rangeradmin_down=skip_if_rangeradmin_down)
ranger_adm_obj.create_ranger_repository(service_name, repo_name, plugin_repo_dict,
ranger_env_properties['ranger_admin_username'], ranger_env_properties['ranger_admin_password'],
ranger_env_properties['admin_username'], ranger_env_properties['admin_password'],
policy_user)
current_datetime = datetime.now()
File(format('{component_conf_dir}/ranger-security.xml'),
owner = component_user,
group = component_group,
mode = 0644,
content = InlineTemplate(format('<ranger>\n<enabled>{current_datetime}</enabled>\n</ranger>'))
)
Directory([os.path.join('/etc', 'ranger', repo_name), os.path.join('/etc', 'ranger', repo_name, 'policycache')],
owner = component_user,
group = component_group,
mode=0775,
create_parents = True,
cd_access = 'a'
)
for cache_service in cache_service_list:
File(os.path.join('/etc', 'ranger', repo_name, 'policycache',format('{cache_service}_{repo_name}.json')),
owner = component_user,
group = component_group,
mode = 0644
)
XmlConfig(format('ranger-{service_name}-audit.xml'),
conf_dir=component_conf_dir,
configurations=plugin_audit_properties,
configuration_attributes=plugin_audit_attributes,
owner = component_user,
group = component_group,
mode=0744)
XmlConfig(format('ranger-{service_name}-security.xml'),
conf_dir=component_conf_dir,
configurations=plugin_security_properties,
configuration_attributes=plugin_security_attributes,
owner = component_user,
group = component_group,
mode=0744)
if str(service_name).lower() == 'yarn':
XmlConfig("ranger-policymgr-ssl-yarn.xml",
conf_dir=component_conf_dir,
configurations=plugin_policymgr_ssl_properties,
configuration_attributes=plugin_policymgr_ssl_attributes,
owner = component_user,
group = component_group,
mode=0744)
else:
XmlConfig("ranger-policymgr-ssl.xml",
conf_dir=component_conf_dir,
configurations=plugin_policymgr_ssl_properties,
configuration_attributes=plugin_policymgr_ssl_attributes,
owner = component_user,
group = component_group,
mode=0744)
#This should be done by rpm
#setup_ranger_plugin_jar_symblink(stack_version, service_name, component_list)
setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, stack_version, credential_file,
xa_audit_db_password, ssl_truststore_password, ssl_keystore_password,
component_user, component_group, java_home)
else:
File(format('{component_conf_dir}/ranger-security.xml'),
action="delete"
)
def setup_ranger_plugin_jar_symblink(stack_version, service_name, component_list):
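  # Symlinks every jar shipped with the versioned Ranger plugin into each
  # component's lib directory (normally handled by the rpm, per the note above).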
stack_root = Script.get_stack_root()
jar_files = os.listdir(format('{stack_root}/{stack_version}/ranger-{service_name}-plugin/lib'))
for jar_file in jar_files:
for component in component_list:
Execute(('ln','-sf',format('{stack_root}/{stack_version}/ranger-{service_name}-plugin/lib/{jar_file}'),format('{stack_root}/current/{component}/lib/{jar_file}')),
not_if=format('ls {stack_root}/current/{component}/lib/{jar_file}'),
only_if=format('ls {stack_root}/{stack_version}/ranger-{service_name}-plugin/lib/{jar_file}'),
sudo=True)
def setup_ranger_plugin_keystore(service_name, audit_db_is_enabled, stack_version, credential_file, xa_audit_db_password,
ssl_truststore_password, ssl_keystore_password, component_user, component_group, java_home):
stack_root = Script.get_stack_root()
cred_lib_path = format('{stack_root}/{stack_version}/ranger-{service_name}-plugin/install/lib/*')
cred_setup_prefix = (format('{stack_root}/{stack_version}/ranger-{service_name}-plugin/ranger_credential_helper.py'), '-l', cred_lib_path)
if audit_db_is_enabled:
cred_setup = cred_setup_prefix + ('-f', credential_file, '-k', 'auditDBCred', '-v', PasswordString(xa_audit_db_password), '-c', '1')
Execute(cred_setup, environment={'JAVA_HOME': java_home}, logoutput=True, sudo=True)
cred_setup = cred_setup_prefix + ('-f', credential_file, '-k', 'sslKeyStore', '-v', PasswordString(ssl_keystore_password), '-c', '1')
Execute(cred_setup, environment={'JAVA_HOME': java_home}, logoutput=True, sudo=True)
cred_setup = cred_setup_prefix + ('-f', credential_file, '-k', 'sslTrustStore', '-v', PasswordString(ssl_truststore_password), '-c', '1')
Execute(cred_setup, environment={'JAVA_HOME': java_home}, logoutput=True, sudo=True)
File(credential_file,
owner = component_user,
group = component_group,
mode = 0640
)
| 48.304569
| 168
| 0.710593
|
4a071d0a6cc3379f80ebd30742210808e46d146a
| 5,718
|
py
|
Python
|
jiant/utils/tokenizers.py
|
cjlovering/jiant
|
0120aeb86e8c1f5ffbf1b7d7549bce187dbb9b78
|
[
"MIT"
] | null | null | null |
jiant/utils/tokenizers.py
|
cjlovering/jiant
|
0120aeb86e8c1f5ffbf1b7d7549bce187dbb9b78
|
[
"MIT"
] | null | null | null |
jiant/utils/tokenizers.py
|
cjlovering/jiant
|
0120aeb86e8c1f5ffbf1b7d7549bce187dbb9b78
|
[
"MIT"
] | 1
|
2020-05-15T08:32:32.000Z
|
2020-05-15T08:32:32.000Z
|
"""
Tokenizer class
To add a tokenizer, add it below, inheriting from the
main Tokenizer class.
"""
import functools
import logging as log
import os
from sacremoses import MosesDetokenizer
from sacremoses import MosesTokenizer as SacreMosesTokenizer
from nltk.tokenize.simple import SpaceTokenizer
from jiant.huggingface_transformers_interface import input_module_uses_transformers
from transformers import (
BertTokenizer,
RobertaTokenizer,
AlbertTokenizer,
XLNetTokenizer,
OpenAIGPTTokenizer,
GPT2Tokenizer,
TransfoXLTokenizer,
XLMTokenizer,
)
class Tokenizer(object):
def tokenize(self, sentence):
raise NotImplementedError
def select_tokenizer(args):
"""
Select a sane default tokenizer.
"""
if args.tokenizer == "auto":
if input_module_uses_transformers(args.input_module):
tokenizer_name = args.input_module
else:
tokenizer_name = "MosesTokenizer"
else:
tokenizer_name = args.tokenizer
return tokenizer_name
class SplitCharsTokenizer(Tokenizer):
"""
This tokenizer splits a string (sentence or word) into individual characters.
"""
def __init__(self):
super().__init__()
def tokenize(self, sequence):
return list(sequence)
def detokenize(self, tokens):
return "".join(tokens)
class MosesTokenizer(Tokenizer):
def __init__(self):
super().__init__()
self._tokenizer = SacreMosesTokenizer()
self._detokenizer = MosesDetokenizer()
def tokenize(self, sentence):
return self._tokenizer.tokenize(sentence)
def detokenize(self, tokens):
"""Unescape Moses punctuation tokens.
Replaces escape sequences like ``&#91;`` with the original characters
(such as '['), so they better align to the original text.
"""
return [self._detokenizer.unescape_xml(t) for t in tokens]
def detokenize_ptb(self, tokens):
# Not a perfect detokenizer, but a "good-enough" stand in.
rep_dict = {
"-LSB-": "[",
"-RSB-": "]",
"-LRB-": "(",
"-RRB-": ")",
"-LCB-": "{",
"-RCB-": "}",
"``": '"',
"''": '"',
}
str1 = self._detokenizer.detokenize(replace_list(tokens, rep_dict))
return str1
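# get_tokenizer is memoized below so that repeated lookups of the same
# tokenizer name reuse one instance instead of reloading vocabularies.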
@functools.lru_cache(maxsize=8, typed=False)
def get_tokenizer(tokenizer_name):
log.info(f"\tLoading Tokenizer {tokenizer_name}")
if tokenizer_name.startswith("bert-"):
do_lower_case = tokenizer_name.endswith("uncased")
tokenizer = BertTokenizer.from_pretrained(tokenizer_name, do_lower_case=do_lower_case)
elif tokenizer_name.startswith("roberta-"):
tokenizer = RobertaTokenizer.from_pretrained(tokenizer_name)
elif tokenizer_name.startswith("albert-"):
tokenizer = AlbertTokenizer.from_pretrained(tokenizer_name)
elif tokenizer_name.startswith("xlnet-"):
do_lower_case = tokenizer_name.endswith("uncased")
tokenizer = XLNetTokenizer.from_pretrained(tokenizer_name, do_lower_case=do_lower_case)
elif tokenizer_name.startswith("openai-gpt"):
tokenizer = OpenAIGPTTokenizer.from_pretrained(tokenizer_name)
elif tokenizer_name.startswith("gpt2"):
tokenizer = GPT2Tokenizer.from_pretrained(tokenizer_name)
elif tokenizer_name.startswith("transfo-xl-"):
# TransformerXL is trained on data pretokenized with MosesTokenizer
tokenizer = MosesTokenizer()
elif tokenizer_name.startswith("xlm-"):
tokenizer = XLMTokenizer.from_pretrained(tokenizer_name)
elif tokenizer_name == "MosesTokenizer":
tokenizer = MosesTokenizer()
elif tokenizer_name == "SplitChars":
tokenizer = SplitCharsTokenizer()
elif tokenizer_name == "":
tokenizer = SpaceTokenizer()
else:
tokenizer = None
return tokenizer
def bert_get_tokenized_string_span_map(text, b_tokens, verbose=False):
"""
Given a string, an a BERT tokenization of the string, returns list of
[
bert_token,
start char index of token in string,
(exclusive) end char index of token in string,
]
There is some fuzziness around assignment of spaces (particularly because of UNK tokens)
but the spans should be contiguous.
"""
b_token_char_indices = []
text_i = 0
for b_token in b_tokens:
stripped_b_token = b_token.replace("##", "")
if b_token == "[UNK]":
continue
found_char_i = text[text_i:].find(stripped_b_token)
b_token_char_indices.append(text_i + found_char_i)
text_i += len(stripped_b_token) + found_char_i
b_token_char_indices.append(len(text))
result = []
b_token_char_indices_i = -1
end = 0
for i, b_token in enumerate(b_tokens):
prev_end = end
if b_token == "[UNK]":
start = prev_end
else:
b_token_char_indices_i += 1
start = b_token_char_indices[b_token_char_indices_i]
if i == len(b_tokens) - 1:
end = len(text)
elif b_token == "[UNK]":
end = b_token_char_indices[b_token_char_indices_i + 1]
elif b_token != "[UNK]" and b_tokens[i + 1] != "[UNK]":
end = b_token_char_indices[b_token_char_indices_i + 1]
elif b_tokens[i + 1] == "[UNK]":
end = start + len(b_token)
else:
raise RuntimeError()
if verbose:
print(b_token, start, end, repr(text[start:end]))
result.append((b_token, start, end))
return result
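# Illustrative usage (token values here are hypothetical): for
# text = "unaffable" and b_tokens = ["un", "##aff", "##able"], the result
# would be [("un", 0, 2), ("##aff", 2, 5), ("##able", 5, 9)].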
def replace_list(ls, d):
return [d.get(elem, elem) for elem in ls]
| 31.766667
| 95
| 0.649703
|
4a071d267abf81d97860464749812b943b20b756
| 4,592
|
py
|
Python
|
GetStarted/08_masking.py
|
c11/earthengine-py-notebooks
|
144b57e4d952da095ba73c3cc8ce2f36291162ff
|
[
"MIT"
] | 1
|
2020-05-31T14:19:59.000Z
|
2020-05-31T14:19:59.000Z
|
GetStarted/08_masking.py
|
c11/earthengine-py-notebooks
|
144b57e4d952da095ba73c3cc8ce2f36291162ff
|
[
"MIT"
] | null | null | null |
GetStarted/08_masking.py
|
c11/earthengine-py-notebooks
|
144b57e4d952da095ba73c3cc8ce2f36291162ff
|
[
"MIT"
] | null | null | null |
# %%
"""
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/GetStarted/08_masking.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/GetStarted/08_masking.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/GetStarted/08_masking.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
"""
# %%
"""
## Install Earth Engine API and geemap
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
"""
# %%
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
import google.colab
import geemap.eefolium as emap
except ImportError:
import geemap as emap
# Authenticates and initializes Earth Engine
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
# %%
"""
## Create an interactive map
The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function.
"""
# %%
Map = emap.Map(center=[40,-100], zoom=4)
Map.add_basemap('ROADMAP') # Add Google Map
Map
# %%
"""
## Add Earth Engine Python script
"""
# %%
# Add Earth Engine dataset
# This function gets NDVI from Landsat 5 imagery.
def getNDVI(image):
return image.normalizedDifference(['B4', 'B3'])
# Load two Landsat 5 images, 20 years apart.
image1 = ee.Image('LANDSAT/LT05/C01/T1_TOA/LT05_044034_19900604')
image2 = ee.Image('LANDSAT/LT05/C01/T1_TOA/LT05_044034_20100611')
# Compute NDVI from the scenes.
ndvi1 = getNDVI(image1)
ndvi2 = getNDVI(image2)
# Compute the difference in NDVI.
ndviDifference = ndvi2.subtract(ndvi1)
# Load the land mask from the SRTM DEM.
landMask = ee.Image('CGIAR/SRTM90_V4').mask()
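# mask() returns the image's mask band; the SRTM DEM is only defined over
# land, so its mask is 0 over ocean and can serve as a land mask.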
# Update the NDVI difference mask with the land mask.
maskedDifference = ndviDifference.updateMask(landMask)
# Display the masked result.
vizParams = {'min': -0.5, 'max': 0.5,
'palette': ['FF0000', 'FFFFFF', '0000FF']}
Map.setCenter(-122.2531, 37.6295, 9)
Map.addLayer(maskedDifference, vizParams, 'NDVI difference')
# %%
"""
## Display Earth Engine data layers
"""
# %%
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
| 45.92 | 1,021 | 0.744991 |
4a071dab27fe475b1d2776dd5dc09fe2ce76797a | 6,326 | py | Python | machin/utils/save_env.py | lorenzosteccanella/machin | 9d3ce87dbed820b5019211b0690b54613084d9e4 | ["MIT"] | 287 | 2020-06-13T05:19:50.000Z | 2022-03-31T04:46:32.000Z | machin/utils/save_env.py | lorenzosteccanella/machin | 9d3ce87dbed820b5019211b0690b54613084d9e4 | ["MIT"] | 19 | 2020-08-19T05:33:45.000Z | 2022-03-27T15:16:03.000Z | machin/utils/save_env.py | lorenzosteccanella/machin | 9d3ce87dbed820b5019211b0690b54613084d9e4 | ["MIT"] | 44 | 2020-07-06T00:41:44.000Z | 2022-03-29T17:05:08.000Z |
from datetime import datetime, timedelta
from typing import Union, Iterable
from os.path import join
import os
import shutil
from machin.utils.logging import default_logger
from machin.utils.prepare import prep_clear_dirs, prep_create_dirs
class SaveEnv:
def __init__(
self,
env_root: str,
restart_from_trial: Union[str, None] = None,
time_format="%Y_%m_%d_%H_%M_%S",
create_sub_dirs=True,
):
"""
        Create the default environment for saving. Creates something like::
<your environment root>
├── config
├── log
│ ├── images
│ └── train_log
└── model
Args:
env_root: root directory for all trials of the environment.
            restart_from_trial: instead of creating a new save environment
                for a new trial, use an existing save environment of an older
                trial; the old trial name should be in the format ``time_format``.
            time_format: Time formatter; setting it to an empty string will cause
                the save environment to use ``env_root`` directly instead of using
                sub-directories with a datetime name.
            create_sub_dirs: Whether to create the default sub-directories.
"""
self.env_root = env_root
self.time_format = time_format
if restart_from_trial is None:
self.env_create_time = datetime.now()
else:
self.env_create_time = datetime.strptime(
restart_from_trial, self.time_format
)
self._check_dirs()
if create_sub_dirs:
self.create_sub_dirs()
def create_dirs(self, dirs: Iterable[str]):
"""
Create additional directories in root.
Args:
dirs: Directories.
"""
prep_create_dirs(
[
join(self.env_root, self.env_create_time.strftime(self.time_format), d)
for d in dirs
]
)
def get_trial_root(self):
# pylint: disable=missing-docstring
return join(self.env_root, self.env_create_time.strftime(self.time_format))
def get_trial_config_dir(self):
# pylint: disable=missing-docstring
return join(
self.env_root, self.env_create_time.strftime(self.time_format), "config"
)
def get_trial_model_dir(self):
# pylint: disable=missing-docstring
return join(
self.env_root, self.env_create_time.strftime(self.time_format), "model"
)
def get_trial_image_dir(self):
# pylint: disable=missing-docstring
return join(
self.env_root,
self.env_create_time.strftime(self.time_format),
"log",
"images",
)
def get_trial_train_log_dir(self):
# pylint: disable=missing-docstring
return join(
self.env_root,
self.env_create_time.strftime(self.time_format),
"log",
"train_log",
)
def get_trial_time(self):
# pylint: disable=missing-docstring
return self.env_create_time
def clear_trial_config_dir(self):
# pylint: disable=missing-docstring
prep_clear_dirs(
[
join(
self.env_root,
self.env_create_time.strftime(self.time_format),
"config",
)
]
)
def clear_trial_model_dir(self):
# pylint: disable=missing-docstring
prep_clear_dirs(
[
join(
self.env_root,
self.env_create_time.strftime(self.time_format),
"model",
)
]
)
def clear_trial_image_dir(self):
# pylint: disable=missing-docstring
prep_clear_dirs(
[
join(
self.env_root,
self.env_create_time.strftime(self.time_format),
"log",
"images",
)
]
)
def clear_trial_train_log_dir(self):
# pylint: disable=missing-docstring
prep_clear_dirs(
[
join(
self.env_root,
self.env_create_time.strftime(self.time_format),
"log",
"train_log",
)
]
)
def remove_trials_older_than(
self,
diff_day: int = 0,
diff_hour: int = 1,
diff_minute: int = 0,
diff_second: int = 0,
):
"""
        By default this function removes all trials that started more than
        one hour before the current time.
Args:
diff_day: Difference in days.
diff_hour: Difference in hours.
diff_minute: Difference in minutes.
diff_second: Difference in seconds.
"""
        trial_list = os.listdir(self.env_root)
current_time = datetime.now()
diff_threshold = timedelta(
days=diff_day, hours=diff_hour, minutes=diff_minute, seconds=diff_second
)
for file in trial_list:
try:
time = datetime.strptime(file, self.time_format)
except ValueError:
# not a trial
pass
else:
diff_time = current_time - time
if diff_time > diff_threshold:
rm_path = join(self.env_root, file)
default_logger.info(f"Removing trial directory: {rm_path}")
shutil.rmtree(rm_path)
def create_sub_dirs(self):
root_dir = join(self.env_root, self.env_create_time.strftime(self.time_format))
prep_create_dirs(
(
join(root_dir, "model"),
join(root_dir, "config"),
join(root_dir, "log", "images"),
join(root_dir, "log", "train_log"),
)
)
def _check_dirs(self):
"""
        Override this function in your environment class to check the
        directory mapping.
Raises:
RuntimeError if directory mapping is invalid.
"""
pass
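# A minimal usage sketch (the root path is illustrative):
if __name__ == "__main__":
    env = SaveEnv("./trials")  # starts a new trial directory named by timestamp
    print(env.get_trial_root())       # ./trials/<%Y_%m_%d_%H_%M_%S>
    print(env.get_trial_model_dir())  # <trial root>/model
    env.remove_trials_older_than(diff_day=7, diff_hour=0)  # prune week-old trials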
| 30.12381 | 87 | 0.541574 |
4a071dc9fc3353dd0a7f496bc768fcc19484c137 | 1,825 | py | Python | tests/test_integration.py | mcfarljm/structural-imbalance | a41490bd5e9101cf826e4b76b0eb12a46cc0fb52 | ["Apache-2.0"] | 14 | 2019-12-04T18:30:34.000Z | 2022-01-29T07:05:11.000Z | tests/test_integration.py | mcfarljm/structural-imbalance | a41490bd5e9101cf826e4b76b0eb12a46cc0fb52 | ["Apache-2.0"] | 13 | 2019-12-13T19:35:50.000Z | 2021-07-21T17:00:26.000Z | tests/test_integration.py | mcfarljm/structural-imbalance | a41490bd5e9101cf826e4b76b0eb12a46cc0fb52 | ["Apache-2.0"] | 14 | 2019-12-13T19:21:50.000Z | 2021-07-12T03:09:16.000Z |
# Copyright 2020 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import unittest
import os
import sys
from tests import qpu_available
project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class IntegrationTests(unittest.TestCase):
def runDemo(self, hardware):
demo_file = os.path.join(project_dir, 'structural_imbalance.py')
output = subprocess.check_output([sys.executable, demo_file, hardware])
return output.decode("utf-8")
def test_structural_imbalance_cpu(self):
output = self.runDemo("--cpu")
output = output.upper()
if os.getenv('DEBUG_OUTPUT'):
print("Example output \n" + output)
        # Simple check to make sure a "FOUND X VIOLATIONS OUT OF Y EDGES" message was printed
self.assertIn("VIOLATIONS OUT OF", output)
@unittest.skipUnless(qpu_available(), "requires QPU")
def test_structural_imbalance_qpu(self):
output = self.runDemo("--qpu")
output = output.upper()
if os.getenv('DEBUG_OUTPUT'):
print("Example output \n" + output)
        # Simple check to make sure a "FOUND X VIOLATIONS OUT OF Y EDGES" message was printed
self.assertIn("VIOLATIONS OUT OF", output)
if __name__ == '__main__':
unittest.main()
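# To run these tests locally (commands are illustrative):
#   python -m unittest tests.test_integration
#   DEBUG_OUTPUT=1 python -m unittest tests.test_integration  # also echo demo output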
| 33.181818 | 94 | 0.70137 |
4a071f07c1e8c72a1ff76bfebbc732c86fba8d8e | 28,209 | py | Python | rlkit/launchers/launcher_util.py | JieRen98/rlkit-pmoe | 5ef4e056764d2c4a8d6e4c6da89295304b1fec3f | ["MIT"] | 3 | 2021-06-15T03:03:52.000Z | 2021-12-20T03:08:03.000Z | rlkit/launchers/launcher_util.py | JieRen98/rlkit-pmoe | 5ef4e056764d2c4a8d6e4c6da89295304b1fec3f | ["MIT"] | null | null | null | rlkit/launchers/launcher_util.py | JieRen98/rlkit-pmoe | 5ef4e056764d2c4a8d6e4c6da89295304b1fec3f | ["MIT"] | null | null | null |
import datetime
import json
import os
import os.path as osp
import pickle
import random
import sys
import time
from collections import namedtuple
import __main__ as main
import dateutil.tz
import numpy as np
import rlkit.pythonplusplus as ppp
from rlkit.core import logger
from rlkit.launchers import conf
from rlkit.torch.pytorch_util import set_gpu_mode
GitInfo = namedtuple(
'GitInfo',
[
'directory',
'code_diff',
'code_diff_staged',
'commit_hash',
'branch_name',
],
)
def get_git_infos(dirs):
try:
import git
git_infos = []
for directory in dirs:
# Idk how to query these things, so I'm just doing try-catch
try:
repo = git.Repo(directory)
try:
branch_name = repo.active_branch.name
except TypeError:
branch_name = '[DETACHED]'
git_infos.append(GitInfo(
directory=directory,
code_diff=repo.git.diff(None),
code_diff_staged=repo.git.diff('--staged'),
commit_hash=repo.head.commit.hexsha,
branch_name=branch_name,
))
except git.exc.InvalidGitRepositoryError as e:
print("Not a valid git repo: {}".format(directory))
except ImportError:
git_infos = None
return git_infos
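# e.g. get_git_infos(["/path/to/repo"]) (path illustrative) returns a list of
# GitInfo namedtuples, one per valid repository; it returns None if GitPython
# is not installed, and skips directories that are not git repos (printing a
# warning).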
def recursive_items(dictionary):
"""
Get all (key, item) recursively in a potentially recursive dictionary.
Usage:
```
x = {
'foo' : {
'bar' : 5
}
}
recursive_items(x)
# output:
# ('foo', {'bar' : 5})
# ('bar', 5)
```
:param dictionary:
:return:
"""
for key, value in dictionary.items():
yield key, value
if type(value) is dict:
yield from recursive_items(value)
def save_experiment_data(dictionary, log_dir):
with open(log_dir + '/experiment.pkl', 'wb') as handle:
pickle.dump(dictionary, handle, protocol=pickle.HIGHEST_PROTOCOL)
def run_experiment_here(
experiment_function,
variant=None,
exp_id=0,
seed=None,
use_gpu=True,
# Logger params:
exp_prefix="default",
snapshot_mode='last',
snapshot_gap=1,
git_infos=None,
script_name=None,
base_log_dir=None,
force_randomize_seed=False,
log_dir=None,
**setup_logger_kwargs
):
"""
Run an experiment locally without any serialization.
:param experiment_function: Function. `variant` will be passed in as its
only argument.
:param exp_prefix: Experiment prefix for the save file.
:param variant: Dictionary passed in to `experiment_function`.
:param exp_id: Experiment ID. Should be unique across all
    experiments. Note that one experiment may correspond to multiple seeds.
:param seed: Seed used for this experiment.
    :param use_gpu: Run with GPU. By default True.
:param script_name: Name of the running script
:param log_dir: If set, set the log directory to this. Otherwise,
the directory will be auto-generated based on the exp_prefix.
:return:
"""
if variant is None:
variant = {}
variant['exp_id'] = str(exp_id)
if force_randomize_seed or seed is None:
seed = random.randint(0, 100000)
variant['seed'] = str(seed)
reset_execution_environment()
actual_log_dir = setup_logger(
exp_prefix=exp_prefix,
variant=variant,
exp_id=exp_id,
seed=seed,
snapshot_mode=snapshot_mode,
snapshot_gap=snapshot_gap,
base_log_dir=base_log_dir,
log_dir=log_dir,
git_infos=git_infos,
script_name=script_name,
**setup_logger_kwargs
)
set_seed(seed)
set_gpu_mode(use_gpu)
run_experiment_here_kwargs = dict(
variant=variant,
exp_id=exp_id,
seed=seed,
use_gpu=use_gpu,
exp_prefix=exp_prefix,
snapshot_mode=snapshot_mode,
snapshot_gap=snapshot_gap,
git_infos=git_infos,
script_name=script_name,
base_log_dir=base_log_dir,
**setup_logger_kwargs
)
save_experiment_data(
dict(
run_experiment_here_kwargs=run_experiment_here_kwargs
),
actual_log_dir
)
return experiment_function(variant)
def create_exp_name(exp_prefix, exp_id=0, seed=0):
"""
Create a semi-unique experiment name that has a timestamp
:param exp_prefix:
:param exp_id:
:return:
"""
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
return "%s_%s_%04d--s-%d" % (exp_prefix, timestamp, exp_id, seed)
def create_log_dir(
exp_prefix,
exp_id=0,
seed=0,
base_log_dir=None,
include_exp_prefix_sub_dir=True,
):
"""
Creates and returns a unique log directory.
:param exp_prefix: All experiments with this prefix will have log
directories be under this directory.
:param exp_id: The number of the specific experiment run within this
experiment.
    :param base_log_dir: The directory where all logs should be saved.
:return:
"""
exp_name = create_exp_name(exp_prefix, exp_id=exp_id,
seed=seed)
if base_log_dir is None:
base_log_dir = conf.LOCAL_LOG_DIR
if include_exp_prefix_sub_dir:
log_dir = osp.join(base_log_dir, exp_prefix.replace("_", "-"), exp_name)
else:
log_dir = osp.join(base_log_dir, exp_name)
if osp.exists(log_dir):
print("WARNING: Log directory already exists {}".format(log_dir))
os.makedirs(log_dir, exist_ok=True)
return log_dir
def setup_logger(
exp_prefix="default",
variant=None,
text_log_file="debug.log",
variant_log_file="variant.json",
tabular_log_file="progress.csv",
snapshot_mode="last",
snapshot_gap=1,
log_tabular_only=False,
log_dir=None,
git_infos=None,
script_name=None,
**create_log_dir_kwargs
):
"""
Set up logger to have some reasonable default settings.
Will save log output to
        base_log_dir/exp_prefix/exp_name.
exp_name will be auto-generated to be unique.
If log_dir is specified, then that directory is used as the output dir.
:param exp_prefix: The sub-directory for this specific experiment.
:param variant:
:param text_log_file:
:param variant_log_file:
:param tabular_log_file:
:param snapshot_mode:
:param log_tabular_only:
:param snapshot_gap:
:param log_dir:
:param git_infos:
:param script_name: If set, save the script name to this.
:return:
"""
if git_infos is None:
git_infos = get_git_infos(conf.CODE_DIRS_TO_MOUNT)
first_time = log_dir is None
if first_time:
log_dir = create_log_dir(exp_prefix, **create_log_dir_kwargs)
if variant is not None:
logger.log("Variant:")
logger.log(json.dumps(dict_to_safe_json(variant), indent=2))
variant_log_path = osp.join(log_dir, variant_log_file)
logger.log_variant(variant_log_path, variant)
tabular_log_path = osp.join(log_dir, tabular_log_file)
text_log_path = osp.join(log_dir, text_log_file)
logger.add_text_output(text_log_path)
if first_time:
logger.add_tabular_output(tabular_log_path)
else:
logger._add_output(tabular_log_path, logger._tabular_outputs,
logger._tabular_fds, mode='a')
for tabular_fd in logger._tabular_fds:
logger._tabular_header_written.add(tabular_fd)
logger.set_snapshot_dir(log_dir)
logger.set_snapshot_mode(snapshot_mode)
logger.set_snapshot_gap(snapshot_gap)
logger.set_log_tabular_only(log_tabular_only)
exp_name = log_dir.split("/")[-1]
logger.push_prefix("[%s] " % exp_name)
if git_infos is not None:
for (
directory, code_diff, code_diff_staged, commit_hash, branch_name
) in git_infos:
if directory[-1] == '/':
directory = directory[:-1]
diff_file_name = directory[1:].replace("/", "-") + ".patch"
diff_staged_file_name = (
directory[1:].replace("/", "-") + "_staged.patch"
)
if code_diff is not None and len(code_diff) > 0:
with open(osp.join(log_dir, diff_file_name), "w") as f:
f.write(code_diff + '\n')
if code_diff_staged is not None and len(code_diff_staged) > 0:
with open(osp.join(log_dir, diff_staged_file_name), "w") as f:
f.write(code_diff_staged + '\n')
with open(osp.join(log_dir, "git_infos.txt"), "a") as f:
f.write("directory: {}\n".format(directory))
f.write("git hash: {}\n".format(commit_hash))
f.write("git branch name: {}\n\n".format(branch_name))
if script_name is not None:
with open(osp.join(log_dir, "script_name.txt"), "w") as f:
f.write(script_name)
return log_dir
def dict_to_safe_json(d):
"""
Convert each value in the dictionary into a JSON'able primitive.
:param d:
:return:
"""
new_d = {}
for key, item in d.items():
if safe_json(item):
new_d[key] = item
else:
if isinstance(item, dict):
new_d[key] = dict_to_safe_json(item)
else:
new_d[key] = str(item)
return new_d
def safe_json(data):
if data is None:
return True
elif isinstance(data, (bool, int, float)):
return True
elif isinstance(data, (tuple, list)):
return all(safe_json(x) for x in data)
elif isinstance(data, dict):
return all(isinstance(k, str) and safe_json(v) for k, v in data.items())
return False
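# Example: values that are not JSON primitives are stringified recursively,
# e.g. dict_to_safe_json({"lr": 3e-4, "net": object()}) returns
# {"lr": 0.0003, "net": "<object object at 0x...>"} (the repr is illustrative).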
def set_seed(seed):
"""
    Set the seed for Python's ``random`` and NumPy's random number generators
    (torch is not seeded here).
:param seed:
:return: None
"""
seed = int(seed)
random.seed(seed)
np.random.seed(seed)
def reset_execution_environment():
"""
Call this between calls to separate experiments.
:return:
"""
logger.reset()
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
"""
Below is doodad-specific code
"""
ec2_okayed = False
gpu_ec2_okayed = False
first_sss_launch = True
try:
import doodad.mount as mount
from doodad.utils import REPO_DIR
CODE_MOUNTS = [
mount.MountLocal(local_dir=REPO_DIR, pythonpath=True),
]
for code_dir in conf.CODE_DIRS_TO_MOUNT:
CODE_MOUNTS.append(mount.MountLocal(local_dir=code_dir, pythonpath=True))
NON_CODE_MOUNTS = []
for non_code_mapping in conf.DIR_AND_MOUNT_POINT_MAPPINGS:
NON_CODE_MOUNTS.append(mount.MountLocal(**non_code_mapping))
SSS_CODE_MOUNTS = []
SSS_NON_CODE_MOUNTS = []
if hasattr(conf, 'SSS_DIR_AND_MOUNT_POINT_MAPPINGS'):
for non_code_mapping in conf.SSS_DIR_AND_MOUNT_POINT_MAPPINGS:
SSS_NON_CODE_MOUNTS.append(mount.MountLocal(**non_code_mapping))
if hasattr(conf, 'SSS_CODE_DIRS_TO_MOUNT'):
for code_dir in conf.SSS_CODE_DIRS_TO_MOUNT:
SSS_CODE_MOUNTS.append(
mount.MountLocal(local_dir=code_dir, pythonpath=True)
)
except ImportError:
print("doodad not detected")
target_mount = None
def run_experiment(
method_call,
mode='local',
exp_prefix='default',
seed=None,
variant=None,
exp_id=0,
prepend_date_to_exp_prefix=True,
use_gpu=False,
snapshot_mode='last',
snapshot_gap=1,
base_log_dir=None,
local_input_dir_to_mount_point_dict=None, # TODO(vitchyr): test this
# local settings
skip_wait=False,
# ec2 settings
sync_interval=180,
region='us-east-1',
instance_type=None,
spot_price=None,
verbose=False,
num_exps_per_instance=1,
# sss settings
time_in_mins=None,
# ssh settings
ssh_host=None,
# gcp
gcp_kwargs=None,
):
"""
Usage:
```
def foo(variant):
x = variant['x']
y = variant['y']
logger.log("sum", x+y)
variant = {
'x': 4,
'y': 3,
}
run_experiment(foo, variant, exp_prefix="my-experiment")
```
Results are saved to
`base_log_dir/<date>-my-experiment/<date>-my-experiment-<unique-id>`
By default, the base_log_dir is determined by
`config.LOCAL_LOG_DIR/`
:param method_call: a function that takes in a dictionary as argument
:param mode: A string:
- 'local'
- 'local_docker'
- 'ec2'
- 'here_no_doodad': Run without doodad call
:param exp_prefix: name of experiment
:param seed: Seed for this specific trial.
:param variant: Dictionary
:param exp_id: One experiment = one variant setting + multiple seeds
:param prepend_date_to_exp_prefix: If False, do not prepend the date to
the experiment directory.
:param use_gpu:
:param snapshot_mode: See rlkit.core.logging
:param snapshot_gap: See rlkit.core.logging
    :param base_log_dir: If set, overrides the default base log directory.
:param sync_interval: How often to sync s3 data (in seconds).
:param local_input_dir_to_mount_point_dict: Dictionary for doodad.
:param ssh_host: the name of the host you want to ssh onto, should correspond to an entry in
config.py of the following form:
SSH_HOSTS=dict(
ssh_host=dict(
username='username',
hostname='hostname/ip address',
)
)
- if ssh_host is set to None, you will use ssh_host specified by
config.SSH_DEFAULT_HOST
:return:
"""
try:
import doodad
import doodad.mode
import doodad.ssh
except ImportError:
print("Doodad not set up! Running experiment here.")
mode = 'here_no_doodad'
global ec2_okayed
global gpu_ec2_okayed
global target_mount
global first_sss_launch
"""
Sanitize inputs as needed
"""
if seed is None:
seed = random.randint(0, 100000)
if variant is None:
variant = {}
if mode == 'ssh' and base_log_dir is None:
base_log_dir = conf.SSH_LOG_DIR
if base_log_dir is None:
if mode == 'sss':
base_log_dir = conf.SSS_LOG_DIR
else:
base_log_dir = conf.LOCAL_LOG_DIR
for key, value in ppp.recursive_items(variant):
# This check isn't really necessary, but it's to prevent myself from
# forgetting to pass a variant through dot_map_dict_to_nested_dict.
if "." in key:
raise Exception(
"Variants should not have periods in keys. Did you mean to "
"convert {} into a nested dictionary?".format(key)
)
if prepend_date_to_exp_prefix:
exp_prefix = time.strftime("%m-%d") + "-" + exp_prefix
variant['seed'] = str(seed)
variant['exp_id'] = str(exp_id)
variant['exp_prefix'] = str(exp_prefix)
variant['instance_type'] = str(instance_type)
try:
import git
doodad_path = osp.abspath(osp.join(
osp.dirname(doodad.__file__),
os.pardir
))
dirs = conf.CODE_DIRS_TO_MOUNT + [doodad_path]
git_infos = []
for directory in dirs:
# Idk how to query these things, so I'm just doing try-catch
try:
repo = git.Repo(directory)
try:
branch_name = repo.active_branch.name
except TypeError:
branch_name = '[DETACHED]'
git_infos.append(GitInfo(
directory=directory,
code_diff=repo.git.diff(None),
code_diff_staged=repo.git.diff('--staged'),
commit_hash=repo.head.commit.hexsha,
branch_name=branch_name,
))
except git.exc.InvalidGitRepositoryError:
pass
except ImportError:
git_infos = None
run_experiment_kwargs = dict(
exp_prefix=exp_prefix,
variant=variant,
exp_id=exp_id,
seed=seed,
use_gpu=use_gpu,
snapshot_mode=snapshot_mode,
snapshot_gap=snapshot_gap,
git_infos=git_infos,
script_name=main.__file__,
)
if mode == 'here_no_doodad':
run_experiment_kwargs['base_log_dir'] = base_log_dir
return run_experiment_here(
method_call,
**run_experiment_kwargs
)
"""
Safety Checks
"""
if mode == 'ec2' or mode == 'gcp':
if not ec2_okayed and not query_yes_no(
"{} costs money. Are you sure you want to run?".format(mode)
):
sys.exit(1)
if not gpu_ec2_okayed and use_gpu:
if not query_yes_no(
"{} is more expensive with GPUs. Confirm?".format(mode)
):
sys.exit(1)
gpu_ec2_okayed = True
ec2_okayed = True
"""
GPU vs normal configs
"""
if use_gpu:
docker_image = conf.GPU_DOODAD_DOCKER_IMAGE
if instance_type is None:
instance_type = conf.GPU_INSTANCE_TYPE
else:
assert instance_type[0] == 'g'
if spot_price is None:
spot_price = conf.GPU_SPOT_PRICE
else:
docker_image = conf.DOODAD_DOCKER_IMAGE
if instance_type is None:
instance_type = conf.INSTANCE_TYPE
if spot_price is None:
spot_price = conf.SPOT_PRICE
if mode == 'sss':
singularity_image = conf.SSS_IMAGE
elif mode in ['local_singularity', 'slurm_singularity']:
singularity_image = conf.SINGULARITY_IMAGE
else:
singularity_image = None
"""
Get the mode
"""
mode_kwargs = {}
if use_gpu and mode == 'ec2':
image_id = conf.REGION_TO_GPU_AWS_IMAGE_ID[region]
if region == 'us-east-1':
avail_zone = conf.REGION_TO_GPU_AWS_AVAIL_ZONE.get(region, "us-east-1b")
mode_kwargs['extra_ec2_instance_kwargs'] = dict(
Placement=dict(
AvailabilityZone=avail_zone,
),
)
else:
image_id = None
if hasattr(conf, "AWS_S3_PATH"):
aws_s3_path = conf.AWS_S3_PATH
else:
aws_s3_path = None
"""
Create mode
"""
if mode == 'local':
dmode = doodad.mode.Local(skip_wait=skip_wait)
elif mode == 'local_docker':
dmode = doodad.mode.LocalDocker(
image=docker_image,
gpu=use_gpu,
)
elif mode == 'ssh':
        if ssh_host is None:
ssh_dict = conf.SSH_HOSTS[conf.SSH_DEFAULT_HOST]
else:
ssh_dict = conf.SSH_HOSTS[ssh_host]
credentials = doodad.ssh.credentials.SSHCredentials(
username=ssh_dict['username'],
hostname=ssh_dict['hostname'],
identity_file=conf.SSH_PRIVATE_KEY
)
dmode = doodad.mode.SSHDocker(
credentials=credentials,
image=docker_image,
gpu=use_gpu,
)
elif mode == 'local_singularity':
dmode = doodad.mode.LocalSingularity(
image=singularity_image,
gpu=use_gpu,
)
elif mode == 'slurm_singularity' or mode == 'sss':
assert time_in_mins is not None, "Must approximate/set time in minutes"
if use_gpu:
kwargs = conf.SLURM_GPU_CONFIG
else:
kwargs = conf.SLURM_CPU_CONFIG
if mode == 'slurm_singularity':
dmode = doodad.mode.SlurmSingularity(
image=singularity_image,
gpu=use_gpu,
time_in_mins=time_in_mins,
skip_wait=skip_wait,
pre_cmd=conf.SINGULARITY_PRE_CMDS,
**kwargs
)
else:
dmode = doodad.mode.ScriptSlurmSingularity(
image=singularity_image,
gpu=use_gpu,
time_in_mins=time_in_mins,
skip_wait=skip_wait,
pre_cmd=conf.SSS_PRE_CMDS,
**kwargs
)
elif mode == 'ec2':
# Do this separately in case someone does not have EC2 configured
dmode = doodad.mode.EC2AutoconfigDocker(
image=docker_image,
image_id=image_id,
region=region,
instance_type=instance_type,
spot_price=spot_price,
s3_log_prefix=exp_prefix,
# Ask Vitchyr or Steven from an explanation, but basically we
# will start just making the sub-directories within rlkit rather
# than relying on doodad to do that.
s3_log_name="",
gpu=use_gpu,
aws_s3_path=aws_s3_path,
num_exps=num_exps_per_instance,
**mode_kwargs
)
elif mode == 'gcp':
image_name = conf.GCP_IMAGE_NAME
if use_gpu:
image_name = conf.GCP_GPU_IMAGE_NAME
if gcp_kwargs is None:
gcp_kwargs = {}
config_kwargs = {
**conf.GCP_DEFAULT_KWARGS,
**dict(image_name=image_name),
**gcp_kwargs
}
dmode = doodad.mode.GCPDocker(
image=docker_image,
gpu=use_gpu,
gcp_bucket_name=conf.GCP_BUCKET_NAME,
gcp_log_prefix=exp_prefix,
gcp_log_name="",
**config_kwargs
)
else:
raise NotImplementedError("Mode not supported: {}".format(mode))
"""
Get the mounts
"""
mounts = create_mounts(
base_log_dir=base_log_dir,
mode=mode,
sync_interval=sync_interval,
local_input_dir_to_mount_point_dict=local_input_dir_to_mount_point_dict,
)
"""
Get the outputs
"""
launch_locally = None
target = conf.RUN_DOODAD_EXPERIMENT_SCRIPT_PATH
if mode == 'ec2':
# Ignored since I'm setting the snapshot dir directly
base_log_dir_for_script = None
run_experiment_kwargs['force_randomize_seed'] = True
# The snapshot dir needs to be specified for S3 because S3 will
# automatically create the experiment director and sub-directory.
snapshot_dir_for_script = conf.OUTPUT_DIR_FOR_DOODAD_TARGET
elif mode == 'local':
base_log_dir_for_script = base_log_dir
# The snapshot dir will be automatically created
snapshot_dir_for_script = None
elif mode == 'local_docker':
base_log_dir_for_script = conf.OUTPUT_DIR_FOR_DOODAD_TARGET
# The snapshot dir will be automatically created
snapshot_dir_for_script = None
elif mode == 'ssh':
base_log_dir_for_script = conf.OUTPUT_DIR_FOR_DOODAD_TARGET
# The snapshot dir will be automatically created
snapshot_dir_for_script = None
elif mode in ['local_singularity', 'slurm_singularity', 'sss']:
base_log_dir_for_script = base_log_dir
# The snapshot dir will be automatically created
snapshot_dir_for_script = None
launch_locally = True
if mode == 'sss':
dmode.set_first_time(first_sss_launch)
first_sss_launch = False
target = conf.SSS_RUN_DOODAD_EXPERIMENT_SCRIPT_PATH
elif mode == 'here_no_doodad':
base_log_dir_for_script = base_log_dir
# The snapshot dir will be automatically created
snapshot_dir_for_script = None
elif mode == 'gcp':
# Ignored since I'm setting the snapshot dir directly
base_log_dir_for_script = None
run_experiment_kwargs['force_randomize_seed'] = True
snapshot_dir_for_script = conf.OUTPUT_DIR_FOR_DOODAD_TARGET
else:
raise NotImplementedError("Mode not supported: {}".format(mode))
run_experiment_kwargs['base_log_dir'] = base_log_dir_for_script
target_mount = doodad.launch_python(
target=target,
mode=dmode,
mount_points=mounts,
args={
'method_call': method_call,
'output_dir': snapshot_dir_for_script,
'run_experiment_kwargs': run_experiment_kwargs,
'mode': mode,
},
use_cloudpickle=True,
target_mount=target_mount,
verbose=verbose,
launch_locally=launch_locally,
)
def create_mounts(
mode,
base_log_dir,
sync_interval=180,
local_input_dir_to_mount_point_dict=None,
):
if mode == 'sss':
code_mounts = SSS_CODE_MOUNTS
non_code_mounts = SSS_NON_CODE_MOUNTS
else:
code_mounts = CODE_MOUNTS
non_code_mounts = NON_CODE_MOUNTS
if local_input_dir_to_mount_point_dict is None:
local_input_dir_to_mount_point_dict = {}
else:
raise NotImplementedError("TODO(vitchyr): Implement this")
mounts = [m for m in code_mounts]
for dir, mount_point in local_input_dir_to_mount_point_dict.items():
mounts.append(mount.MountLocal(
local_dir=dir,
mount_point=mount_point,
pythonpath=False,
))
if mode != 'local':
for m in non_code_mounts:
mounts.append(m)
if mode == 'ec2':
output_mount = mount.MountS3(
s3_path='',
mount_point=conf.OUTPUT_DIR_FOR_DOODAD_TARGET,
output=True,
sync_interval=sync_interval,
include_types=('*.txt', '*.csv', '*.json', '*.gz', '*.tar',
'*.log', '*.pkl', '*.mp4', '*.png', '*.jpg',
'*.jpeg', '*.patch'),
)
elif mode == 'gcp':
output_mount = mount.MountGCP(
gcp_path='',
mount_point=conf.OUTPUT_DIR_FOR_DOODAD_TARGET,
output=True,
gcp_bucket_name=conf.GCP_BUCKET_NAME,
sync_interval=sync_interval,
include_types=('*.txt', '*.csv', '*.json', '*.gz', '*.tar',
'*.log', '*.pkl', '*.mp4', '*.png', '*.jpg',
'*.jpeg', '*.patch'),
)
elif mode in ['local', 'local_singularity', 'slurm_singularity', 'sss']:
# To save directly to local files (singularity does this), skip mounting
output_mount = mount.MountLocal(
local_dir=base_log_dir,
mount_point=None,
output=True,
)
elif mode == 'local_docker':
output_mount = mount.MountLocal(
local_dir=base_log_dir,
mount_point=conf.OUTPUT_DIR_FOR_DOODAD_TARGET,
output=True,
)
elif mode == 'ssh':
output_mount = mount.MountLocal(
local_dir=base_log_dir,
mount_point=conf.OUTPUT_DIR_FOR_DOODAD_TARGET,
output=True,
)
else:
raise NotImplementedError("Mode not supported: {}".format(mode))
mounts.append(output_mount)
return mounts
| 31.624439 | 96 | 0.604984 |
4a071fcc57e31f877c0840bcf4240e1864ced76b | 5,678 | py | Python | tests/backends/test_sprints.py | limejump/agile-insights | 2e4f454f83ad7d3d6e071c3dbc297946f46a671c | ["MIT"] | null | null | null | tests/backends/test_sprints.py | limejump/agile-insights | 2e4f454f83ad7d3d6e071c3dbc297946f46a671c | ["MIT"] | 3 | 2021-02-24T15:01:03.000Z | 2021-04-07T17:28:37.000Z | tests/backends/test_sprints.py | limejump/agile-insights | 2e4f454f83ad7d3d6e071c3dbc297946f46a671c | ["MIT"] | null | null | null |
import pytest
from datetime import datetime, timedelta, timezone
from lenses import lens
from backends.jira.parse import IssueTypes, JiraIssue, Sprint, SprintMetrics, StatusMetrics, StatusTypes
@pytest.fixture
def basic_scenario():
return (
{
"id": 1,
"goal": "Achieve Something",
"name": "Usain",
"state": "closed",
"startDate": "2020-01-01T09:00:00.000+0100",
"endDate": "2020-01-14T09:00:00.000+0100",
"completeDate": "2020-01-14T09:00:00.000+0100"
},
Sprint(
id_=1,
goal="Achieve Something",
name="Usain",
state="closed",
start=datetime(
2020, 1, 1, 9, 0, 0, tzinfo=timezone(timedelta(hours=1))),
end=datetime(
2020, 1, 14, 9, 0, 0, tzinfo=timezone(timedelta(hours=1))))
)
def test_basic_parsing(basic_scenario):
raw_json, result = basic_scenario
assert Sprint.from_parsed_json(raw_json) == result
@pytest.fixture
def sprint_issues_lens():
return lens.issues
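# `lens` comes from the `lenses` package: `lens.issues` is an unbound lens, and
# `sprint_issues_lens[0].sprint_metrics.set(x)(sprint)` returns a copy of
# `sprint` with `issues[0].sprint_metrics` replaced by `x`, leaving the
# original object untouched.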
@pytest.fixture
def base_issues():
return [
JiraIssue(
name="An Issue",
summary="Issue for sprint",
epic=None,
type_=IssueTypes.story,
status=StatusTypes.todo,
story_points=5.0,
labels=set(),
subtasks=[],
status_metrics=StatusMetrics(
started=False,
finished=False,
start=None,
end=None,
days_taken=None),
sprint_metrics=SprintMetrics(sprint_additions=[]))
]
def test_sprint_with_issue(
basic_scenario, base_issues, sprint_issues_lens):
raw_json, result = basic_scenario
sprint = Sprint.from_parsed_json(raw_json, lambda x: base_issues)
result = sprint_issues_lens.set(base_issues)(result)
assert sprint == result
def test_no_sprint_metrics_means_unplanned(basic_scenario, base_issues):
raw_json, _ = basic_scenario
sprint = Sprint.from_parsed_json(raw_json, lambda x: base_issues)
assert sprint.planned_issue(sprint.issues[0]) is False
def test_planned_issue_added_at_sprint_start(
basic_scenario, base_issues, sprint_issues_lens):
raw_json, _ = basic_scenario
sprint = Sprint.from_parsed_json(raw_json, lambda x: base_issues)
sprint = sprint_issues_lens[0].sprint_metrics.set(
SprintMetrics(
sprint_additions=[{
"timestamp": datetime(
2020, 1, 1, 9, 0, 0, tzinfo=timezone(timedelta(hours=1))),
"sprint_id": 1
}]
)
)(sprint)
assert sprint.planned_issue(sprint.issues[0]) is True
def test_planned_issue_added_before_sprint_start(
basic_scenario, base_issues, sprint_issues_lens):
raw_json, _ = basic_scenario
sprint = Sprint.from_parsed_json(raw_json, lambda x: base_issues)
sprint = sprint_issues_lens[0].sprint_metrics.set(
SprintMetrics(
sprint_additions=[{
"timestamp": datetime(
2020, 1, 1, 8, 59, 0, tzinfo=timezone(timedelta(hours=1))),
"sprint_id": 1
}]
)
)(sprint)
assert sprint.planned_issue(sprint.issues[0]) is True
def test_unplanned_issue_added_after_sprint_start(
basic_scenario, base_issues, sprint_issues_lens):
raw_json, _ = basic_scenario
sprint = Sprint.from_parsed_json(raw_json, lambda x: base_issues)
sprint = sprint_issues_lens[0].sprint_metrics.set(
SprintMetrics(
sprint_additions=[{
"timestamp": datetime(
2020, 1, 1, 9, 1, 0, tzinfo=timezone(timedelta(hours=1))),
"sprint_id": 1
}]
)
)(sprint)
assert sprint.planned_issue(sprint.issues[0]) is False
def test_started_in_sprint(
basic_scenario, base_issues, sprint_issues_lens):
raw_json, _ = basic_scenario
sprint = Sprint.from_parsed_json(raw_json, lambda x: base_issues)
sprint = sprint_issues_lens[0].status_metrics.set(
StatusMetrics(
started=False,
finished=True,
start=datetime(
2020, 1, 1, 9, 0, 0, tzinfo=timezone(timedelta(hours=1))),
end=datetime(
2020, 1, 2, 9, 0, 0, tzinfo=timezone(timedelta(hours=1))),
days_taken=1),
    )(sprint)
    assert sprint.started_in_sprint(sprint.issues[0]) is True
def test_started_because_finished_in_sprint(
basic_scenario, base_issues, sprint_issues_lens):
raw_json, _ = basic_scenario
sprint = Sprint.from_parsed_json(raw_json, lambda x: base_issues)
sprint = sprint_issues_lens[0].status_metrics.set(
StatusMetrics(
started=False,
finished=True,
start=None,
end=datetime(
2020, 1, 2, 9, 0, 0, tzinfo=timezone(timedelta(hours=1))),
days_taken=1),
)(sprint)
assert sprint.started_in_sprint(sprint.issues[0]) is True
def test_not_started_in_sprint(
basic_scenario, base_issues, sprint_issues_lens):
raw_json, _ = basic_scenario
sprint = Sprint.from_parsed_json(raw_json, lambda x: base_issues)
sprint = sprint_issues_lens[0].status_metrics.set(
StatusMetrics(
started=False,
finished=True,
start=datetime(
2019, 12, 30, 9, 0, 0, tzinfo=timezone(timedelta(hours=1))),
end=datetime(
2020, 1, 1, 8, 59, 0, tzinfo=timezone(timedelta(hours=1))),
days_taken=1),
)(sprint)
assert sprint.started_in_sprint(sprint.issues[0]) is False
| 32.632184 | 104 | 0.613244 |