Dataset schema (one row per source file):

| column | type | range / classes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 996k |
| ext | string | 8 classes |
| lang | string | 1 value (Python) |
| max_stars_repo_path | string | length 4 to 245 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64, nullable | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 4 to 245 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64, nullable | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 4 to 245 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64, nullable | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 4 to 996k |
| avg_line_length | float64 | 1.33 to 58.2k |
| max_line_length | int64 | 2 to 323k |
| alphanum_fraction | float64 | 0 to 0.97 |
| content_no_comment | string | length 0 to 946k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |

hexsha: 7903fd0ee7c73dba66b4a548782c9343fc95b6ec | size: 3,928 | ext: py | lang: Python
path: custom_components/mintmobile/api.py | repo: KTibow/HA-Mint-Mobile | head: 55ef863b09608e44e13399b4577893d5405b78be | licenses: ["MIT"]
stars: null | issues: null | forks: null
import datetime

import requests


class BearerAuth(requests.auth.AuthBase):
    """requests auth hook that attaches a Bearer token to every request."""

    def __init__(self, token):
        self.token = token

    def __call__(self, r):
        r.headers["authorization"] = "Bearer " + self.token
        return r


class MintMobile:
    """Minimal client for the Ultra Mobile web API behind Mint Mobile accounts."""

    def __init__(self, phone_number, password):
        self.phone_number = phone_number
        self.password = password
        self.token = ""
        self.id = ""
        self.family_members = []
        self.info = {}

    def login(self):
        """Authenticate and cache the account id and bearer token."""
        r = requests.post(
            "https://w3b-api.ultramobile.com/v1/mint/login?",
            json={"msisdn": self.phone_number, "password": self.password},
        )
        if r.status_code == 200:
            response = r.json()
            self.id = response["id"]
            self.token = response["token"]
            self.info[self.id] = {"phone_number": self.phone_number}
            self.master_account_details()
            return True
        return False

    def master_account_details(self):
        r = requests.get(
            "https://w3b-api.ultramobile.com/v1/mint/account/" + str(self.id) + "?",
            auth=BearerAuth(str(self.token)),
        )
        response = r.json()
        self.info[self.id]["line_name"] = response["firstName"]
        self.info[self.id]["endOfCycle"] = self.epoch_days_remaining(response["plan"]["endOfCycle"])
        self.info[self.id]["months"] = response["plan"]["months"]
        self.info[self.id]["exp"] = self.epoch_days_remaining(response["plan"]["exp"])

    def data_remaining(self):
        r = requests.get(
            "https://w3b-api.ultramobile.com/v1/mint/account/" + str(self.id) + "/data?",
            auth=BearerAuth(str(self.token)),
        )
        response = r.json()
        response["remaining4G_GB"] = self.conv_MB_to_GB(response["remaining4G"])
        self.info[self.id]["remaining4G"] = response["remaining4G_GB"]
        return self.info

    def conv_MB_to_GB(self, input_megabyte):
        # 1 MB is 1/1024 GB; round to two decimals for display.
        gigabyte = 1.0 / 1024
        return round(gigabyte * input_megabyte, 2)

    def epoch_days_remaining(self, epoch):
        # Whole days from now until the given Unix timestamp.
        dt1 = datetime.datetime.fromtimestamp(epoch)
        dt2 = datetime.datetime.now()
        return (dt1 - dt2).days

    def get_family_members(self):
        r = requests.get(
            "https://w3b-api.ultramobile.com/v1/mint/account/" + str(self.id) + "/multi-line?",
            auth=BearerAuth(str(self.token)),
        )
        response = r.json()
        for member in response["activeMembers"]:
            self.family_members.append(member["id"])
            self.info[member["id"]] = {
                "phone_number": member["msisdn"],
                "line_name": member["nickName"],
                "endOfCycle": self.epoch_days_remaining(member["currentPlan"]["rechargeDate"]),
                "months": member["currentPlan"]["duration"],
                "exp": self.epoch_days_remaining(member["nextPlan"]["renewalDate"]),
            }
        self.family_data_remaining()

    def family_data_remaining(self):
        for member in self.family_members:
            r = requests.get(
                "https://w3b-api.ultramobile.com/v1/mint/account/" + self.id + "/multi-line/" + member + "/usage?",
                auth=BearerAuth(str(self.token)),
            )
            response = r.json()
            response["remaining4G_GB"] = self.conv_MB_to_GB(response["data"]["remaining4G"])
            self.info[member]["remaining4G"] = response["remaining4G_GB"]

    def get_all_data_remaining(self):
        self.login()
        self.data_remaining()
        self.get_family_members()
        return self.info

    def lines(self):
        self.login()
        self.get_family_members()
        return self.info.keys()
avg_line_length: 41.347368 | max_line_length: 152 | alphanum_fraction: 0.643075
is_comment_constant_removed: true | is_sharp_comment_removed: true
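
As a quick illustration of the client above, a minimal usage sketch; the phone number and password are invented placeholders, the import path assumes the module is reachable as `api`, and every call hits the live Ultra Mobile endpoints:

```python
from api import MintMobile  # assumed import path

mint = MintMobile("5551234567", "not-a-real-password")
info = mint.get_all_data_remaining()  # logs in, then fetches master + family lines
for line_id, line in info.items():
    print(line["line_name"], line["phone_number"], line["remaining4G"], "GB left")
```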

hexsha: 7903fd1d5c77b3edd8916e6063615ebb5e8d8931 | size: 1,783 | ext: py | lang: Python
path: nipype/interfaces/slicer/legacy/tests/test_auto_MultiResolutionAffineRegistration.py | repo: felixsc1/nipype | head: e722d6170593583f16ddfcb95473e5d30b5f1d7c | licenses: ["Apache-2.0"]
stars: 8 (2019-05-29T09:38:30.000Z to 2021-01-20T03:36:59.000Z) | issues: 12 (2021-03-09T03:01:16.000Z to 2022-03-11T23:59:36.000Z) | forks: 1 (2020-07-17T12:49:49.000Z to 2020-07-17T12:49:49.000Z)
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..registration import MultiResolutionAffineRegistration
def test_MultiResolutionAffineRegistration_inputs():
input_map = dict(
args=dict(argstr='%s', ),
environ=dict(
nohash=True,
usedefault=True,
),
fixedImage=dict(
argstr='%s',
position=-2,
),
fixedImageMask=dict(argstr='--fixedImageMask %s', ),
fixedImageROI=dict(argstr='--fixedImageROI %s', ),
metricTolerance=dict(argstr='--metricTolerance %f', ),
movingImage=dict(
argstr='%s',
position=-1,
),
numIterations=dict(argstr='--numIterations %d', ),
numLineIterations=dict(argstr='--numLineIterations %d', ),
resampledImage=dict(
argstr='--resampledImage %s',
hash_files=False,
),
saveTransform=dict(
argstr='--saveTransform %s',
hash_files=False,
),
stepSize=dict(argstr='--stepSize %f', ),
stepTolerance=dict(argstr='--stepTolerance %f', ),
)
inputs = MultiResolutionAffineRegistration.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_MultiResolutionAffineRegistration_outputs():
output_map = dict(
resampledImage=dict(),
saveTransform=dict(),
)
outputs = MultiResolutionAffineRegistration.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
avg_line_length: 34.288462 | max_line_length: 67 | alphanum_fraction: 0.610768
is_comment_constant_removed: true | is_sharp_comment_removed: true
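
The assertions above pin trait metadata such as `argstr` and `position` for the wrapped Slicer tool. Purely as an illustration (this is not nipype's actual command-assembly code), flag-style `argstr`s interpolate their values while negative positions sort arguments toward the end of the command line:

```python
# Illustrative sketch under invented inputs; only the argstr/position values
# come from the spec test above.
inputs = {"fixedImage": "fixed.nrrd", "movingImage": "moving.nrrd", "numIterations": 50}
spec = {"fixedImage": ("%s", -2), "movingImage": ("%s", -1),
        "numIterations": ("--numIterations %d", None)}

flags = [fmt % inputs[k] for k, (fmt, pos) in spec.items() if pos is None]
positional = [spec[k][0] % inputs[k]
              for k in sorted(inputs, key=lambda k: spec[k][1] or 0) if spec[k][1]]
cmd = ["MultiResolutionAffineRegistration"] + flags + positional
print(" ".join(cmd))
# MultiResolutionAffineRegistration --numIterations 50 fixed.nrrd moving.nrrd
```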

hexsha: 7903fdbae9e4657eae5e15d13111c09ad7522103 | size: 2,147 | ext: py | lang: Python
path: Python/DataStructure/TreeDS/ExpressionTree.py | repo: adityalata/AlgorithmsAndDataStructure | head: 93de2f63ad027f42c368c78690b7a59545541852 | licenses: ["MIT"]
stars: null | issues: null | forks: 1 (2021-01-12T21:00:04.000Z to 2021-01-12T21:00:04.000Z)
# Date 3/10/2020
# __Author__ : AdityaLata
# __Package__ : Python 3
# __GitHub__ : https://www.github.com/adityalata
from Python.DataStructure.TreeDS.Node import Node
# A utility function to check if 'c' is an operator
def isOperator(c):
if c == '+' or c == '-' or c == '*' or c == '/' or c == '^':
return True
else:
return False
# Returns root of constructed tree for given postfix expression string
def getExpressionTree(postfix):
stack = []
# Traverse through every character of input expression
for char in postfix:
# for space separated postfix
if char == " ":
continue
# if operand, simply push into stack
elif not isOperator(char):
t = Node(char)
stack.append(t)
# Operator
else:
# Pop two top nodes
t = Node(char)
t1 = stack.pop()
t2 = stack.pop()
# make them children
t.right = t1
t.left = t2
# Add this subexpression to stack
stack.append(t)
# Only element will be the root of expression tree
t = stack.pop()
return t
# Returns value evaluated from given root of valid(full binary tree) expression tree
def evaluateExpressionTree(rootNode):
# empty tree
if rootNode is None:
return 0
# leaf node
if rootNode.left is None and rootNode.right is None:
return int(rootNode.value)
# evaluate left tree
leftSubtreeValue = evaluateExpressionTree(rootNode.left)
# evaluate right tree
rightSubtreeValue = evaluateExpressionTree(rootNode.right)
# check which operation to apply on non leaf node
if rootNode.value == '+':
return leftSubtreeValue + rightSubtreeValue
elif rootNode.value == '-':
return leftSubtreeValue - rightSubtreeValue
elif rootNode.value == '*':
return leftSubtreeValue * rightSubtreeValue
elif rootNode.value == '^':
return leftSubtreeValue ** rightSubtreeValue
elif rootNode.value == '/':
return leftSubtreeValue / rightSubtreeValue
avg_line_length: 25.258824 | max_line_length: 84 | alphanum_fraction: 0.615277
is_comment_constant_removed: true | is_sharp_comment_removed: true
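
A quick usage sketch, assuming the imported `Node` exposes the `value`, `left`, and `right` attributes these functions rely on; operands must be single-character tokens, and spaces are skipped:

```python
root = getExpressionTree("3 4 + 2 *")  # postfix for (3 + 4) * 2
print(evaluateExpressionTree(root))    # 14
```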

hexsha: 7903fe2fe472c55bd786a4267486161ebf6e899d | size: 751 | ext: py | lang: Python
path: support/classifiers.py
stars: 6 (2016-03-29T07:26:20.000Z to 2019-02-12T11:52:05.000Z) in rknop/amuse @ 85d5bdcc29cfc87dc69d91c264101fafd6658aec, licenses ["Apache-2.0"]
issues: 9 (2017-03-16T12:12:40.000Z to 2021-12-20T10:49:56.000Z) in rknop/amuse @ 85d5bdcc29cfc87dc69d91c264101fafd6658aec, licenses ["Apache-2.0"]
forks: null in rieder/amuse @ 3ac3b6b8f922643657279ddee5c8ab3fc0440d5e, licenses ["Apache-2.0"]
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: C',
'Programming Language :: C++',
'Programming Language :: Fortran',
'Topic :: Scientific/Engineering :: Astronomy',
]
def main():
for i in classifiers:
print(i)
if __name__ == "__main__":
main()
avg_line_length: 30.04 | max_line_length: 57 | alphanum_fraction: 0.624501
is_comment_constant_removed: true | is_sharp_comment_removed: true
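
A hypothetical consumer of this module (the package name and layout here are invented): setup scripts typically import the shared list rather than duplicating it.

```python
from setuptools import setup
from support.classifiers import classifiers  # assumes the repo root is on sys.path

setup(name="amuse", version="0.0.0", classifiers=classifiers)  # name/version invented
```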

hexsha: 7903fe41431bd1906bdd213a65170f89577a7dc8 | size: 2,013 | ext: py | lang: Python
path: farms2face/home/templatetags/common_tags.py | repo: dev1farms2face/f2f | head: 54e58187a68574bf2bd0dfb7e58a2b416336106a | licenses: ["MIT"]
stars: null | issues: null | forks: 2 (2018-06-19T12:12:08.000Z to 2018-06-25T18:45:36.000Z)
from django import template
from home.models import Recipe, MixingAgent, Base, Ingredient, FacePack, CustomFacePack
register = template.Library()
@register.inclusion_tag('facepack.html')
def facepack_display(item_id):
if not item_id:
return
mandatory = []
type = "primary"
for cfp in CustomFacePack.objects.filter(facepack=item_id):
ing = cfp.optional_ingredient if cfp.optional_ingredient else cfp.recipe.mandatory_ingredient
mandatory.append({
'name' : ing.name,
'id' : ing.id,
'r_id' : cfp.recipe.id,
'image' : ing.image,
})
if cfp.optional_ingredient:
type = "secondary"
fp = FacePack.objects.get(pk=item_id)
res = {
'item_id' : item_id,
'name' : fp.name,
'mandatory' : mandatory,
'base' : fp.base.name,
'mixing_agent' : fp.mixing_agent.name,
'image' : fp.image,
'type' : type,
}
return {'item': res }
def facepack_display_abs(base_url, item_id):
if not item_id:
return
mandatory = []
type = "primary"
for cfp in CustomFacePack.objects.filter(facepack=item_id):
ing = cfp.optional_ingredient if cfp.optional_ingredient else cfp.recipe.mandatory_ingredient
mandatory.append({
'name' : ing.name,
'id' : ing.id,
'r_id' : cfp.recipe.id,
'image' : ing.image,
})
if cfp.optional_ingredient:
type = "secondary"
fp = FacePack.objects.get(pk=item_id)
res = {
'item_id' : item_id,
'name' : fp.name,
'mandatory' : mandatory,
'base' : fp.base.name,
'mixing_agent' : fp.mixing_agent.name,
'image' : fp.image,
'type' : type,
        'base_url' : base_url,
}
return {'item': res }
avg_line_length: 31.952381 | max_line_length: 101 | alphanum_fraction: 0.562842
is_comment_constant_removed: true | is_sharp_comment_removed: true
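
A minimal sketch of rendering the inclusion tag from Python, assuming a configured Django project that registers this file as the `common_tags` library and can resolve the `facepack.html` template; the primary key is invented:

```python
from django.template import Context, Template

html = Template(
    "{% load common_tags %}{% facepack_display 42 %}"  # 42: made-up FacePack pk
).render(Context())
```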

hexsha: 7904001743f764f3d1fd7a9ab2e28238025fbcaa | size: 20,446 | ext: py | lang: Python
path: Pyro5/serializers.py | repo: gst/Pyro5 | head: 032f635e771b2770f751f133274f0c6d1132d6d8 | licenses: ["MIT"]
stars: null | issues: null | forks: null
"""
The various serializers.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
import array
import builtins
import uuid
import logging
import struct
import datetime
import decimal
import numbers
import inspect
import marshal
import json
import serpent
import msgpack
from . import errors
__all__ = ["SerializerBase", "SerpentSerializer", "JsonSerializer", "MarshalSerializer", "MsgpackSerializer",
"serializers", "serializers_by_id"]
log = logging.getLogger("Pyro5.serializers")
if '-' in serpent.__version__:
ver = serpent.__version__.split('-', 1)[0]
else:
ver = serpent.__version__
ver = tuple(map(int, ver.split(".")))
if ver < (1, 27):
raise RuntimeError("requires serpent 1.27 or better")
if msgpack.version < (0, 5, 2):
raise RuntimeError("requires msgpack 0.5.2 or better")
all_exceptions = {}
for name, t in vars(builtins).items():
if type(t) is type and issubclass(t, BaseException):
all_exceptions[name] = t
for name, t in vars(errors).items():
if type(t) is type and issubclass(t, errors.PyroError):
all_exceptions[name] = t
def pyro_class_serpent_serializer(obj, serializer, stream, level):
# Override the default way that a Pyro URI/proxy/daemon is serialized.
# Because it defines a __getstate__ it would otherwise just become a tuple,
# and not be deserialized as a class.
d = SerializerBase.class_to_dict(obj)
serializer.ser_builtins_dict(d, stream, level)
def serialize_pyro_object_to_dict(obj):
return {
"__class__": "{:s}.{:s}".format(obj.__module__, obj.__class__.__name__),
"state": obj.__getstate__()
}
class SerializerBase(object):
"""Base class for (de)serializer implementations (which must be thread safe)"""
serializer_id = 0 # define uniquely in subclass
__custom_class_to_dict_registry = {}
__custom_dict_to_class_registry = {}
def loads(self, data):
raise NotImplementedError("implement in subclass")
def loadsCall(self, data):
raise NotImplementedError("implement in subclass")
def dumps(self, data):
raise NotImplementedError("implement in subclass")
def dumpsCall(self, obj, method, vargs, kwargs):
raise NotImplementedError("implement in subclass")
@classmethod
def register_type_replacement(cls, object_type, replacement_function):
raise NotImplementedError("implement in subclass")
def _convertToBytes(self, data):
if type(data) is bytearray:
return bytes(data)
if type(data) is memoryview:
return data.tobytes()
return data
@classmethod
def register_class_to_dict(cls, clazz, converter, serpent_too=True):
"""Registers a custom function that returns a dict representation of objects of the given class.
The function is called with a single parameter; the object to be converted to a dict."""
cls.__custom_class_to_dict_registry[clazz] = converter
if serpent_too:
try:
def serpent_converter(obj, serializer, stream, level):
d = converter(obj)
serializer.ser_builtins_dict(d, stream, level)
serpent.register_class(clazz, serpent_converter)
except errors.ProtocolError:
pass
@classmethod
def unregister_class_to_dict(cls, clazz):
"""Removes the to-dict conversion function registered for the given class. Objects of the class
will be serialized by the default mechanism again."""
if clazz in cls.__custom_class_to_dict_registry:
del cls.__custom_class_to_dict_registry[clazz]
try:
serpent.unregister_class(clazz)
except errors.ProtocolError:
pass
@classmethod
def register_dict_to_class(cls, classname, converter):
"""
Registers a custom converter function that creates objects from a dict with the given classname tag in it.
The function is called with two parameters: the classname and the dictionary to convert to an instance of the class.
"""
cls.__custom_dict_to_class_registry[classname] = converter
@classmethod
def unregister_dict_to_class(cls, classname):
"""
Removes the converter registered for the given classname. Dicts with that classname tag
will be deserialized by the default mechanism again.
"""
if classname in cls.__custom_dict_to_class_registry:
del cls.__custom_dict_to_class_registry[classname]
@classmethod
def class_to_dict(cls, obj):
"""
Convert a non-serializable object to a dict. Partly borrowed from serpent.
"""
for clazz in cls.__custom_class_to_dict_registry:
if isinstance(obj, clazz):
return cls.__custom_class_to_dict_registry[clazz](obj)
if type(obj) in (set, dict, tuple, list):
# we use a ValueError to mirror the exception type returned by serpent and other serializers
raise ValueError("can't serialize type " + str(obj.__class__) + " into a dict")
if hasattr(obj, "_pyroDaemon"):
obj._pyroDaemon = None
if isinstance(obj, BaseException):
# special case for exceptions
return {
"__class__": obj.__class__.__module__ + "." + obj.__class__.__name__,
"__exception__": True,
"args": obj.args,
"attributes": vars(obj) # add custom exception attributes
}
try:
value = obj.__getstate__()
except AttributeError:
pass
else:
if isinstance(value, dict):
return value
try:
value = dict(vars(obj)) # make sure we can serialize anything that resembles a dict
value["__class__"] = obj.__class__.__module__ + "." + obj.__class__.__name__
return value
except TypeError:
if hasattr(obj, "__slots__"):
# use the __slots__ instead of the vars dict
value = {}
for slot in obj.__slots__:
value[slot] = getattr(obj, slot)
value["__class__"] = obj.__class__.__module__ + "." + obj.__class__.__name__
return value
else:
raise errors.SerializeError("don't know how to serialize class " + str(obj.__class__) +
" using serializer " + str(cls.__name__) +
". Give it vars() or an appropriate __getstate__")
@classmethod
def dict_to_class(cls, data):
"""
Recreate an object out of a dict containing the class name and the attributes.
Only a fixed set of classes are recognized.
"""
from . import core, client, server # XXX circular
classname = data.get("__class__", "<unknown>")
if isinstance(classname, bytes):
classname = classname.decode("utf-8")
if classname in cls.__custom_dict_to_class_registry:
converter = cls.__custom_dict_to_class_registry[classname]
return converter(classname, data)
if "__" in classname:
raise errors.SecurityError("refused to deserialize types with double underscores in their name: " + classname)
# for performance, the constructors below are hardcoded here instead of added on a per-class basis to the dict-to-class registry
if classname == "Pyro5.core.URI":
uri = core.URI.__new__(core.URI)
uri.__setstate__(data["state"])
return uri
elif classname == "Pyro5.client.Proxy":
proxy = client.Proxy.__new__(client.Proxy)
proxy.__setstate__(data["state"])
return proxy
elif classname == "Pyro5.server.Daemon":
daemon = server.Daemon.__new__(server.Daemon)
daemon.__setstate__(data["state"])
return daemon
elif classname.startswith("Pyro5.util."):
if classname == "Pyro5.util.SerpentSerializer":
return SerpentSerializer()
elif classname == "Pyro5.util.MarshalSerializer":
return MarshalSerializer()
elif classname == "Pyro5.util.JsonSerializer":
return JsonSerializer()
elif classname == "Pyro5.util.MsgpackSerializer":
return MsgpackSerializer()
elif classname.startswith("Pyro5.errors."):
errortype = getattr(errors, classname.split('.', 2)[2])
if issubclass(errortype, errors.PyroError):
return SerializerBase.make_exception(errortype, data)
elif classname == "struct.error":
return SerializerBase.make_exception(struct.error, data)
elif classname == "Pyro5.core._ExceptionWrapper":
ex = data["exception"]
if isinstance(ex, dict) and "__class__" in ex:
ex = SerializerBase.dict_to_class(ex)
return core._ExceptionWrapper(ex)
elif data.get("__exception__", False):
if classname in all_exceptions:
return SerializerBase.make_exception(all_exceptions[classname], data)
# python 2.x: exceptions.ValueError
# python 3.x: builtins.ValueError
# translate to the appropriate namespace...
namespace, short_classname = classname.split('.', 1)
if namespace in ("builtins", "exceptions"):
exceptiontype = getattr(builtins, short_classname)
if issubclass(exceptiontype, BaseException):
return SerializerBase.make_exception(exceptiontype, data)
elif namespace == "sqlite3" and short_classname.endswith("Error"):
import sqlite3
exceptiontype = getattr(sqlite3, short_classname)
if issubclass(exceptiontype, BaseException):
return SerializerBase.make_exception(exceptiontype, data)
log.warning("unsupported serialized class: " + classname)
raise errors.SerializeError("unsupported serialized class: " + classname)
@staticmethod
def make_exception(exceptiontype, data):
ex = exceptiontype(*data["args"])
if "attributes" in data:
# restore custom attributes on the exception object
for attr, value in data["attributes"].items():
setattr(ex, attr, value)
return ex
def recreate_classes(self, literal):
t = type(literal)
if t is set:
return {self.recreate_classes(x) for x in literal}
if t is list:
return [self.recreate_classes(x) for x in literal]
if t is tuple:
return tuple(self.recreate_classes(x) for x in literal)
if t is dict:
if "__class__" in literal:
return self.dict_to_class(literal)
result = {}
for key, value in literal.items():
result[key] = self.recreate_classes(value)
return result
return literal
def __eq__(self, other):
"""this equality method is only to support the unit tests of this class"""
return isinstance(other, SerializerBase) and vars(self) == vars(other)
def __ne__(self, other):
return not self.__eq__(other)
__hash__ = object.__hash__
class SerpentSerializer(SerializerBase):
"""(de)serializer that wraps the serpent serialization protocol."""
serializer_id = 1 # never change this
def dumpsCall(self, obj, method, vargs, kwargs):
return serpent.dumps((obj, method, vargs, kwargs), module_in_classname=True)
def dumps(self, data):
return serpent.dumps(data, module_in_classname=True)
def loadsCall(self, data):
obj, method, vargs, kwargs = serpent.loads(data)
vargs = self.recreate_classes(vargs)
kwargs = self.recreate_classes(kwargs)
return obj, method, vargs, kwargs
def loads(self, data):
return self.recreate_classes(serpent.loads(data))
@classmethod
def register_type_replacement(cls, object_type, replacement_function):
def custom_serializer(obj, serpent_serializer, outputstream, indentlevel):
replaced = replacement_function(obj)
if replaced is obj:
serpent_serializer.ser_default_class(replaced, outputstream, indentlevel)
else:
serpent_serializer._serialize(replaced, outputstream, indentlevel)
if object_type is type or not inspect.isclass(object_type):
raise ValueError("refusing to register replacement for a non-type or the type 'type' itself")
serpent.register_class(object_type, custom_serializer)
@classmethod
def dict_to_class(cls, data):
if data.get("__class__") == "float":
return float(data["value"]) # serpent encodes a float nan as a special class dict like this
return super(SerpentSerializer, cls).dict_to_class(data)
class MarshalSerializer(SerializerBase):
"""(de)serializer that wraps the marshal serialization protocol."""
serializer_id = 2 # never change this
def dumpsCall(self, obj, method, vargs, kwargs):
vargs = [self.convert_obj_into_marshallable(value) for value in vargs]
kwargs = {key: self.convert_obj_into_marshallable(value) for key, value in kwargs.items()}
return marshal.dumps((obj, method, vargs, kwargs))
def dumps(self, data):
return marshal.dumps(self.convert_obj_into_marshallable(data))
def loadsCall(self, data):
data = self._convertToBytes(data)
obj, method, vargs, kwargs = marshal.loads(data)
vargs = self.recreate_classes(vargs)
kwargs = self.recreate_classes(kwargs)
return obj, method, vargs, kwargs
def loads(self, data):
data = self._convertToBytes(data)
return self.recreate_classes(marshal.loads(data))
def convert_obj_into_marshallable(self, obj):
marshalable_types = (str, int, float, type(None), bool, complex, bytes, bytearray,
tuple, set, frozenset, list, dict)
if isinstance(obj, array.array):
if obj.typecode == 'c':
return obj.tostring()
if obj.typecode == 'u':
return obj.tounicode()
return obj.tolist()
if isinstance(obj, marshalable_types):
return obj
return self.class_to_dict(obj)
@classmethod
def class_to_dict(cls, obj):
if isinstance(obj, uuid.UUID):
return str(obj)
return super(MarshalSerializer, cls).class_to_dict(obj)
@classmethod
def register_type_replacement(cls, object_type, replacement_function):
pass # marshal serializer doesn't support per-type hooks
class JsonSerializer(SerializerBase):
"""(de)serializer that wraps the json serialization protocol."""
serializer_id = 3 # never change this
__type_replacements = {}
def dumpsCall(self, obj, method, vargs, kwargs):
data = {"object": obj, "method": method, "params": vargs, "kwargs": kwargs}
data = json.dumps(data, ensure_ascii=False, default=self.default)
return data.encode("utf-8")
def dumps(self, data):
data = json.dumps(data, ensure_ascii=False, default=self.default)
return data.encode("utf-8")
def loadsCall(self, data):
data = self._convertToBytes(data).decode("utf-8")
data = json.loads(data)
vargs = self.recreate_classes(data["params"])
kwargs = self.recreate_classes(data["kwargs"])
return data["object"], data["method"], vargs, kwargs
def loads(self, data):
data = self._convertToBytes(data).decode("utf-8")
return self.recreate_classes(json.loads(data))
def default(self, obj):
replacer = self.__type_replacements.get(type(obj), None)
if replacer:
obj = replacer(obj)
if isinstance(obj, set):
return tuple(obj) # json module can't deal with sets so we make a tuple out of it
if isinstance(obj, uuid.UUID):
return str(obj)
if isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
if isinstance(obj, decimal.Decimal):
return str(obj)
if isinstance(obj, array.array):
if obj.typecode == 'c':
return obj.tostring()
if obj.typecode == 'u':
return obj.tounicode()
return obj.tolist()
return self.class_to_dict(obj)
@classmethod
def register_type_replacement(cls, object_type, replacement_function):
if object_type is type or not inspect.isclass(object_type):
raise ValueError("refusing to register replacement for a non-type or the type 'type' itself")
cls.__type_replacements[object_type] = replacement_function
class MsgpackSerializer(SerializerBase):
"""(de)serializer that wraps the msgpack serialization protocol."""
serializer_id = 4 # never change this
__type_replacements = {}
def dumpsCall(self, obj, method, vargs, kwargs):
return msgpack.packb((obj, method, vargs, kwargs), use_bin_type=True, default=self.default)
def dumps(self, data):
return msgpack.packb(data, use_bin_type=True, default=self.default)
def loadsCall(self, data):
return msgpack.unpackb(self._convertToBytes(data), raw=False, object_hook=self.object_hook)
def loads(self, data):
return msgpack.unpackb(self._convertToBytes(data), raw=False, object_hook=self.object_hook, ext_hook=self.ext_hook)
def default(self, obj):
replacer = self.__type_replacements.get(type(obj), None)
if replacer:
obj = replacer(obj)
if isinstance(obj, set):
return tuple(obj) # msgpack module can't deal with sets so we make a tuple out of it
if isinstance(obj, uuid.UUID):
return str(obj)
if isinstance(obj, bytearray):
return bytes(obj)
if isinstance(obj, complex):
return msgpack.ExtType(0x30, struct.pack("dd", obj.real, obj.imag))
if isinstance(obj, datetime.datetime):
if obj.tzinfo:
raise errors.SerializeError("msgpack cannot serialize datetime with timezone info")
return msgpack.ExtType(0x32, struct.pack("d", obj.timestamp()))
if isinstance(obj, datetime.date):
return msgpack.ExtType(0x33, struct.pack("l", obj.toordinal()))
if isinstance(obj, decimal.Decimal):
return str(obj)
if isinstance(obj, numbers.Number):
return msgpack.ExtType(0x31, str(obj).encode("ascii")) # long
if isinstance(obj, array.array):
if obj.typecode == 'c':
return obj.tostring()
if obj.typecode == 'u':
return obj.tounicode()
return obj.tolist()
return self.class_to_dict(obj)
def object_hook(self, obj):
if "__class__" in obj:
return self.dict_to_class(obj)
return obj
def ext_hook(self, code, data):
if code == 0x30:
real, imag = struct.unpack("dd", data)
return complex(real, imag)
if code == 0x31:
return int(data)
if code == 0x32:
return datetime.datetime.fromtimestamp(struct.unpack("d", data)[0])
if code == 0x33:
return datetime.date.fromordinal(struct.unpack("l", data)[0])
raise errors.SerializeError("invalid ext code for msgpack: " + str(code))
@classmethod
def register_type_replacement(cls, object_type, replacement_function):
if object_type is type or not inspect.isclass(object_type):
raise ValueError("refusing to register replacement for a non-type or the type 'type' itself")
cls.__type_replacements[object_type] = replacement_function
"""The various serializers that are supported"""
serializers = {
"serpent": SerpentSerializer(),
"marshal": MarshalSerializer(),
"json": JsonSerializer(),
"msgpack": MsgpackSerializer()
}
"""The available serializers by their internal id"""
serializers_by_id = {ser.serializer_id: ser for ser in serializers.values()}
avg_line_length: 40.487129 | max_line_length: 136 | alphanum_fraction: 0.638316
is_comment_constant_removed: true | is_sharp_comment_removed: true
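
A sketch of the per-class hooks defined above, with `Point` and both converters invented for illustration: `register_class_to_dict` also installs a serpent converter, and `dict_to_class` consults the custom registry before any built-in handling on the way back.

```python
class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

SerializerBase.register_class_to_dict(
    Point, lambda obj: {"__class__": "Point", "x": obj.x, "y": obj.y})
SerializerBase.register_dict_to_class(
    "Point", lambda classname, d: Point(d["x"], d["y"]))

ser = serializers["serpent"]
restored = ser.loads(ser.dumps(Point(1, 2)))  # round-trips to a Point again
```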

hexsha: 790400b0b1b0003ce3a396e13e8927243452890d | size: 4,152 | ext: py | lang: Python
path: graphic/tsp_matt.py | repo: mattaereal/AntColonyOptimization | head: b45df28cb181395d290d6c6accbc9297fa863aff | licenses: ["MIT"]
stars: null | issues: null | forks: null
#!/usr/bin/env python2
# -*- encoding: utf-8 -*-
import pygame
import sys
import numpy as np
CONST_LOCK_FILE = "lock.txt"
#CONST_GRAPH_FILE = "../tsptours/graph.tsp"
CONST_GRAPH_FILE = "graph.tsp"
CONST_STOP = "STOP"
CONST_CUSTOM_FILE = None
def main():
pygame.init()
screen = pygame.display.set_mode((700,700))
screen.fill((255,255,255))
pygame.display.set_caption("Ant Colony TSP Solver - press ENTER to solve")
graph = []
tour = []
cost = g = 0
state = 0
pygame.display.flip()
while (True):
for event in pygame.event.get():
if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):
print "El usuario ha decidido cerrar la aplicación."
sys.exit()
elif event.type == pygame.MOUSEBUTTONDOWN and state == 0 and CONST_CUSTOM_FILE:
#print "Agregando la posición del click", pygame.mouse.get_pos()
data = np.loadtxt(CONST_CUSTOM_FILE, dtype=int, delimiter=',')
for line in data:
line = (line[0]*7, line[1]*7)
graph.append(line)
pygame.draw.circle(screen, (0,0,0), line, 5, 0)
pygame.display.flip()
from_file = False
elif event.type == pygame.MOUSEBUTTONDOWN and state == 0:
#print "Agregando la posición del click", pygame.mouse.get_pos()
graph.append(pygame.mouse.get_pos())
pygame.draw.circle(screen, (0,0,0), pygame.mouse.get_pos(), 5, 0)
pygame.display.flip()
elif event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
lock_file = open(CONST_LOCK_FILE, "w")
                lock_file.write("0")
lock_file.close()
graph_file = open(CONST_GRAPH_FILE, "w")
graph_file.write("NAME : %s\n" % CONST_GRAPH_FILE)
graph_file.write("COMMENT : %s-city problem\n" % str(len(graph)))
graph_file.write("TYPE : TSP\n")
graph_file.write("DIMENSION : %s\n" % str(len(graph)))
graph_file.write("EDGE_WEIGHT_TYPE : EUC_2D\n")
graph_file.write("NODE_COORD_SECTION\n")
for x in range(0, len(graph)):
#print "%d %d %d" % (x, graph[x][0], graph[x][1])
graph_file.write("%d %d %d" % (x, graph[x][0], graph[x][1]))
graph_file.write("\n")
graph_file.write("EOF")
graph_file.close()
                lock_file = open("lock.txt", "w")
                lock_file.write("1")
                lock_file.close()
                # First batch of output from the solver.
                tour = input()  # [0, .., n-1, n]
                cost = input()  # Tour cost
                g = input()  # Number of iterations
                lock_file = open("lock.txt", "w")
                lock_file.write("0")
                lock_file.close()
state = 1
if state == 1:
if tour != CONST_STOP:
pygame.display.set_caption("Ant Colony TSP Solver - current length: " + str(cost) + " | iterations: " + str(g) + " (SOLVING...)")
screen.fill((255,255,255))
                # Redraw the circles
for i in graph:
pygame.draw.circle(screen, (255,0,0), i, 5, 0)
for i in range(0, len(tour)):
pygame.draw.line(screen, (255, 0, 0), graph[tour[i]], graph[tour[(i + 1) % len(tour)]])
pygame.display.flip()
                # Subsequent outputs from the solver
tour = input()
if tour != CONST_STOP:
cost = input()
g = input()
else:
pygame.display.set_caption("Ant Colony TSP Solver - current length: " + str(cost) + " | iterations: " + str(g) + " (FINISHED)")
finished = True
state = 2
if __name__ == '__main__':
if len(sys.argv) == 2:
CONST_CUSTOM_FILE = sys.argv[1]
main()
avg_line_length: 39.923077 | max_line_length: 145 | alphanum_fraction: 0.509152
|
import pygame
import sys
import numpy as np
CONST_LOCK_FILE = "lock.txt"
CONST_GRAPH_FILE = "graph.tsp"
CONST_STOP = "STOP"
CONST_CUSTOM_FILE = None
def main():
pygame.init()
screen = pygame.display.set_mode((700,700))
screen.fill((255,255,255))
pygame.display.set_caption("Ant Colony TSP Solver - press ENTER to solve")
graph = []
tour = []
cost = g = 0
state = 0
pygame.display.flip()
while (True):
for event in pygame.event.get():
if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):
print "El usuario ha decidido cerrar la aplicación."
sys.exit()
elif event.type == pygame.MOUSEBUTTONDOWN and state == 0 and CONST_CUSTOM_FILE:
data = np.loadtxt(CONST_CUSTOM_FILE, dtype=int, delimiter=',')
for line in data:
line = (line[0]*7, line[1]*7)
graph.append(line)
pygame.draw.circle(screen, (0,0,0), line, 5, 0)
pygame.display.flip()
from_file = False
elif event.type == pygame.MOUSEBUTTONDOWN and state == 0:
graph.append(pygame.mouse.get_pos())
pygame.draw.circle(screen, (0,0,0), pygame.mouse.get_pos(), 5, 0)
pygame.display.flip()
elif event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
lock_file = open(CONST_LOCK_FILE, "w")
lock_file.write("0");
lock_file.close()
graph_file = open(CONST_GRAPH_FILE, "w")
graph_file.write("NAME : %s\n" % CONST_GRAPH_FILE)
graph_file.write("COMMENT : %s-city problem\n" % str(len(graph)))
graph_file.write("TYPE : TSP\n")
graph_file.write("DIMENSION : %s\n" % str(len(graph)))
graph_file.write("EDGE_WEIGHT_TYPE : EUC_2D\n")
graph_file.write("NODE_COORD_SECTION\n")
for x in range(0, len(graph)):
graph_file.write("%d %d %d" % (x, graph[x][0], graph[x][1]))
graph_file.write("\n")
graph_file.write("EOF")
graph_file.close()
lock_file = open("lock.txt", "w")
lock_file.write("1");
lock_file.close()
tour = input()
cost = input()
g = input()
lock_file = open("lock.txt", "w")
lock_file.write("0");
lock_file.close()
state = 1
if state == 1:
if tour != CONST_STOP:
pygame.display.set_caption("Ant Colony TSP Solver - current length: " + str(cost) + " | iterations: " + str(g) + " (SOLVING...)")
screen.fill((255,255,255))
for i in graph:
pygame.draw.circle(screen, (255,0,0), i, 5, 0)
for i in range(0, len(tour)):
pygame.draw.line(screen, (255, 0, 0), graph[tour[i]], graph[tour[(i + 1) % len(tour)]])
pygame.display.flip()
tour = input()
if tour != CONST_STOP:
cost = input()
g = input()
else:
pygame.display.set_caption("Ant Colony TSP Solver - current length: " + str(cost) + " | iterations: " + str(g) + " (FINISHED)")
finished = True
state = 2
if __name__ == '__main__':
if len(sys.argv) == 2:
CONST_CUSTOM_FILE = sys.argv[1]
main()
| false
| true
|
790402b801341eea86957f16f618826e2f8c3afb
| 6,444
|
py
|
Python
|
neutron/conf/policies/security_group.py
|
congnt95/neutron
|
6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1
|
[
"Apache-2.0"
] | 1,080
|
2015-01-04T08:35:00.000Z
|
2022-03-27T09:15:52.000Z
|
neutron/conf/policies/security_group.py
|
congnt95/neutron
|
6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1
|
[
"Apache-2.0"
] | 24
|
2015-02-21T01:48:28.000Z
|
2021-11-26T02:38:56.000Z
|
neutron/conf/policies/security_group.py
|
congnt95/neutron
|
6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1
|
[
"Apache-2.0"
] | 1,241
|
2015-01-02T10:47:10.000Z
|
2022-03-27T09:42:23.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import versionutils
from oslo_policy import policy
from neutron.conf.policies import base
DEPRECATED_REASON = (
"The security group API now supports system scope and default roles.")
SG_COLLECTION_PATH = '/security-groups'
SG_RESOURCE_PATH = '/security-groups/{id}'
RULE_COLLECTION_PATH = '/security-group-rules'
RULE_RESOURCE_PATH = '/security-group-rules/{id}'
RULE_ADMIN_OR_SG_OWNER = 'rule:admin_or_sg_owner'
RULE_ADMIN_OWNER_OR_SG_OWNER = 'rule:admin_owner_or_sg_owner'
rules = [
policy.RuleDefault(
name='admin_or_sg_owner',
check_str=base.policy_or(
'rule:context_is_admin',
'tenant_id:%(security_group:tenant_id)s'),
description='Rule for admin or security group owner access'),
policy.RuleDefault(
name='admin_owner_or_sg_owner',
check_str=base.policy_or(
'rule:owner',
RULE_ADMIN_OR_SG_OWNER),
description=('Rule for resource owner, '
'admin or security group owner access')),
    # TODO(amotoki): Is admin_or_owner the right rule?
    # Or does an empty string make more sense for create_security_group?
policy.DocumentedRuleDefault(
name='create_security_group',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
scope_types=['system', 'project'],
description='Create a security group',
operations=[
{
'method': 'POST',
'path': SG_COLLECTION_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='create_security_group',
check_str=base.RULE_ADMIN_OR_OWNER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
policy.DocumentedRuleDefault(
name='get_security_group',
check_str=base.SYSTEM_OR_PROJECT_READER,
scope_types=['system', 'project'],
description='Get a security group',
operations=[
{
'method': 'GET',
'path': SG_COLLECTION_PATH,
},
{
'method': 'GET',
'path': SG_RESOURCE_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='get_security_group',
check_str=base.RULE_ANY,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
policy.DocumentedRuleDefault(
name='update_security_group',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
scope_types=['system', 'project'],
description='Update a security group',
operations=[
{
'method': 'PUT',
'path': SG_RESOURCE_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='update_security_group',
check_str=base.RULE_ADMIN_OR_OWNER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
policy.DocumentedRuleDefault(
name='delete_security_group',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
scope_types=['system', 'project'],
description='Delete a security group',
operations=[
{
'method': 'DELETE',
'path': SG_RESOURCE_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='delete_security_group',
check_str=base.RULE_ADMIN_OR_OWNER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
    # TODO(amotoki): Is admin_or_owner the right rule?
    # Or does an empty string make more sense for create_security_group_rule?
policy.DocumentedRuleDefault(
name='create_security_group_rule',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
scope_types=['system', 'project'],
description='Create a security group rule',
operations=[
{
'method': 'POST',
'path': RULE_COLLECTION_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='create_security_group_rule',
check_str=base.RULE_ADMIN_OR_OWNER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
policy.DocumentedRuleDefault(
name='get_security_group_rule',
check_str=base.policy_or(
base.SYSTEM_OR_PROJECT_READER,
base.RULE_SG_OWNER),
scope_types=['system', 'project'],
description='Get a security group rule',
operations=[
{
'method': 'GET',
'path': RULE_COLLECTION_PATH,
},
{
'method': 'GET',
'path': RULE_RESOURCE_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='get_security_group_rule',
check_str=RULE_ADMIN_OWNER_OR_SG_OWNER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
policy.DocumentedRuleDefault(
name='delete_security_group_rule',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
scope_types=['system', 'project'],
description='Delete a security group rule',
operations=[
{
'method': 'DELETE',
'path': RULE_RESOURCE_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='delete_security_group_rule',
check_str=base.RULE_ADMIN_OR_OWNER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
]
def list_rules():
return rules
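A minimal sketch (not Neutron's actual wiring) of how a policy module like this is typically consumed: the defaults from list_rules() are registered on an oslo.policy Enforcer, which then evaluates a named rule against a target and request credentials. The credential and target dicts below are illustrative only.
from oslo_config import cfg
from oslo_policy import policy as oslo_policy

from neutron.conf.policies import security_group

enforcer = oslo_policy.Enforcer(cfg.CONF)
enforcer.register_defaults(security_group.list_rules())

# Hypothetical request: a project member creating a group in their own project.
creds = {'project_id': 'p1', 'roles': ['member']}
target = {'project_id': 'p1'}
allowed = enforcer.enforce('create_security_group', target, creds)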
| 35.021739
| 76
| 0.620112
|
from oslo_log import versionutils
from oslo_policy import policy
from neutron.conf.policies import base
DEPRECATED_REASON = (
"The security group API now supports system scope and default roles.")
SG_COLLECTION_PATH = '/security-groups'
SG_RESOURCE_PATH = '/security-groups/{id}'
RULE_COLLECTION_PATH = '/security-group-rules'
RULE_RESOURCE_PATH = '/security-group-rules/{id}'
RULE_ADMIN_OR_SG_OWNER = 'rule:admin_or_sg_owner'
RULE_ADMIN_OWNER_OR_SG_OWNER = 'rule:admin_owner_or_sg_owner'
rules = [
policy.RuleDefault(
name='admin_or_sg_owner',
check_str=base.policy_or(
'rule:context_is_admin',
'tenant_id:%(security_group:tenant_id)s'),
description='Rule for admin or security group owner access'),
policy.RuleDefault(
name='admin_owner_or_sg_owner',
check_str=base.policy_or(
'rule:owner',
RULE_ADMIN_OR_SG_OWNER),
description=('Rule for resource owner, '
'admin or security group owner access')),
policy.DocumentedRuleDefault(
name='create_security_group',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
scope_types=['system', 'project'],
description='Create a security group',
operations=[
{
'method': 'POST',
'path': SG_COLLECTION_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='create_security_group',
check_str=base.RULE_ADMIN_OR_OWNER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
policy.DocumentedRuleDefault(
name='get_security_group',
check_str=base.SYSTEM_OR_PROJECT_READER,
scope_types=['system', 'project'],
description='Get a security group',
operations=[
{
'method': 'GET',
'path': SG_COLLECTION_PATH,
},
{
'method': 'GET',
'path': SG_RESOURCE_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='get_security_group',
check_str=base.RULE_ANY,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
policy.DocumentedRuleDefault(
name='update_security_group',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
scope_types=['system', 'project'],
description='Update a security group',
operations=[
{
'method': 'PUT',
'path': SG_RESOURCE_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='update_security_group',
check_str=base.RULE_ADMIN_OR_OWNER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
policy.DocumentedRuleDefault(
name='delete_security_group',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
scope_types=['system', 'project'],
description='Delete a security group',
operations=[
{
'method': 'DELETE',
'path': SG_RESOURCE_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='delete_security_group',
check_str=base.RULE_ADMIN_OR_OWNER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
policy.DocumentedRuleDefault(
name='create_security_group_rule',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
scope_types=['system', 'project'],
description='Create a security group rule',
operations=[
{
'method': 'POST',
'path': RULE_COLLECTION_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='create_security_group_rule',
check_str=base.RULE_ADMIN_OR_OWNER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
policy.DocumentedRuleDefault(
name='get_security_group_rule',
check_str=base.policy_or(
base.SYSTEM_OR_PROJECT_READER,
base.RULE_SG_OWNER),
scope_types=['system', 'project'],
description='Get a security group rule',
operations=[
{
'method': 'GET',
'path': RULE_COLLECTION_PATH,
},
{
'method': 'GET',
'path': RULE_RESOURCE_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='get_security_group_rule',
check_str=RULE_ADMIN_OWNER_OR_SG_OWNER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
policy.DocumentedRuleDefault(
name='delete_security_group_rule',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
scope_types=['system', 'project'],
description='Delete a security group rule',
operations=[
{
'method': 'DELETE',
'path': RULE_RESOURCE_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='delete_security_group_rule',
check_str=base.RULE_ADMIN_OR_OWNER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
]
def list_rules():
return rules
| true
| true
|
79040342f793b61a5f240c5a7ee2018396e91425
| 5,974
|
py
|
Python
|
test/unit/test_cypher_encoding.py
|
CyberGRX/py2neo
|
3e4a5a799761e6dd335e0d8e4b7d47cfff98f793
|
[
"Apache-2.0"
] | null | null | null |
test/unit/test_cypher_encoding.py
|
CyberGRX/py2neo
|
3e4a5a799761e6dd335e0d8e4b7d47cfff98f793
|
[
"Apache-2.0"
] | null | null | null |
test/unit/test_cypher_encoding.py
|
CyberGRX/py2neo
|
3e4a5a799761e6dd335e0d8e4b7d47cfff98f793
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2019, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from neotime import Date, Time, DateTime, Duration
from py2neo.data import Node
from py2neo.cypher import cypher_escape, cypher_repr
from py2neo.cypher.encoding import LabelSetView, PropertyDictView, PropertySelector
class LabelSetViewTestCase(TestCase):
def test_can_create_empty_view(self):
view = LabelSetView([])
self.assertEqual(repr(view), "")
def test_can_create_single_label_view(self):
view = LabelSetView(["A"])
self.assertEqual(repr(view), ":A")
def test_can_create_double_label_view(self):
view = LabelSetView(["A", "B"])
self.assertEqual(repr(view), ":A:B")
def test_can_select_existing_in_view(self):
view = LabelSetView(["A", "B"]).B
self.assertEqual(repr(view), ":B")
def test_can_select_non_existing_in_view(self):
view = LabelSetView(["A", "B"]).C
self.assertEqual(repr(view), "")
def test_can_chain_select(self):
view = LabelSetView(["A", "B", "C"]).B.C
self.assertEqual(repr(view), ":B:C")
def test_can_reselect_same(self):
view = LabelSetView(["A", "B", "C"]).B.B.C
self.assertEqual(repr(view), ":B:C")
def test_length(self):
view = LabelSetView(["A", "B", "C"])
self.assertEqual(len(view), 3)
def test_iterable(self):
view = LabelSetView(["A", "B", "C"])
self.assertSetEqual(set(view), {"A", "B", "C"})
def test_containment(self):
view = LabelSetView(["A", "B", "C"])
self.assertIn("A", view)
def test_non_containment(self):
view = LabelSetView(["A", "B", "C"])
self.assertNotIn("D", view)
class PropertyDictViewTestCase(TestCase):
def test_can_create_empty_view(self):
view = PropertyDictView({})
self.assertEqual(repr(view), "{}")
def test_can_create_single_property_view(self):
view = PropertyDictView({"A": 1})
self.assertEqual(repr(view), "{A: 1}")
def test_can_create_double_property_view(self):
view = PropertyDictView({"A": 1, "B": 2})
self.assertEqual(repr(view), "{A: 1, B: 2}")
def test_can_select_existing_in_view(self):
view = PropertyDictView({"A": 1, "B": 2}).B
self.assertEqual(repr(view), "{B: 2}")
def test_can_select_non_existing_in_view(self):
view = PropertyDictView({"A": 1, "B": 2}).C
self.assertEqual(repr(view), "{}")
def test_can_chain_select(self):
view = PropertyDictView({"A": 1, "B": 2, "C": 3}).B.C
self.assertEqual(repr(view), "{B: 2, C: 3}")
def test_can_reselect_same(self):
view = PropertyDictView({"A": 1, "B": 2, "C": 3}).B.B.C
self.assertEqual(repr(view), "{B: 2, C: 3}")
def test_length(self):
view = PropertyDictView({"A": 1, "B": 2, "C": 3})
self.assertEqual(len(view), 3)
def test_iterable(self):
view = PropertyDictView({"A": 1, "B": 2, "C": 3})
self.assertEqual(set(view), {"A", "B", "C"})
def test_containment(self):
view = PropertyDictView({"A": 1, "B": 2, "C": 3})
self.assertIn("A", view)
def test_non_containment(self):
view = PropertyDictView({"A": 1, "B": 2, "C": 3})
self.assertNotIn("D", view)
class PropertySelectorTestCase(TestCase):
def test_simple(self):
selector = PropertySelector({"A": 1, "B": 2, "C": 3})
self.assertEqual(selector.A, "1")
def test_non_existent(self):
selector = PropertySelector({"A": 1, "B": 2, "C": 3})
self.assertEqual(selector.D, "null")
class NodeReprTestCase(TestCase):
def test_empty(self):
a = Node()
r = cypher_repr(a)
self.assertEqual("({})", r)
def test_single_property(self):
a = Node(name="Alice")
r = cypher_repr(a)
self.assertEqual("({name: 'Alice'})", r)
def test_property_and_label(self):
a = Node("Person", name="Alice")
r = cypher_repr(a)
self.assertEqual("(:Person {name: 'Alice'})", r)
def test_date_property(self):
a = Node(d=Date(1970, 1, 1))
r = cypher_repr(a)
self.assertEqual("({d: date('1970-01-01')})", r)
def test_time_property(self):
a = Node(t=Time(12, 34, 56))
r = cypher_repr(a)
self.assertEqual("({t: time('12:34:56.000000000')})", r)
def test_datetime_property(self):
a = Node(dt=DateTime(1970, 1, 1, 12, 34, 56))
r = cypher_repr(a)
self.assertEqual("({dt: datetime('1970-01-01T12:34:56.000000000')})", r)
def test_duration_property(self):
a = Node(dur=Duration(days=3))
r = cypher_repr(a)
self.assertEqual("({dur: duration('P3D')})", r)
class CypherEscapeTestCase(TestCase):
def test_empty_string(self):
value = ""
with self.assertRaises(ValueError):
_ = cypher_escape(value)
def test_simple_string(self):
value = "foo"
escaped = "foo"
self.assertEqual(escaped, cypher_escape(value))
def test_string_with_space(self):
value = "foo bar"
escaped = "`foo bar`"
self.assertEqual(escaped, cypher_escape(value))
def test_string_with_backtick(self):
value = "foo `bar`"
escaped = "`foo ``bar```"
self.assertEqual(escaped, cypher_escape(value))
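The escaping behaviour exercised by CypherEscapeTestCase can be illustrated with a stand-alone sketch; this is not py2neo's actual implementation, just the rule the tests imply: simple identifiers pass through, anything else is wrapped in backticks with embedded backticks doubled, and empty input is rejected.
import re

def naive_cypher_escape(identifier):
    # Illustrative re-implementation of the behaviour the tests above describe.
    if not identifier:
        raise ValueError("identifier must not be empty")
    if re.match(r"^[A-Za-z_][A-Za-z0-9_]*$", identifier):
        return identifier                       # already a safe simple name
    return "`" + identifier.replace("`", "``") + "`"

assert naive_cypher_escape("foo") == "foo"
assert naive_cypher_escape("foo bar") == "`foo bar`"
assert naive_cypher_escape("foo `bar`") == "`foo ``bar```"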
| 31.114583
| 83
| 0.61165
|
from unittest import TestCase
from neotime import Date, Time, DateTime, Duration
from py2neo.data import Node
from py2neo.cypher import cypher_escape, cypher_repr
from py2neo.cypher.encoding import LabelSetView, PropertyDictView, PropertySelector
class LabelSetViewTestCase(TestCase):
def test_can_create_empty_view(self):
view = LabelSetView([])
self.assertEqual(repr(view), "")
def test_can_create_single_label_view(self):
view = LabelSetView(["A"])
self.assertEqual(repr(view), ":A")
def test_can_create_double_label_view(self):
view = LabelSetView(["A", "B"])
self.assertEqual(repr(view), ":A:B")
def test_can_select_existing_in_view(self):
view = LabelSetView(["A", "B"]).B
self.assertEqual(repr(view), ":B")
def test_can_select_non_existing_in_view(self):
view = LabelSetView(["A", "B"]).C
self.assertEqual(repr(view), "")
def test_can_chain_select(self):
view = LabelSetView(["A", "B", "C"]).B.C
self.assertEqual(repr(view), ":B:C")
def test_can_reselect_same(self):
view = LabelSetView(["A", "B", "C"]).B.B.C
self.assertEqual(repr(view), ":B:C")
def test_length(self):
view = LabelSetView(["A", "B", "C"])
self.assertEqual(len(view), 3)
def test_iterable(self):
view = LabelSetView(["A", "B", "C"])
self.assertSetEqual(set(view), {"A", "B", "C"})
def test_containment(self):
view = LabelSetView(["A", "B", "C"])
self.assertIn("A", view)
def test_non_containment(self):
view = LabelSetView(["A", "B", "C"])
self.assertNotIn("D", view)
class PropertyDictViewTestCase(TestCase):
def test_can_create_empty_view(self):
view = PropertyDictView({})
self.assertEqual(repr(view), "{}")
def test_can_create_single_property_view(self):
view = PropertyDictView({"A": 1})
self.assertEqual(repr(view), "{A: 1}")
def test_can_create_double_property_view(self):
view = PropertyDictView({"A": 1, "B": 2})
self.assertEqual(repr(view), "{A: 1, B: 2}")
def test_can_select_existing_in_view(self):
view = PropertyDictView({"A": 1, "B": 2}).B
self.assertEqual(repr(view), "{B: 2}")
def test_can_select_non_existing_in_view(self):
view = PropertyDictView({"A": 1, "B": 2}).C
self.assertEqual(repr(view), "{}")
def test_can_chain_select(self):
view = PropertyDictView({"A": 1, "B": 2, "C": 3}).B.C
self.assertEqual(repr(view), "{B: 2, C: 3}")
def test_can_reselect_same(self):
view = PropertyDictView({"A": 1, "B": 2, "C": 3}).B.B.C
self.assertEqual(repr(view), "{B: 2, C: 3}")
def test_length(self):
view = PropertyDictView({"A": 1, "B": 2, "C": 3})
self.assertEqual(len(view), 3)
def test_iterable(self):
view = PropertyDictView({"A": 1, "B": 2, "C": 3})
self.assertEqual(set(view), {"A", "B", "C"})
def test_containment(self):
view = PropertyDictView({"A": 1, "B": 2, "C": 3})
self.assertIn("A", view)
def test_non_containment(self):
view = PropertyDictView({"A": 1, "B": 2, "C": 3})
self.assertNotIn("D", view)
class PropertySelectorTestCase(TestCase):
def test_simple(self):
selector = PropertySelector({"A": 1, "B": 2, "C": 3})
self.assertEqual(selector.A, "1")
def test_non_existent(self):
selector = PropertySelector({"A": 1, "B": 2, "C": 3})
self.assertEqual(selector.D, "null")
class NodeReprTestCase(TestCase):
def test_empty(self):
a = Node()
r = cypher_repr(a)
self.assertEqual("({})", r)
def test_single_property(self):
a = Node(name="Alice")
r = cypher_repr(a)
self.assertEqual("({name: 'Alice'})", r)
def test_property_and_label(self):
a = Node("Person", name="Alice")
r = cypher_repr(a)
self.assertEqual("(:Person {name: 'Alice'})", r)
def test_date_property(self):
a = Node(d=Date(1970, 1, 1))
r = cypher_repr(a)
self.assertEqual("({d: date('1970-01-01')})", r)
def test_time_property(self):
a = Node(t=Time(12, 34, 56))
r = cypher_repr(a)
self.assertEqual("({t: time('12:34:56.000000000')})", r)
def test_datetime_property(self):
a = Node(dt=DateTime(1970, 1, 1, 12, 34, 56))
r = cypher_repr(a)
self.assertEqual("({dt: datetime('1970-01-01T12:34:56.000000000')})", r)
def test_duration_property(self):
a = Node(dur=Duration(days=3))
r = cypher_repr(a)
self.assertEqual("({dur: duration('P3D')})", r)
class CypherEscapeTestCase(TestCase):
def test_empty_string(self):
value = ""
with self.assertRaises(ValueError):
_ = cypher_escape(value)
def test_simple_string(self):
value = "foo"
escaped = "foo"
self.assertEqual(escaped, cypher_escape(value))
def test_string_with_space(self):
value = "foo bar"
escaped = "`foo bar`"
self.assertEqual(escaped, cypher_escape(value))
def test_string_with_backtick(self):
value = "foo `bar`"
escaped = "`foo ``bar```"
self.assertEqual(escaped, cypher_escape(value))
| true
| true
|
790403ce5ce41a285ed6bfe1458f484eb6c52178
| 879
|
py
|
Python
|
statarb/src/python/bin/get_calcres_files.py
|
mikimaus78/ml_monorepo
|
b2c2627ff0e86e27f6829170d0dac168d8e5783b
|
[
"BSD-3-Clause"
] | 51
|
2019-02-01T19:43:37.000Z
|
2022-03-16T09:07:03.000Z
|
statarb/src/python/bin/get_calcres_files.py
|
mikimaus78/ml_monorepo
|
b2c2627ff0e86e27f6829170d0dac168d8e5783b
|
[
"BSD-3-Clause"
] | 2
|
2019-02-23T18:54:22.000Z
|
2019-11-09T01:30:32.000Z
|
statarb/src/python/bin/get_calcres_files.py
|
mikimaus78/ml_monorepo
|
b2c2627ff0e86e27f6829170d0dac168d8e5783b
|
[
"BSD-3-Clause"
] | 35
|
2019-02-08T02:00:31.000Z
|
2022-03-01T23:17:00.000Z
|
#!/usr/bin/env python
import glob
import re
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-c", "--calctime", dest="calctime", type=int, default=1400)
parser.add_option("-s", "--startdate", dest="startdate", type=int)
parser.add_option("-r", "--rootdir", dest="rootdir", default="/apps/multex/trade/run/live-prod")
(options, args) = parser.parse_args()
directories = glob.glob(options.rootdir + "/2010/*/*/calcres")
directories.sort()
for adir in directories:
fs = [g for g in glob.glob(adir + "/calcres.*.txt.gz")]
fs.sort()
found = False
for fname in fs:
if found: continue
match = re.search(r'.*calcres\.(\d+)_(\d+)\.txt\.gz', fname)
if match:
if float(match.group(1)) > options.startdate and float(match.group(2)) > options.calctime:
print fname
found = True
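The found-flag scan above can also be expressed as a small reusable function; this is an equivalent sketch under the same directory-layout and file-naming assumptions, and first_calcres_after is a hypothetical name.
import glob
import re

def first_calcres_after(calcres_dir, startdate, calctime):
    # Return the first calcres file in the directory newer than both cutoffs.
    pattern = re.compile(r'.*calcres\.(\d+)_(\d+)\.txt\.gz')
    for path in sorted(glob.glob(calcres_dir + "/calcres.*.txt.gz")):
        match = pattern.search(path)
        if match and int(match.group(1)) > startdate and int(match.group(2)) > calctime:
            return path
    return None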
| 30.310345
| 102
| 0.632537
|
import glob
import re
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-c", "--calctime", dest="calctime", type=int, default=1400)
parser.add_option("-s", "--startdate", dest="startdate", type=int)
parser.add_option("-r", "--rootdir", dest="rootdir", default="/apps/multex/trade/run/live-prod")
(options, args) = parser.parse_args()
directories = glob.glob(options.rootdir + "/2010/*/*/calcres")
directories.sort()
for adir in directories:
fs = [g for g in glob.glob(adir + "/calcres.*.txt.gz")]
fs.sort()
found = False
for fname in fs:
if found: continue
match = re.search(r'.*calcres\.(\d+)_(\d+)\.txt\.gz', fname)
if match:
if float(match.group(1)) > options.startdate and float(match.group(2)) > options.calctime:
print fname
found = True
| false
| true
|
7904047e8c69111e72f0d3f569feb45a35bdf839
| 60,806
|
py
|
Python
|
venv/lib/python3.8/site-packages/azure/mgmt/containerservice/v2019_02_01/models/_models.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/azure/mgmt/containerservice/v2019_02_01/models/_models.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/azure/mgmt/containerservice/v2019_02_01/models/_models.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class SubResource(Model):
"""Reference to another subresource.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:vartype name: str
:ivar type: Resource type
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, **kwargs):
super(SubResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class AgentPool(SubResource):
"""Agent Pool.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource ID.
:vartype id: str
:ivar name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param count: Required. Number of agents (VMs) to host docker containers.
Allowed values must be in the range of 1 to 100 (inclusive). The default
value is 1. Default value: 1 .
:type count: int
:param vm_size: Required. Size of agent VMs. Possible values include:
'Standard_A1', 'Standard_A10', 'Standard_A11', 'Standard_A1_v2',
'Standard_A2', 'Standard_A2_v2', 'Standard_A2m_v2', 'Standard_A3',
'Standard_A4', 'Standard_A4_v2', 'Standard_A4m_v2', 'Standard_A5',
'Standard_A6', 'Standard_A7', 'Standard_A8', 'Standard_A8_v2',
'Standard_A8m_v2', 'Standard_A9', 'Standard_B2ms', 'Standard_B2s',
'Standard_B4ms', 'Standard_B8ms', 'Standard_D1', 'Standard_D11',
'Standard_D11_v2', 'Standard_D11_v2_Promo', 'Standard_D12',
'Standard_D12_v2', 'Standard_D12_v2_Promo', 'Standard_D13',
'Standard_D13_v2', 'Standard_D13_v2_Promo', 'Standard_D14',
'Standard_D14_v2', 'Standard_D14_v2_Promo', 'Standard_D15_v2',
'Standard_D16_v3', 'Standard_D16s_v3', 'Standard_D1_v2', 'Standard_D2',
'Standard_D2_v2', 'Standard_D2_v2_Promo', 'Standard_D2_v3',
'Standard_D2s_v3', 'Standard_D3', 'Standard_D32_v3', 'Standard_D32s_v3',
'Standard_D3_v2', 'Standard_D3_v2_Promo', 'Standard_D4', 'Standard_D4_v2',
'Standard_D4_v2_Promo', 'Standard_D4_v3', 'Standard_D4s_v3',
'Standard_D5_v2', 'Standard_D5_v2_Promo', 'Standard_D64_v3',
'Standard_D64s_v3', 'Standard_D8_v3', 'Standard_D8s_v3', 'Standard_DS1',
'Standard_DS11', 'Standard_DS11_v2', 'Standard_DS11_v2_Promo',
'Standard_DS12', 'Standard_DS12_v2', 'Standard_DS12_v2_Promo',
'Standard_DS13', 'Standard_DS13-2_v2', 'Standard_DS13-4_v2',
'Standard_DS13_v2', 'Standard_DS13_v2_Promo', 'Standard_DS14',
'Standard_DS14-4_v2', 'Standard_DS14-8_v2', 'Standard_DS14_v2',
'Standard_DS14_v2_Promo', 'Standard_DS15_v2', 'Standard_DS1_v2',
'Standard_DS2', 'Standard_DS2_v2', 'Standard_DS2_v2_Promo',
'Standard_DS3', 'Standard_DS3_v2', 'Standard_DS3_v2_Promo',
'Standard_DS4', 'Standard_DS4_v2', 'Standard_DS4_v2_Promo',
'Standard_DS5_v2', 'Standard_DS5_v2_Promo', 'Standard_E16_v3',
'Standard_E16s_v3', 'Standard_E2_v3', 'Standard_E2s_v3',
'Standard_E32-16s_v3', 'Standard_E32-8s_v3', 'Standard_E32_v3',
'Standard_E32s_v3', 'Standard_E4_v3', 'Standard_E4s_v3',
'Standard_E64-16s_v3', 'Standard_E64-32s_v3', 'Standard_E64_v3',
'Standard_E64s_v3', 'Standard_E8_v3', 'Standard_E8s_v3', 'Standard_F1',
'Standard_F16', 'Standard_F16s', 'Standard_F16s_v2', 'Standard_F1s',
'Standard_F2', 'Standard_F2s', 'Standard_F2s_v2', 'Standard_F32s_v2',
'Standard_F4', 'Standard_F4s', 'Standard_F4s_v2', 'Standard_F64s_v2',
'Standard_F72s_v2', 'Standard_F8', 'Standard_F8s', 'Standard_F8s_v2',
'Standard_G1', 'Standard_G2', 'Standard_G3', 'Standard_G4', 'Standard_G5',
'Standard_GS1', 'Standard_GS2', 'Standard_GS3', 'Standard_GS4',
'Standard_GS4-4', 'Standard_GS4-8', 'Standard_GS5', 'Standard_GS5-16',
'Standard_GS5-8', 'Standard_H16', 'Standard_H16m', 'Standard_H16mr',
'Standard_H16r', 'Standard_H8', 'Standard_H8m', 'Standard_L16s',
'Standard_L32s', 'Standard_L4s', 'Standard_L8s', 'Standard_M128-32ms',
'Standard_M128-64ms', 'Standard_M128ms', 'Standard_M128s',
'Standard_M64-16ms', 'Standard_M64-32ms', 'Standard_M64ms',
'Standard_M64s', 'Standard_NC12', 'Standard_NC12s_v2',
'Standard_NC12s_v3', 'Standard_NC24', 'Standard_NC24r',
'Standard_NC24rs_v2', 'Standard_NC24rs_v3', 'Standard_NC24s_v2',
'Standard_NC24s_v3', 'Standard_NC6', 'Standard_NC6s_v2',
'Standard_NC6s_v3', 'Standard_ND12s', 'Standard_ND24rs', 'Standard_ND24s',
'Standard_ND6s', 'Standard_NV12', 'Standard_NV24', 'Standard_NV6'
:type vm_size: str or
~azure.mgmt.containerservice.v2019_02_01.models.ContainerServiceVMSizeTypes
:param os_disk_size_gb: OS Disk Size in GB to be used to specify the disk
size for every machine in this master/agent pool. If you specify 0, it
will apply the default osDisk size according to the vmSize specified.
:type os_disk_size_gb: int
:param vnet_subnet_id: VNet SubnetID specifies the VNet's subnet
identifier.
:type vnet_subnet_id: str
:param max_pods: Maximum number of pods that can run on a node.
:type max_pods: int
:param os_type: OsType to be used to specify os type. Choose from Linux
and Windows. Default to Linux. Possible values include: 'Linux',
'Windows'. Default value: "Linux" .
:type os_type: str or
~azure.mgmt.containerservice.v2019_02_01.models.OSType
:param max_count: Maximum number of nodes for auto-scaling
:type max_count: int
:param min_count: Minimum number of nodes for auto-scaling
:type min_count: int
:param enable_auto_scaling: Whether to enable auto-scaler
:type enable_auto_scaling: bool
:param agent_pool_type: AgentPoolType represents types of an agent pool.
Possible values include: 'VirtualMachineScaleSets', 'AvailabilitySet'
:type agent_pool_type: str or
~azure.mgmt.containerservice.v2019_02_01.models.AgentPoolType
:param orchestrator_version: Version of orchestrator specified when
creating the managed cluster.
:type orchestrator_version: str
:ivar provisioning_state: The current deployment or provisioning state,
which only appears in the response.
:vartype provisioning_state: str
:param availability_zones: (PREVIEW) Availability zones for nodes. Must
use VirtualMachineScaleSets AgentPoolType.
:type availability_zones: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'count': {'required': True, 'maximum': 100, 'minimum': 1},
'vm_size': {'required': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'count': {'key': 'properties.count', 'type': 'int'},
'vm_size': {'key': 'properties.vmSize', 'type': 'str'},
'os_disk_size_gb': {'key': 'properties.osDiskSizeGB', 'type': 'int'},
'vnet_subnet_id': {'key': 'properties.vnetSubnetID', 'type': 'str'},
'max_pods': {'key': 'properties.maxPods', 'type': 'int'},
'os_type': {'key': 'properties.osType', 'type': 'str'},
'max_count': {'key': 'properties.maxCount', 'type': 'int'},
'min_count': {'key': 'properties.minCount', 'type': 'int'},
'enable_auto_scaling': {'key': 'properties.enableAutoScaling', 'type': 'bool'},
'agent_pool_type': {'key': 'properties.type', 'type': 'str'},
'orchestrator_version': {'key': 'properties.orchestratorVersion', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'availability_zones': {'key': 'properties.availabilityZones', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(AgentPool, self).__init__(**kwargs)
self.count = kwargs.get('count', 1)
self.vm_size = kwargs.get('vm_size', None)
self.os_disk_size_gb = kwargs.get('os_disk_size_gb', None)
self.vnet_subnet_id = kwargs.get('vnet_subnet_id', None)
self.max_pods = kwargs.get('max_pods', None)
self.os_type = kwargs.get('os_type', "Linux")
self.max_count = kwargs.get('max_count', None)
self.min_count = kwargs.get('min_count', None)
self.enable_auto_scaling = kwargs.get('enable_auto_scaling', None)
self.agent_pool_type = kwargs.get('agent_pool_type', None)
self.orchestrator_version = kwargs.get('orchestrator_version', None)
self.provisioning_state = None
self.availability_zones = kwargs.get('availability_zones', None)
class CloudError(Model):
"""An error response from the Container service.
:param error: Details about the error.
:type error:
~azure.mgmt.containerservice.v2019_02_01.models.CloudErrorBody
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'CloudErrorBody'},
}
def __init__(self, **kwargs):
super(CloudError, self).__init__(**kwargs)
self.error = kwargs.get('error', None)
class CloudErrorException(HttpOperationError):
"""Server responsed with exception of type: 'CloudError'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(CloudErrorException, self).__init__(deserialize, response, 'CloudError', *args)
class CloudErrorBody(Model):
"""An error response from the Container service.
:param code: An identifier for the error. Codes are invariant and are
intended to be consumed programmatically.
:type code: str
:param message: A message describing the error, intended to be suitable
for display in a user interface.
:type message: str
:param target: The target of the particular error. For example, the name
of the property in error.
:type target: str
:param details: A list of additional details about the error.
:type details:
list[~azure.mgmt.containerservice.v2019_02_01.models.CloudErrorBody]
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[CloudErrorBody]'},
}
def __init__(self, **kwargs):
super(CloudErrorBody, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
self.target = kwargs.get('target', None)
self.details = kwargs.get('details', None)
class ContainerServiceDiagnosticsProfile(Model):
"""Profile for diagnostics on the container service cluster.
All required parameters must be populated in order to send to Azure.
:param vm_diagnostics: Required. Profile for diagnostics on the container
service VMs.
:type vm_diagnostics:
~azure.mgmt.containerservice.v2019_02_01.models.ContainerServiceVMDiagnostics
"""
_validation = {
'vm_diagnostics': {'required': True},
}
_attribute_map = {
'vm_diagnostics': {'key': 'vmDiagnostics', 'type': 'ContainerServiceVMDiagnostics'},
}
def __init__(self, **kwargs):
super(ContainerServiceDiagnosticsProfile, self).__init__(**kwargs)
self.vm_diagnostics = kwargs.get('vm_diagnostics', None)
class ContainerServiceLinuxProfile(Model):
"""Profile for Linux VMs in the container service cluster.
All required parameters must be populated in order to send to Azure.
:param admin_username: Required. The administrator username to use for
Linux VMs.
:type admin_username: str
:param ssh: Required. SSH configuration for Linux-based VMs running on
Azure.
:type ssh:
~azure.mgmt.containerservice.v2019_02_01.models.ContainerServiceSshConfiguration
"""
_validation = {
'admin_username': {'required': True, 'pattern': r'^[A-Za-z][-A-Za-z0-9_]*$'},
'ssh': {'required': True},
}
_attribute_map = {
'admin_username': {'key': 'adminUsername', 'type': 'str'},
'ssh': {'key': 'ssh', 'type': 'ContainerServiceSshConfiguration'},
}
def __init__(self, **kwargs):
super(ContainerServiceLinuxProfile, self).__init__(**kwargs)
self.admin_username = kwargs.get('admin_username', None)
self.ssh = kwargs.get('ssh', None)
class ContainerServiceMasterProfile(Model):
"""Profile for the container service master.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param count: Number of masters (VMs) in the container service cluster.
Allowed values are 1, 3, and 5. The default value is 1. Default value: 1 .
:type count: int
:param dns_prefix: Required. DNS prefix to be used to create the FQDN for
the master pool.
:type dns_prefix: str
:param vm_size: Required. Size of agent VMs. Possible values include:
'Standard_A1', 'Standard_A10', 'Standard_A11', 'Standard_A1_v2',
'Standard_A2', 'Standard_A2_v2', 'Standard_A2m_v2', 'Standard_A3',
'Standard_A4', 'Standard_A4_v2', 'Standard_A4m_v2', 'Standard_A5',
'Standard_A6', 'Standard_A7', 'Standard_A8', 'Standard_A8_v2',
'Standard_A8m_v2', 'Standard_A9', 'Standard_B2ms', 'Standard_B2s',
'Standard_B4ms', 'Standard_B8ms', 'Standard_D1', 'Standard_D11',
'Standard_D11_v2', 'Standard_D11_v2_Promo', 'Standard_D12',
'Standard_D12_v2', 'Standard_D12_v2_Promo', 'Standard_D13',
'Standard_D13_v2', 'Standard_D13_v2_Promo', 'Standard_D14',
'Standard_D14_v2', 'Standard_D14_v2_Promo', 'Standard_D15_v2',
'Standard_D16_v3', 'Standard_D16s_v3', 'Standard_D1_v2', 'Standard_D2',
'Standard_D2_v2', 'Standard_D2_v2_Promo', 'Standard_D2_v3',
'Standard_D2s_v3', 'Standard_D3', 'Standard_D32_v3', 'Standard_D32s_v3',
'Standard_D3_v2', 'Standard_D3_v2_Promo', 'Standard_D4', 'Standard_D4_v2',
'Standard_D4_v2_Promo', 'Standard_D4_v3', 'Standard_D4s_v3',
'Standard_D5_v2', 'Standard_D5_v2_Promo', 'Standard_D64_v3',
'Standard_D64s_v3', 'Standard_D8_v3', 'Standard_D8s_v3', 'Standard_DS1',
'Standard_DS11', 'Standard_DS11_v2', 'Standard_DS11_v2_Promo',
'Standard_DS12', 'Standard_DS12_v2', 'Standard_DS12_v2_Promo',
'Standard_DS13', 'Standard_DS13-2_v2', 'Standard_DS13-4_v2',
'Standard_DS13_v2', 'Standard_DS13_v2_Promo', 'Standard_DS14',
'Standard_DS14-4_v2', 'Standard_DS14-8_v2', 'Standard_DS14_v2',
'Standard_DS14_v2_Promo', 'Standard_DS15_v2', 'Standard_DS1_v2',
'Standard_DS2', 'Standard_DS2_v2', 'Standard_DS2_v2_Promo',
'Standard_DS3', 'Standard_DS3_v2', 'Standard_DS3_v2_Promo',
'Standard_DS4', 'Standard_DS4_v2', 'Standard_DS4_v2_Promo',
'Standard_DS5_v2', 'Standard_DS5_v2_Promo', 'Standard_E16_v3',
'Standard_E16s_v3', 'Standard_E2_v3', 'Standard_E2s_v3',
'Standard_E32-16s_v3', 'Standard_E32-8s_v3', 'Standard_E32_v3',
'Standard_E32s_v3', 'Standard_E4_v3', 'Standard_E4s_v3',
'Standard_E64-16s_v3', 'Standard_E64-32s_v3', 'Standard_E64_v3',
'Standard_E64s_v3', 'Standard_E8_v3', 'Standard_E8s_v3', 'Standard_F1',
'Standard_F16', 'Standard_F16s', 'Standard_F16s_v2', 'Standard_F1s',
'Standard_F2', 'Standard_F2s', 'Standard_F2s_v2', 'Standard_F32s_v2',
'Standard_F4', 'Standard_F4s', 'Standard_F4s_v2', 'Standard_F64s_v2',
'Standard_F72s_v2', 'Standard_F8', 'Standard_F8s', 'Standard_F8s_v2',
'Standard_G1', 'Standard_G2', 'Standard_G3', 'Standard_G4', 'Standard_G5',
'Standard_GS1', 'Standard_GS2', 'Standard_GS3', 'Standard_GS4',
'Standard_GS4-4', 'Standard_GS4-8', 'Standard_GS5', 'Standard_GS5-16',
'Standard_GS5-8', 'Standard_H16', 'Standard_H16m', 'Standard_H16mr',
'Standard_H16r', 'Standard_H8', 'Standard_H8m', 'Standard_L16s',
'Standard_L32s', 'Standard_L4s', 'Standard_L8s', 'Standard_M128-32ms',
'Standard_M128-64ms', 'Standard_M128ms', 'Standard_M128s',
'Standard_M64-16ms', 'Standard_M64-32ms', 'Standard_M64ms',
'Standard_M64s', 'Standard_NC12', 'Standard_NC12s_v2',
'Standard_NC12s_v3', 'Standard_NC24', 'Standard_NC24r',
'Standard_NC24rs_v2', 'Standard_NC24rs_v3', 'Standard_NC24s_v2',
'Standard_NC24s_v3', 'Standard_NC6', 'Standard_NC6s_v2',
'Standard_NC6s_v3', 'Standard_ND12s', 'Standard_ND24rs', 'Standard_ND24s',
'Standard_ND6s', 'Standard_NV12', 'Standard_NV24', 'Standard_NV6'
:type vm_size: str or
~azure.mgmt.containerservice.v2019_02_01.models.ContainerServiceVMSizeTypes
:param os_disk_size_gb: OS Disk Size in GB to be used to specify the disk
size for every machine in this master/agent pool. If you specify 0, it
will apply the default osDisk size according to the vmSize specified.
:type os_disk_size_gb: int
:param vnet_subnet_id: VNet SubnetID specifies the VNet's subnet
identifier.
:type vnet_subnet_id: str
:param first_consecutive_static_ip: FirstConsecutiveStaticIP used to
specify the first static ip of masters. Default value: "10.240.255.5" .
:type first_consecutive_static_ip: str
:param storage_profile: Storage profile specifies what kind of storage
used. Choose from StorageAccount and ManagedDisks. Leave it empty, we will
choose for you based on the orchestrator choice. Possible values include:
'StorageAccount', 'ManagedDisks'
:type storage_profile: str or
~azure.mgmt.containerservice.v2019_02_01.models.ContainerServiceStorageProfileTypes
:ivar fqdn: FQDN for the master pool.
:vartype fqdn: str
"""
_validation = {
'dns_prefix': {'required': True},
'vm_size': {'required': True},
'fqdn': {'readonly': True},
}
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'dns_prefix': {'key': 'dnsPrefix', 'type': 'str'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'os_disk_size_gb': {'key': 'osDiskSizeGB', 'type': 'int'},
'vnet_subnet_id': {'key': 'vnetSubnetID', 'type': 'str'},
'first_consecutive_static_ip': {'key': 'firstConsecutiveStaticIP', 'type': 'str'},
'storage_profile': {'key': 'storageProfile', 'type': 'str'},
'fqdn': {'key': 'fqdn', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ContainerServiceMasterProfile, self).__init__(**kwargs)
self.count = kwargs.get('count', 1)
self.dns_prefix = kwargs.get('dns_prefix', None)
self.vm_size = kwargs.get('vm_size', None)
self.os_disk_size_gb = kwargs.get('os_disk_size_gb', None)
self.vnet_subnet_id = kwargs.get('vnet_subnet_id', None)
self.first_consecutive_static_ip = kwargs.get('first_consecutive_static_ip', "10.240.255.5")
self.storage_profile = kwargs.get('storage_profile', None)
self.fqdn = None
class ContainerServiceNetworkProfile(Model):
"""Profile of network configuration.
:param network_plugin: Network plugin used for building Kubernetes
network. Possible values include: 'azure', 'kubenet'. Default value:
"kubenet" .
:type network_plugin: str or
~azure.mgmt.containerservice.v2019_02_01.models.NetworkPlugin
:param network_policy: Network policy used for building Kubernetes
network. Possible values include: 'calico', 'azure'
:type network_policy: str or
~azure.mgmt.containerservice.v2019_02_01.models.NetworkPolicy
:param pod_cidr: A CIDR notation IP range from which to assign pod IPs
when kubenet is used. Default value: "10.244.0.0/16" .
:type pod_cidr: str
:param service_cidr: A CIDR notation IP range from which to assign service
cluster IPs. It must not overlap with any Subnet IP ranges. Default value:
"10.0.0.0/16" .
:type service_cidr: str
:param dns_service_ip: An IP address assigned to the Kubernetes DNS
service. It must be within the Kubernetes service address range specified
in serviceCidr. Default value: "10.0.0.10" .
:type dns_service_ip: str
:param docker_bridge_cidr: A CIDR notation IP range assigned to the Docker
bridge network. It must not overlap with any Subnet IP ranges or the
Kubernetes service address range. Default value: "172.17.0.1/16" .
:type docker_bridge_cidr: str
"""
_validation = {
'pod_cidr': {'pattern': r'^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$'},
'service_cidr': {'pattern': r'^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$'},
'dns_service_ip': {'pattern': r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$'},
'docker_bridge_cidr': {'pattern': r'^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$'},
}
_attribute_map = {
'network_plugin': {'key': 'networkPlugin', 'type': 'str'},
'network_policy': {'key': 'networkPolicy', 'type': 'str'},
'pod_cidr': {'key': 'podCidr', 'type': 'str'},
'service_cidr': {'key': 'serviceCidr', 'type': 'str'},
'dns_service_ip': {'key': 'dnsServiceIP', 'type': 'str'},
'docker_bridge_cidr': {'key': 'dockerBridgeCidr', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ContainerServiceNetworkProfile, self).__init__(**kwargs)
self.network_plugin = kwargs.get('network_plugin', "kubenet")
self.network_policy = kwargs.get('network_policy', None)
self.pod_cidr = kwargs.get('pod_cidr', "10.244.0.0/16")
self.service_cidr = kwargs.get('service_cidr', "10.0.0.0/16")
self.dns_service_ip = kwargs.get('dns_service_ip', "10.0.0.10")
self.docker_bridge_cidr = kwargs.get('docker_bridge_cidr', "172.17.0.1/16")
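The CIDR pattern used in the validation map above can be exercised directly; this is an illustrative check only, not part of the SDK.
import re

cidr = re.compile(r'^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$')
assert cidr.match('10.244.0.0/16')       # the podCidr default
assert not cidr.match('10.244.0.0/40')   # prefix length out of range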
class ContainerServiceSshConfiguration(Model):
"""SSH configuration for Linux-based VMs running on Azure.
All required parameters must be populated in order to send to Azure.
:param public_keys: Required. The list of SSH public keys used to
authenticate with Linux-based VMs. Only one key is expected.
:type public_keys:
list[~azure.mgmt.containerservice.v2019_02_01.models.ContainerServiceSshPublicKey]
"""
_validation = {
'public_keys': {'required': True},
}
_attribute_map = {
'public_keys': {'key': 'publicKeys', 'type': '[ContainerServiceSshPublicKey]'},
}
def __init__(self, **kwargs):
super(ContainerServiceSshConfiguration, self).__init__(**kwargs)
self.public_keys = kwargs.get('public_keys', None)
class ContainerServiceSshPublicKey(Model):
"""Contains information about SSH certificate public key data.
All required parameters must be populated in order to send to Azure.
:param key_data: Required. Certificate public key used to authenticate
with VMs through SSH. The certificate must be in PEM format with or
without headers.
:type key_data: str
"""
_validation = {
'key_data': {'required': True},
}
_attribute_map = {
'key_data': {'key': 'keyData', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ContainerServiceSshPublicKey, self).__init__(**kwargs)
self.key_data = kwargs.get('key_data', None)
class ContainerServiceVMDiagnostics(Model):
"""Profile for diagnostics on the container service VMs.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param enabled: Required. Whether the VM diagnostic agent is provisioned
on the VM.
:type enabled: bool
:ivar storage_uri: The URI of the storage account where diagnostics are
stored.
:vartype storage_uri: str
"""
_validation = {
'enabled': {'required': True},
'storage_uri': {'readonly': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'storage_uri': {'key': 'storageUri', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ContainerServiceVMDiagnostics, self).__init__(**kwargs)
self.enabled = kwargs.get('enabled', None)
self.storage_uri = None
class ContainerServiceWindowsProfile(Model):
"""Profile for Windows VMs in the container service cluster.
All required parameters must be populated in order to send to Azure.
:param admin_username: Required. The administrator username to use for
Windows VMs.
:type admin_username: str
:param admin_password: Required. The administrator password to use for
Windows VMs.
:type admin_password: str
"""
_validation = {
'admin_username': {'required': True, 'pattern': r'^[a-zA-Z0-9]+([._]?[a-zA-Z0-9]+)*$'},
'admin_password': {'required': True, 'pattern': r'^(?=.*[a-z])(?=.*[A-Z])(?=.*[!@#$%\^&\*\(\)])[a-zA-Z\d!@#$%\^&\*\(\)]{12,123}$'},
}
_attribute_map = {
'admin_username': {'key': 'adminUsername', 'type': 'str'},
'admin_password': {'key': 'adminPassword', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ContainerServiceWindowsProfile, self).__init__(**kwargs)
self.admin_username = kwargs.get('admin_username', None)
self.admin_password = kwargs.get('admin_password', None)
class CredentialResult(Model):
"""The credential result response.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar name: The name of the credential.
:vartype name: str
:ivar value: Base64-encoded Kubernetes configuration file.
:vartype value: bytearray
"""
_validation = {
'name': {'readonly': True},
'value': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'bytearray'},
}
def __init__(self, **kwargs):
super(CredentialResult, self).__init__(**kwargs)
self.name = None
self.value = None
class CredentialResults(Model):
"""The list of credential result response.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar kubeconfigs: Base64-encoded Kubernetes configuration file.
:vartype kubeconfigs:
list[~azure.mgmt.containerservice.v2019_02_01.models.CredentialResult]
"""
_validation = {
'kubeconfigs': {'readonly': True},
}
_attribute_map = {
'kubeconfigs': {'key': 'kubeconfigs', 'type': '[CredentialResult]'},
}
def __init__(self, **kwargs):
super(CredentialResults, self).__init__(**kwargs)
self.kubeconfigs = None
class Resource(Model):
"""The Resource model definition.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Required. Resource location
:type location: str
:param tags: Resource tags
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, **kwargs):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = kwargs.get('location', None)
self.tags = kwargs.get('tags', None)
class ManagedCluster(Resource):
"""Managed cluster.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Required. Resource location
:type location: str
:param tags: Resource tags
:type tags: dict[str, str]
:ivar provisioning_state: The current deployment or provisioning state,
which only appears in the response.
:vartype provisioning_state: str
:param kubernetes_version: Version of Kubernetes specified when creating
the managed cluster.
:type kubernetes_version: str
:param dns_prefix: DNS prefix specified when creating the managed cluster.
:type dns_prefix: str
:ivar fqdn: FQDN for the master pool.
:vartype fqdn: str
:param agent_pool_profiles: Properties of the agent pool.
:type agent_pool_profiles:
list[~azure.mgmt.containerservice.v2019_02_01.models.ManagedClusterAgentPoolProfile]
:param linux_profile: Profile for Linux VMs in the container service
cluster.
:type linux_profile:
~azure.mgmt.containerservice.v2019_02_01.models.ContainerServiceLinuxProfile
:param service_principal_profile: Information about a service principal
identity for the cluster to use for manipulating Azure APIs.
:type service_principal_profile:
~azure.mgmt.containerservice.v2019_02_01.models.ManagedClusterServicePrincipalProfile
:param addon_profiles: Profile of managed cluster add-on.
:type addon_profiles: dict[str,
~azure.mgmt.containerservice.v2019_02_01.models.ManagedClusterAddonProfile]
:ivar node_resource_group: Name of the resource group containing agent
pool nodes.
:vartype node_resource_group: str
:param enable_rbac: Whether to enable Kubernetes Role-Based Access
Control.
:type enable_rbac: bool
:param enable_pod_security_policy: (DEPRECATING) Whether to enable
Kubernetes pod security policy (preview). This feature is set for removal
on October 15th, 2020. Learn more at aka.ms/aks/azpodpolicy.
:type enable_pod_security_policy: bool
:param network_profile: Profile of network configuration.
:type network_profile:
~azure.mgmt.containerservice.v2019_02_01.models.ContainerServiceNetworkProfile
:param aad_profile: Profile of Azure Active Directory configuration.
:type aad_profile:
~azure.mgmt.containerservice.v2019_02_01.models.ManagedClusterAADProfile
:param api_server_authorized_ip_ranges: (PREVIEW) Authorized IP Ranges to
kubernetes API server.
:type api_server_authorized_ip_ranges: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'provisioning_state': {'readonly': True},
'fqdn': {'readonly': True},
'node_resource_group': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'kubernetes_version': {'key': 'properties.kubernetesVersion', 'type': 'str'},
'dns_prefix': {'key': 'properties.dnsPrefix', 'type': 'str'},
'fqdn': {'key': 'properties.fqdn', 'type': 'str'},
'agent_pool_profiles': {'key': 'properties.agentPoolProfiles', 'type': '[ManagedClusterAgentPoolProfile]'},
'linux_profile': {'key': 'properties.linuxProfile', 'type': 'ContainerServiceLinuxProfile'},
'service_principal_profile': {'key': 'properties.servicePrincipalProfile', 'type': 'ManagedClusterServicePrincipalProfile'},
'addon_profiles': {'key': 'properties.addonProfiles', 'type': '{ManagedClusterAddonProfile}'},
'node_resource_group': {'key': 'properties.nodeResourceGroup', 'type': 'str'},
'enable_rbac': {'key': 'properties.enableRBAC', 'type': 'bool'},
'enable_pod_security_policy': {'key': 'properties.enablePodSecurityPolicy', 'type': 'bool'},
'network_profile': {'key': 'properties.networkProfile', 'type': 'ContainerServiceNetworkProfile'},
'aad_profile': {'key': 'properties.aadProfile', 'type': 'ManagedClusterAADProfile'},
'api_server_authorized_ip_ranges': {'key': 'properties.apiServerAuthorizedIPRanges', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(ManagedCluster, self).__init__(**kwargs)
self.provisioning_state = None
self.kubernetes_version = kwargs.get('kubernetes_version', None)
self.dns_prefix = kwargs.get('dns_prefix', None)
self.fqdn = None
self.agent_pool_profiles = kwargs.get('agent_pool_profiles', None)
self.linux_profile = kwargs.get('linux_profile', None)
self.service_principal_profile = kwargs.get('service_principal_profile', None)
self.addon_profiles = kwargs.get('addon_profiles', None)
self.node_resource_group = None
self.enable_rbac = kwargs.get('enable_rbac', None)
self.enable_pod_security_policy = kwargs.get('enable_pod_security_policy', None)
self.network_profile = kwargs.get('network_profile', None)
self.aad_profile = kwargs.get('aad_profile', None)
self.api_server_authorized_ip_ranges = kwargs.get('api_server_authorized_ip_ranges', None)
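A minimal sketch of the kwargs construction pattern these generated models share: writable attributes come from kwargs, while read-only attributes (provisioning_state, fqdn, node_resource_group) are forced to None and only populated by the server. All values below are illustrative, not real resources.
cluster = ManagedCluster(
    location='eastus',                       # hypothetical region
    dns_prefix='demo-aks',
    kubernetes_version='1.12.8',             # hypothetical version
    linux_profile=ContainerServiceLinuxProfile(
        admin_username='azureuser',
        ssh=ContainerServiceSshConfiguration(
            public_keys=[ContainerServiceSshPublicKey(key_data='ssh-rsa AAAA... example')],
        ),
    ),
)
assert cluster.provisioning_state is None    # read-only until the service responds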
class ManagedClusterAADProfile(Model):
"""AADProfile specifies attributes for Azure Active Directory integration.
All required parameters must be populated in order to send to Azure.
:param client_app_id: Required. The client AAD application ID.
:type client_app_id: str
:param server_app_id: Required. The server AAD application ID.
:type server_app_id: str
:param server_app_secret: The server AAD application secret.
:type server_app_secret: str
:param tenant_id: The AAD tenant ID to use for authentication. If not
specified, will use the tenant of the deployment subscription.
:type tenant_id: str
"""
_validation = {
'client_app_id': {'required': True},
'server_app_id': {'required': True},
}
_attribute_map = {
'client_app_id': {'key': 'clientAppID', 'type': 'str'},
'server_app_id': {'key': 'serverAppID', 'type': 'str'},
'server_app_secret': {'key': 'serverAppSecret', 'type': 'str'},
'tenant_id': {'key': 'tenantID', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ManagedClusterAADProfile, self).__init__(**kwargs)
self.client_app_id = kwargs.get('client_app_id', None)
self.server_app_id = kwargs.get('server_app_id', None)
self.server_app_secret = kwargs.get('server_app_secret', None)
self.tenant_id = kwargs.get('tenant_id', None)
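# Editor's sketch (not from the generated file): constructing an AAD profile.
# The GUID-like placeholders are hypothetical, not real application IDs.
def _example_aad_profile():
    return ManagedClusterAADProfile(
        client_app_id='<client-app-guid>',
        server_app_id='<server-app-guid>',
        tenant_id='<tenant-guid>',  # optional; defaults to the deployment tenant
    )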
class ManagedClusterAccessProfile(Resource):
"""Managed cluster Access Profile.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Required. Resource location
:type location: str
:param tags: Resource tags
:type tags: dict[str, str]
:param kube_config: Base64-encoded Kubernetes configuration file.
:type kube_config: bytearray
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kube_config': {'key': 'properties.kubeConfig', 'type': 'bytearray'},
}
def __init__(self, **kwargs):
super(ManagedClusterAccessProfile, self).__init__(**kwargs)
self.kube_config = kwargs.get('kube_config', None)
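# Editor's sketch: decoding the kubeconfig carried by an access profile,
# assuming the server populated kube_config as a bytearray (see the map above).
def _example_read_kubeconfig(access_profile):
    raw = access_profile.kube_config or bytearray()
    return bytes(raw).decode('utf-8')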
class ManagedClusterAddonProfile(Model):
"""A Kubernetes add-on profile for a managed cluster.
All required parameters must be populated in order to send to Azure.
:param enabled: Required. Whether the add-on is enabled or not.
:type enabled: bool
:param config: Key-value pairs for configuring an add-on.
:type config: dict[str, str]
"""
_validation = {
'enabled': {'required': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'config': {'key': 'config', 'type': '{str}'},
}
def __init__(self, **kwargs):
super(ManagedClusterAddonProfile, self).__init__(**kwargs)
self.enabled = kwargs.get('enabled', None)
self.config = kwargs.get('config', None)
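# Editor's sketch: enabling an add-on. The add-on name and config key are
# hypothetical; valid names are defined by the AKS service, not this model.
def _example_addon_profiles():
    return {
        'exampleAddon': ManagedClusterAddonProfile(
            enabled=True,
            config={'exampleSetting': 'true'},
        ),
    }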
class ManagedClusterAgentPoolProfileProperties(Model):
"""Properties for the container service agent pool profile.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
    :param count: Required. Number of agents (VMs) to host docker containers.
     Allowed values must be in the range of 1 to 100 (inclusive). Default
     value: 1.
:type count: int
:param vm_size: Required. Size of agent VMs. Possible values include:
'Standard_A1', 'Standard_A10', 'Standard_A11', 'Standard_A1_v2',
'Standard_A2', 'Standard_A2_v2', 'Standard_A2m_v2', 'Standard_A3',
'Standard_A4', 'Standard_A4_v2', 'Standard_A4m_v2', 'Standard_A5',
'Standard_A6', 'Standard_A7', 'Standard_A8', 'Standard_A8_v2',
'Standard_A8m_v2', 'Standard_A9', 'Standard_B2ms', 'Standard_B2s',
'Standard_B4ms', 'Standard_B8ms', 'Standard_D1', 'Standard_D11',
'Standard_D11_v2', 'Standard_D11_v2_Promo', 'Standard_D12',
'Standard_D12_v2', 'Standard_D12_v2_Promo', 'Standard_D13',
'Standard_D13_v2', 'Standard_D13_v2_Promo', 'Standard_D14',
'Standard_D14_v2', 'Standard_D14_v2_Promo', 'Standard_D15_v2',
'Standard_D16_v3', 'Standard_D16s_v3', 'Standard_D1_v2', 'Standard_D2',
'Standard_D2_v2', 'Standard_D2_v2_Promo', 'Standard_D2_v3',
'Standard_D2s_v3', 'Standard_D3', 'Standard_D32_v3', 'Standard_D32s_v3',
'Standard_D3_v2', 'Standard_D3_v2_Promo', 'Standard_D4', 'Standard_D4_v2',
'Standard_D4_v2_Promo', 'Standard_D4_v3', 'Standard_D4s_v3',
'Standard_D5_v2', 'Standard_D5_v2_Promo', 'Standard_D64_v3',
'Standard_D64s_v3', 'Standard_D8_v3', 'Standard_D8s_v3', 'Standard_DS1',
'Standard_DS11', 'Standard_DS11_v2', 'Standard_DS11_v2_Promo',
'Standard_DS12', 'Standard_DS12_v2', 'Standard_DS12_v2_Promo',
'Standard_DS13', 'Standard_DS13-2_v2', 'Standard_DS13-4_v2',
'Standard_DS13_v2', 'Standard_DS13_v2_Promo', 'Standard_DS14',
'Standard_DS14-4_v2', 'Standard_DS14-8_v2', 'Standard_DS14_v2',
'Standard_DS14_v2_Promo', 'Standard_DS15_v2', 'Standard_DS1_v2',
'Standard_DS2', 'Standard_DS2_v2', 'Standard_DS2_v2_Promo',
'Standard_DS3', 'Standard_DS3_v2', 'Standard_DS3_v2_Promo',
'Standard_DS4', 'Standard_DS4_v2', 'Standard_DS4_v2_Promo',
'Standard_DS5_v2', 'Standard_DS5_v2_Promo', 'Standard_E16_v3',
'Standard_E16s_v3', 'Standard_E2_v3', 'Standard_E2s_v3',
'Standard_E32-16s_v3', 'Standard_E32-8s_v3', 'Standard_E32_v3',
'Standard_E32s_v3', 'Standard_E4_v3', 'Standard_E4s_v3',
'Standard_E64-16s_v3', 'Standard_E64-32s_v3', 'Standard_E64_v3',
'Standard_E64s_v3', 'Standard_E8_v3', 'Standard_E8s_v3', 'Standard_F1',
'Standard_F16', 'Standard_F16s', 'Standard_F16s_v2', 'Standard_F1s',
'Standard_F2', 'Standard_F2s', 'Standard_F2s_v2', 'Standard_F32s_v2',
'Standard_F4', 'Standard_F4s', 'Standard_F4s_v2', 'Standard_F64s_v2',
'Standard_F72s_v2', 'Standard_F8', 'Standard_F8s', 'Standard_F8s_v2',
'Standard_G1', 'Standard_G2', 'Standard_G3', 'Standard_G4', 'Standard_G5',
'Standard_GS1', 'Standard_GS2', 'Standard_GS3', 'Standard_GS4',
'Standard_GS4-4', 'Standard_GS4-8', 'Standard_GS5', 'Standard_GS5-16',
'Standard_GS5-8', 'Standard_H16', 'Standard_H16m', 'Standard_H16mr',
'Standard_H16r', 'Standard_H8', 'Standard_H8m', 'Standard_L16s',
'Standard_L32s', 'Standard_L4s', 'Standard_L8s', 'Standard_M128-32ms',
'Standard_M128-64ms', 'Standard_M128ms', 'Standard_M128s',
'Standard_M64-16ms', 'Standard_M64-32ms', 'Standard_M64ms',
'Standard_M64s', 'Standard_NC12', 'Standard_NC12s_v2',
'Standard_NC12s_v3', 'Standard_NC24', 'Standard_NC24r',
'Standard_NC24rs_v2', 'Standard_NC24rs_v3', 'Standard_NC24s_v2',
'Standard_NC24s_v3', 'Standard_NC6', 'Standard_NC6s_v2',
'Standard_NC6s_v3', 'Standard_ND12s', 'Standard_ND24rs', 'Standard_ND24s',
'Standard_ND6s', 'Standard_NV12', 'Standard_NV24', 'Standard_NV6'
:type vm_size: str or
~azure.mgmt.containerservice.v2019_02_01.models.ContainerServiceVMSizeTypes
    :param os_disk_size_gb: OS disk size in GB for every machine in this
     master/agent pool. If you specify 0, the default osDisk size for the
     specified vmSize is applied.
:type os_disk_size_gb: int
:param vnet_subnet_id: VNet SubnetID specifies the VNet's subnet
identifier.
:type vnet_subnet_id: str
:param max_pods: Maximum number of pods that can run on a node.
:type max_pods: int
    :param os_type: The OS type of the agent VMs. Choose from Linux and
     Windows; defaults to Linux. Possible values include: 'Linux',
     'Windows'. Default value: "Linux".
:type os_type: str or
~azure.mgmt.containerservice.v2019_02_01.models.OSType
:param max_count: Maximum number of nodes for auto-scaling
:type max_count: int
:param min_count: Minimum number of nodes for auto-scaling
:type min_count: int
:param enable_auto_scaling: Whether to enable auto-scaler
:type enable_auto_scaling: bool
:param type: AgentPoolType represents types of an agent pool. Possible
values include: 'VirtualMachineScaleSets', 'AvailabilitySet'
:type type: str or
~azure.mgmt.containerservice.v2019_02_01.models.AgentPoolType
:param orchestrator_version: Version of orchestrator specified when
creating the managed cluster.
:type orchestrator_version: str
:ivar provisioning_state: The current deployment or provisioning state,
which only appears in the response.
:vartype provisioning_state: str
:param availability_zones: (PREVIEW) Availability zones for nodes. Must
use VirtualMachineScaleSets AgentPoolType.
:type availability_zones: list[str]
"""
_validation = {
'count': {'required': True, 'maximum': 100, 'minimum': 1},
'vm_size': {'required': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'os_disk_size_gb': {'key': 'osDiskSizeGB', 'type': 'int'},
'vnet_subnet_id': {'key': 'vnetSubnetID', 'type': 'str'},
'max_pods': {'key': 'maxPods', 'type': 'int'},
'os_type': {'key': 'osType', 'type': 'str'},
'max_count': {'key': 'maxCount', 'type': 'int'},
'min_count': {'key': 'minCount', 'type': 'int'},
'enable_auto_scaling': {'key': 'enableAutoScaling', 'type': 'bool'},
'type': {'key': 'type', 'type': 'str'},
'orchestrator_version': {'key': 'orchestratorVersion', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'availability_zones': {'key': 'availabilityZones', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(ManagedClusterAgentPoolProfileProperties, self).__init__(**kwargs)
self.count = kwargs.get('count', 1)
self.vm_size = kwargs.get('vm_size', None)
self.os_disk_size_gb = kwargs.get('os_disk_size_gb', None)
self.vnet_subnet_id = kwargs.get('vnet_subnet_id', None)
self.max_pods = kwargs.get('max_pods', None)
self.os_type = kwargs.get('os_type', "Linux")
self.max_count = kwargs.get('max_count', None)
self.min_count = kwargs.get('min_count', None)
self.enable_auto_scaling = kwargs.get('enable_auto_scaling', None)
self.type = kwargs.get('type', None)
self.orchestrator_version = kwargs.get('orchestrator_version', None)
self.provisioning_state = None
self.availability_zones = kwargs.get('availability_zones', None)
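# Editor's sketch: the constructor defaults documented above (count=1,
# os_type='Linux') apply whenever the corresponding kwargs are omitted.
def _example_default_pool_properties():
    props = ManagedClusterAgentPoolProfileProperties(vm_size='Standard_DS2_v2')
    assert props.count == 1
    assert props.os_type == 'Linux'
    return props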
class ManagedClusterAgentPoolProfile(ManagedClusterAgentPoolProfileProperties):
"""Profile for the container service agent pool.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
    :param count: Required. Number of agents (VMs) to host docker containers.
     Allowed values must be in the range of 1 to 100 (inclusive). Default
     value: 1.
:type count: int
:param vm_size: Required. Size of agent VMs. Possible values include:
'Standard_A1', 'Standard_A10', 'Standard_A11', 'Standard_A1_v2',
'Standard_A2', 'Standard_A2_v2', 'Standard_A2m_v2', 'Standard_A3',
'Standard_A4', 'Standard_A4_v2', 'Standard_A4m_v2', 'Standard_A5',
'Standard_A6', 'Standard_A7', 'Standard_A8', 'Standard_A8_v2',
'Standard_A8m_v2', 'Standard_A9', 'Standard_B2ms', 'Standard_B2s',
'Standard_B4ms', 'Standard_B8ms', 'Standard_D1', 'Standard_D11',
'Standard_D11_v2', 'Standard_D11_v2_Promo', 'Standard_D12',
'Standard_D12_v2', 'Standard_D12_v2_Promo', 'Standard_D13',
'Standard_D13_v2', 'Standard_D13_v2_Promo', 'Standard_D14',
'Standard_D14_v2', 'Standard_D14_v2_Promo', 'Standard_D15_v2',
'Standard_D16_v3', 'Standard_D16s_v3', 'Standard_D1_v2', 'Standard_D2',
'Standard_D2_v2', 'Standard_D2_v2_Promo', 'Standard_D2_v3',
'Standard_D2s_v3', 'Standard_D3', 'Standard_D32_v3', 'Standard_D32s_v3',
'Standard_D3_v2', 'Standard_D3_v2_Promo', 'Standard_D4', 'Standard_D4_v2',
'Standard_D4_v2_Promo', 'Standard_D4_v3', 'Standard_D4s_v3',
'Standard_D5_v2', 'Standard_D5_v2_Promo', 'Standard_D64_v3',
'Standard_D64s_v3', 'Standard_D8_v3', 'Standard_D8s_v3', 'Standard_DS1',
'Standard_DS11', 'Standard_DS11_v2', 'Standard_DS11_v2_Promo',
'Standard_DS12', 'Standard_DS12_v2', 'Standard_DS12_v2_Promo',
'Standard_DS13', 'Standard_DS13-2_v2', 'Standard_DS13-4_v2',
'Standard_DS13_v2', 'Standard_DS13_v2_Promo', 'Standard_DS14',
'Standard_DS14-4_v2', 'Standard_DS14-8_v2', 'Standard_DS14_v2',
'Standard_DS14_v2_Promo', 'Standard_DS15_v2', 'Standard_DS1_v2',
'Standard_DS2', 'Standard_DS2_v2', 'Standard_DS2_v2_Promo',
'Standard_DS3', 'Standard_DS3_v2', 'Standard_DS3_v2_Promo',
'Standard_DS4', 'Standard_DS4_v2', 'Standard_DS4_v2_Promo',
'Standard_DS5_v2', 'Standard_DS5_v2_Promo', 'Standard_E16_v3',
'Standard_E16s_v3', 'Standard_E2_v3', 'Standard_E2s_v3',
'Standard_E32-16s_v3', 'Standard_E32-8s_v3', 'Standard_E32_v3',
'Standard_E32s_v3', 'Standard_E4_v3', 'Standard_E4s_v3',
'Standard_E64-16s_v3', 'Standard_E64-32s_v3', 'Standard_E64_v3',
'Standard_E64s_v3', 'Standard_E8_v3', 'Standard_E8s_v3', 'Standard_F1',
'Standard_F16', 'Standard_F16s', 'Standard_F16s_v2', 'Standard_F1s',
'Standard_F2', 'Standard_F2s', 'Standard_F2s_v2', 'Standard_F32s_v2',
'Standard_F4', 'Standard_F4s', 'Standard_F4s_v2', 'Standard_F64s_v2',
'Standard_F72s_v2', 'Standard_F8', 'Standard_F8s', 'Standard_F8s_v2',
'Standard_G1', 'Standard_G2', 'Standard_G3', 'Standard_G4', 'Standard_G5',
'Standard_GS1', 'Standard_GS2', 'Standard_GS3', 'Standard_GS4',
'Standard_GS4-4', 'Standard_GS4-8', 'Standard_GS5', 'Standard_GS5-16',
'Standard_GS5-8', 'Standard_H16', 'Standard_H16m', 'Standard_H16mr',
'Standard_H16r', 'Standard_H8', 'Standard_H8m', 'Standard_L16s',
'Standard_L32s', 'Standard_L4s', 'Standard_L8s', 'Standard_M128-32ms',
'Standard_M128-64ms', 'Standard_M128ms', 'Standard_M128s',
'Standard_M64-16ms', 'Standard_M64-32ms', 'Standard_M64ms',
'Standard_M64s', 'Standard_NC12', 'Standard_NC12s_v2',
'Standard_NC12s_v3', 'Standard_NC24', 'Standard_NC24r',
'Standard_NC24rs_v2', 'Standard_NC24rs_v3', 'Standard_NC24s_v2',
'Standard_NC24s_v3', 'Standard_NC6', 'Standard_NC6s_v2',
'Standard_NC6s_v3', 'Standard_ND12s', 'Standard_ND24rs', 'Standard_ND24s',
'Standard_ND6s', 'Standard_NV12', 'Standard_NV24', 'Standard_NV6'
:type vm_size: str or
~azure.mgmt.containerservice.v2019_02_01.models.ContainerServiceVMSizeTypes
    :param os_disk_size_gb: OS disk size in GB for every machine in this
     master/agent pool. If you specify 0, the default osDisk size for the
     specified vmSize is applied.
:type os_disk_size_gb: int
:param vnet_subnet_id: VNet SubnetID specifies the VNet's subnet
identifier.
:type vnet_subnet_id: str
:param max_pods: Maximum number of pods that can run on a node.
:type max_pods: int
    :param os_type: The OS type of the agent VMs. Choose from Linux and
     Windows; defaults to Linux. Possible values include: 'Linux',
     'Windows'. Default value: "Linux".
:type os_type: str or
~azure.mgmt.containerservice.v2019_02_01.models.OSType
:param max_count: Maximum number of nodes for auto-scaling
:type max_count: int
:param min_count: Minimum number of nodes for auto-scaling
:type min_count: int
:param enable_auto_scaling: Whether to enable auto-scaler
:type enable_auto_scaling: bool
:param type: AgentPoolType represents types of an agent pool. Possible
values include: 'VirtualMachineScaleSets', 'AvailabilitySet'
:type type: str or
~azure.mgmt.containerservice.v2019_02_01.models.AgentPoolType
:param orchestrator_version: Version of orchestrator specified when
creating the managed cluster.
:type orchestrator_version: str
:ivar provisioning_state: The current deployment or provisioning state,
which only appears in the response.
:vartype provisioning_state: str
:param availability_zones: (PREVIEW) Availability zones for nodes. Must
use VirtualMachineScaleSets AgentPoolType.
:type availability_zones: list[str]
:param name: Required. Unique name of the agent pool profile in the
context of the subscription and resource group.
:type name: str
"""
_validation = {
'count': {'required': True, 'maximum': 100, 'minimum': 1},
'vm_size': {'required': True},
'provisioning_state': {'readonly': True},
'name': {'required': True, 'pattern': r'^[a-z][a-z0-9]{0,11}$'},
}
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'os_disk_size_gb': {'key': 'osDiskSizeGB', 'type': 'int'},
'vnet_subnet_id': {'key': 'vnetSubnetID', 'type': 'str'},
'max_pods': {'key': 'maxPods', 'type': 'int'},
'os_type': {'key': 'osType', 'type': 'str'},
'max_count': {'key': 'maxCount', 'type': 'int'},
'min_count': {'key': 'minCount', 'type': 'int'},
'enable_auto_scaling': {'key': 'enableAutoScaling', 'type': 'bool'},
'type': {'key': 'type', 'type': 'str'},
'orchestrator_version': {'key': 'orchestratorVersion', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'availability_zones': {'key': 'availabilityZones', 'type': '[str]'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ManagedClusterAgentPoolProfile, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
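# Editor's sketch: unlike the properties base class, the profile also requires
# a name matching ^[a-z][a-z0-9]{0,11}$ (see _validation above).
def _example_named_agent_pool():
    return ManagedClusterAgentPoolProfile(
        name='gpupool1',          # hypothetical pool name, pattern-conformant
        count=2,
        vm_size='Standard_NC6',   # one of the documented VM sizes
    )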
class ManagedClusterPoolUpgradeProfile(Model):
"""The list of available upgrade versions.
All required parameters must be populated in order to send to Azure.
:param kubernetes_version: Required. Kubernetes version (major, minor,
patch).
:type kubernetes_version: str
:param name: Pool name.
:type name: str
    :param os_type: Required. The OS type of the pool. Choose from Linux and
     Windows; defaults to Linux. Possible values include: 'Linux',
     'Windows'. Default value: "Linux".
:type os_type: str or
~azure.mgmt.containerservice.v2019_02_01.models.OSType
:param upgrades: List of orchestrator types and versions available for
upgrade.
:type upgrades: list[str]
"""
_validation = {
'kubernetes_version': {'required': True},
'os_type': {'required': True},
}
_attribute_map = {
'kubernetes_version': {'key': 'kubernetesVersion', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'os_type': {'key': 'osType', 'type': 'str'},
'upgrades': {'key': 'upgrades', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(ManagedClusterPoolUpgradeProfile, self).__init__(**kwargs)
self.kubernetes_version = kwargs.get('kubernetes_version', None)
self.name = kwargs.get('name', None)
self.os_type = kwargs.get('os_type', "Linux")
self.upgrades = kwargs.get('upgrades', None)
class ManagedClusterServicePrincipalProfile(Model):
"""Information about a service principal identity for the cluster to use for
manipulating Azure APIs.
All required parameters must be populated in order to send to Azure.
:param client_id: Required. The ID for the service principal.
:type client_id: str
:param secret: The secret password associated with the service principal
in plain text.
:type secret: str
"""
_validation = {
'client_id': {'required': True},
}
_attribute_map = {
'client_id': {'key': 'clientId', 'type': 'str'},
'secret': {'key': 'secret', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ManagedClusterServicePrincipalProfile, self).__init__(**kwargs)
self.client_id = kwargs.get('client_id', None)
self.secret = kwargs.get('secret', None)
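# Editor's sketch: a service principal profile. Both values are hypothetical
# placeholders; note the secret travels in plain text per the docstring.
def _example_service_principal():
    return ManagedClusterServicePrincipalProfile(
        client_id='<service-principal-guid>',
        secret='<service-principal-secret>',
    )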
class ManagedClusterUpgradeProfile(Model):
"""The list of available upgrades for compute pools.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Id of upgrade profile.
:vartype id: str
:ivar name: Name of upgrade profile.
:vartype name: str
:ivar type: Type of upgrade profile.
:vartype type: str
:param control_plane_profile: Required. The list of available upgrade
versions for the control plane.
:type control_plane_profile:
~azure.mgmt.containerservice.v2019_02_01.models.ManagedClusterPoolUpgradeProfile
:param agent_pool_profiles: Required. The list of available upgrade
versions for agent pools.
:type agent_pool_profiles:
list[~azure.mgmt.containerservice.v2019_02_01.models.ManagedClusterPoolUpgradeProfile]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'control_plane_profile': {'required': True},
'agent_pool_profiles': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'control_plane_profile': {'key': 'properties.controlPlaneProfile', 'type': 'ManagedClusterPoolUpgradeProfile'},
'agent_pool_profiles': {'key': 'properties.agentPoolProfiles', 'type': '[ManagedClusterPoolUpgradeProfile]'},
}
def __init__(self, **kwargs):
super(ManagedClusterUpgradeProfile, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.control_plane_profile = kwargs.get('control_plane_profile', None)
self.agent_pool_profiles = kwargs.get('agent_pool_profiles', None)
class OperationValue(Model):
"""Describes the properties of a Compute Operation value.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar origin: The origin of the compute operation.
:vartype origin: str
:ivar name: The name of the compute operation.
:vartype name: str
:ivar operation: The display name of the compute operation.
:vartype operation: str
:ivar resource: The display name of the resource the operation applies to.
:vartype resource: str
:ivar description: The description of the operation.
:vartype description: str
:ivar provider: The resource provider for the operation.
:vartype provider: str
"""
_validation = {
'origin': {'readonly': True},
'name': {'readonly': True},
'operation': {'readonly': True},
'resource': {'readonly': True},
'description': {'readonly': True},
'provider': {'readonly': True},
}
_attribute_map = {
'origin': {'key': 'origin', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'operation': {'key': 'display.operation', 'type': 'str'},
'resource': {'key': 'display.resource', 'type': 'str'},
'description': {'key': 'display.description', 'type': 'str'},
'provider': {'key': 'display.provider', 'type': 'str'},
}
def __init__(self, **kwargs):
super(OperationValue, self).__init__(**kwargs)
self.origin = None
self.name = None
self.operation = None
self.resource = None
self.description = None
self.provider = None
class OrchestratorProfile(Model):
"""Contains information about orchestrator.
All required parameters must be populated in order to send to Azure.
:param orchestrator_type: Required. Orchestrator type.
:type orchestrator_type: str
:param orchestrator_version: Required. Orchestrator version (major, minor,
patch).
:type orchestrator_version: str
"""
_validation = {
'orchestrator_type': {'required': True},
'orchestrator_version': {'required': True},
}
_attribute_map = {
'orchestrator_type': {'key': 'orchestratorType', 'type': 'str'},
'orchestrator_version': {'key': 'orchestratorVersion', 'type': 'str'},
}
def __init__(self, **kwargs):
super(OrchestratorProfile, self).__init__(**kwargs)
self.orchestrator_type = kwargs.get('orchestrator_type', None)
self.orchestrator_version = kwargs.get('orchestrator_version', None)
class TagsObject(Model):
"""Tags object for patch operations.
:param tags: Resource tags.
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, **kwargs):
super(TagsObject, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
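# Editor's sketch: TagsObject is the PATCH body used to update resource tags.
def _example_tags_patch():
    return TagsObject(tags={'environment': 'dev', 'owner': 'example-team'})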
| 43.776818
| 139
| 0.670526
|
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class SubResource(Model):
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, **kwargs):
super(SubResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class AgentPool(SubResource):
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'count': {'required': True, 'maximum': 100, 'minimum': 1},
'vm_size': {'required': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'count': {'key': 'properties.count', 'type': 'int'},
'vm_size': {'key': 'properties.vmSize', 'type': 'str'},
'os_disk_size_gb': {'key': 'properties.osDiskSizeGB', 'type': 'int'},
'vnet_subnet_id': {'key': 'properties.vnetSubnetID', 'type': 'str'},
'max_pods': {'key': 'properties.maxPods', 'type': 'int'},
'os_type': {'key': 'properties.osType', 'type': 'str'},
'max_count': {'key': 'properties.maxCount', 'type': 'int'},
'min_count': {'key': 'properties.minCount', 'type': 'int'},
'enable_auto_scaling': {'key': 'properties.enableAutoScaling', 'type': 'bool'},
'agent_pool_type': {'key': 'properties.type', 'type': 'str'},
'orchestrator_version': {'key': 'properties.orchestratorVersion', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'availability_zones': {'key': 'properties.availabilityZones', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(AgentPool, self).__init__(**kwargs)
self.count = kwargs.get('count', 1)
self.vm_size = kwargs.get('vm_size', None)
self.os_disk_size_gb = kwargs.get('os_disk_size_gb', None)
self.vnet_subnet_id = kwargs.get('vnet_subnet_id', None)
self.max_pods = kwargs.get('max_pods', None)
self.os_type = kwargs.get('os_type', "Linux")
self.max_count = kwargs.get('max_count', None)
self.min_count = kwargs.get('min_count', None)
self.enable_auto_scaling = kwargs.get('enable_auto_scaling', None)
self.agent_pool_type = kwargs.get('agent_pool_type', None)
self.orchestrator_version = kwargs.get('orchestrator_version', None)
self.provisioning_state = None
self.availability_zones = kwargs.get('availability_zones', None)
class CloudError(Model):
_attribute_map = {
'error': {'key': 'error', 'type': 'CloudErrorBody'},
}
def __init__(self, **kwargs):
super(CloudError, self).__init__(**kwargs)
self.error = kwargs.get('error', None)
class CloudErrorException(HttpOperationError):
def __init__(self, deserialize, response, *args):
super(CloudErrorException, self).__init__(deserialize, response, 'CloudError', *args)
class CloudErrorBody(Model):
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[CloudErrorBody]'},
}
def __init__(self, **kwargs):
super(CloudErrorBody, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
self.target = kwargs.get('target', None)
self.details = kwargs.get('details', None)
class ContainerServiceDiagnosticsProfile(Model):
_validation = {
'vm_diagnostics': {'required': True},
}
_attribute_map = {
'vm_diagnostics': {'key': 'vmDiagnostics', 'type': 'ContainerServiceVMDiagnostics'},
}
def __init__(self, **kwargs):
super(ContainerServiceDiagnosticsProfile, self).__init__(**kwargs)
self.vm_diagnostics = kwargs.get('vm_diagnostics', None)
class ContainerServiceLinuxProfile(Model):
_validation = {
'admin_username': {'required': True, 'pattern': r'^[A-Za-z][-A-Za-z0-9_]*$'},
'ssh': {'required': True},
}
_attribute_map = {
'admin_username': {'key': 'adminUsername', 'type': 'str'},
'ssh': {'key': 'ssh', 'type': 'ContainerServiceSshConfiguration'},
}
def __init__(self, **kwargs):
super(ContainerServiceLinuxProfile, self).__init__(**kwargs)
self.admin_username = kwargs.get('admin_username', None)
self.ssh = kwargs.get('ssh', None)
class ContainerServiceMasterProfile(Model):
_validation = {
'dns_prefix': {'required': True},
'vm_size': {'required': True},
'fqdn': {'readonly': True},
}
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'dns_prefix': {'key': 'dnsPrefix', 'type': 'str'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'os_disk_size_gb': {'key': 'osDiskSizeGB', 'type': 'int'},
'vnet_subnet_id': {'key': 'vnetSubnetID', 'type': 'str'},
'first_consecutive_static_ip': {'key': 'firstConsecutiveStaticIP', 'type': 'str'},
'storage_profile': {'key': 'storageProfile', 'type': 'str'},
'fqdn': {'key': 'fqdn', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ContainerServiceMasterProfile, self).__init__(**kwargs)
self.count = kwargs.get('count', 1)
self.dns_prefix = kwargs.get('dns_prefix', None)
self.vm_size = kwargs.get('vm_size', None)
self.os_disk_size_gb = kwargs.get('os_disk_size_gb', None)
self.vnet_subnet_id = kwargs.get('vnet_subnet_id', None)
self.first_consecutive_static_ip = kwargs.get('first_consecutive_static_ip', "10.240.255.5")
self.storage_profile = kwargs.get('storage_profile', None)
self.fqdn = None
class ContainerServiceNetworkProfile(Model):
_validation = {
'pod_cidr': {'pattern': r'^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$'},
'service_cidr': {'pattern': r'^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$'},
'dns_service_ip': {'pattern': r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$'},
'docker_bridge_cidr': {'pattern': r'^([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?$'},
}
_attribute_map = {
'network_plugin': {'key': 'networkPlugin', 'type': 'str'},
'network_policy': {'key': 'networkPolicy', 'type': 'str'},
'pod_cidr': {'key': 'podCidr', 'type': 'str'},
'service_cidr': {'key': 'serviceCidr', 'type': 'str'},
'dns_service_ip': {'key': 'dnsServiceIP', 'type': 'str'},
'docker_bridge_cidr': {'key': 'dockerBridgeCidr', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ContainerServiceNetworkProfile, self).__init__(**kwargs)
self.network_plugin = kwargs.get('network_plugin', "kubenet")
self.network_policy = kwargs.get('network_policy', None)
self.pod_cidr = kwargs.get('pod_cidr', "10.244.0.0/16")
self.service_cidr = kwargs.get('service_cidr', "10.0.0.0/16")
self.dns_service_ip = kwargs.get('dns_service_ip', "10.0.0.10")
self.docker_bridge_cidr = kwargs.get('docker_bridge_cidr', "172.17.0.1/16")
class ContainerServiceSshConfiguration(Model):
_validation = {
'public_keys': {'required': True},
}
_attribute_map = {
'public_keys': {'key': 'publicKeys', 'type': '[ContainerServiceSshPublicKey]'},
}
def __init__(self, **kwargs):
super(ContainerServiceSshConfiguration, self).__init__(**kwargs)
self.public_keys = kwargs.get('public_keys', None)
class ContainerServiceSshPublicKey(Model):
_validation = {
'key_data': {'required': True},
}
_attribute_map = {
'key_data': {'key': 'keyData', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ContainerServiceSshPublicKey, self).__init__(**kwargs)
self.key_data = kwargs.get('key_data', None)
class ContainerServiceVMDiagnostics(Model):
_validation = {
'enabled': {'required': True},
'storage_uri': {'readonly': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'storage_uri': {'key': 'storageUri', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ContainerServiceVMDiagnostics, self).__init__(**kwargs)
self.enabled = kwargs.get('enabled', None)
self.storage_uri = None
class ContainerServiceWindowsProfile(Model):
_validation = {
'admin_username': {'required': True, 'pattern': r'^[a-zA-Z0-9]+([._]?[a-zA-Z0-9]+)*$'},
'admin_password': {'required': True, 'pattern': r'^(?=.*[a-z])(?=.*[A-Z])(?=.*[!@#$%\^&\*\(\)])[a-zA-Z\d!@#$%\^&\*\(\)]{12,123}$'},
}
_attribute_map = {
'admin_username': {'key': 'adminUsername', 'type': 'str'},
'admin_password': {'key': 'adminPassword', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ContainerServiceWindowsProfile, self).__init__(**kwargs)
self.admin_username = kwargs.get('admin_username', None)
self.admin_password = kwargs.get('admin_password', None)
class CredentialResult(Model):
_validation = {
'name': {'readonly': True},
'value': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'bytearray'},
}
def __init__(self, **kwargs):
super(CredentialResult, self).__init__(**kwargs)
self.name = None
self.value = None
class CredentialResults(Model):
_validation = {
'kubeconfigs': {'readonly': True},
}
_attribute_map = {
'kubeconfigs': {'key': 'kubeconfigs', 'type': '[CredentialResult]'},
}
def __init__(self, **kwargs):
super(CredentialResults, self).__init__(**kwargs)
self.kubeconfigs = None
class Resource(Model):
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, **kwargs):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.location = kwargs.get('location', None)
self.tags = kwargs.get('tags', None)
class ManagedCluster(Resource):
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'provisioning_state': {'readonly': True},
'fqdn': {'readonly': True},
'node_resource_group': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'kubernetes_version': {'key': 'properties.kubernetesVersion', 'type': 'str'},
'dns_prefix': {'key': 'properties.dnsPrefix', 'type': 'str'},
'fqdn': {'key': 'properties.fqdn', 'type': 'str'},
'agent_pool_profiles': {'key': 'properties.agentPoolProfiles', 'type': '[ManagedClusterAgentPoolProfile]'},
'linux_profile': {'key': 'properties.linuxProfile', 'type': 'ContainerServiceLinuxProfile'},
'service_principal_profile': {'key': 'properties.servicePrincipalProfile', 'type': 'ManagedClusterServicePrincipalProfile'},
'addon_profiles': {'key': 'properties.addonProfiles', 'type': '{ManagedClusterAddonProfile}'},
'node_resource_group': {'key': 'properties.nodeResourceGroup', 'type': 'str'},
'enable_rbac': {'key': 'properties.enableRBAC', 'type': 'bool'},
'enable_pod_security_policy': {'key': 'properties.enablePodSecurityPolicy', 'type': 'bool'},
'network_profile': {'key': 'properties.networkProfile', 'type': 'ContainerServiceNetworkProfile'},
'aad_profile': {'key': 'properties.aadProfile', 'type': 'ManagedClusterAADProfile'},
'api_server_authorized_ip_ranges': {'key': 'properties.apiServerAuthorizedIPRanges', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(ManagedCluster, self).__init__(**kwargs)
self.provisioning_state = None
self.kubernetes_version = kwargs.get('kubernetes_version', None)
self.dns_prefix = kwargs.get('dns_prefix', None)
self.fqdn = None
self.agent_pool_profiles = kwargs.get('agent_pool_profiles', None)
self.linux_profile = kwargs.get('linux_profile', None)
self.service_principal_profile = kwargs.get('service_principal_profile', None)
self.addon_profiles = kwargs.get('addon_profiles', None)
self.node_resource_group = None
self.enable_rbac = kwargs.get('enable_rbac', None)
self.enable_pod_security_policy = kwargs.get('enable_pod_security_policy', None)
self.network_profile = kwargs.get('network_profile', None)
self.aad_profile = kwargs.get('aad_profile', None)
self.api_server_authorized_ip_ranges = kwargs.get('api_server_authorized_ip_ranges', None)
class ManagedClusterAADProfile(Model):
_validation = {
'client_app_id': {'required': True},
'server_app_id': {'required': True},
}
_attribute_map = {
'client_app_id': {'key': 'clientAppID', 'type': 'str'},
'server_app_id': {'key': 'serverAppID', 'type': 'str'},
'server_app_secret': {'key': 'serverAppSecret', 'type': 'str'},
'tenant_id': {'key': 'tenantID', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ManagedClusterAADProfile, self).__init__(**kwargs)
self.client_app_id = kwargs.get('client_app_id', None)
self.server_app_id = kwargs.get('server_app_id', None)
self.server_app_secret = kwargs.get('server_app_secret', None)
self.tenant_id = kwargs.get('tenant_id', None)
class ManagedClusterAccessProfile(Resource):
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kube_config': {'key': 'properties.kubeConfig', 'type': 'bytearray'},
}
def __init__(self, **kwargs):
super(ManagedClusterAccessProfile, self).__init__(**kwargs)
self.kube_config = kwargs.get('kube_config', None)
class ManagedClusterAddonProfile(Model):
_validation = {
'enabled': {'required': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'config': {'key': 'config', 'type': '{str}'},
}
def __init__(self, **kwargs):
super(ManagedClusterAddonProfile, self).__init__(**kwargs)
self.enabled = kwargs.get('enabled', None)
self.config = kwargs.get('config', None)
class ManagedClusterAgentPoolProfileProperties(Model):
_validation = {
'count': {'required': True, 'maximum': 100, 'minimum': 1},
'vm_size': {'required': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'os_disk_size_gb': {'key': 'osDiskSizeGB', 'type': 'int'},
'vnet_subnet_id': {'key': 'vnetSubnetID', 'type': 'str'},
'max_pods': {'key': 'maxPods', 'type': 'int'},
'os_type': {'key': 'osType', 'type': 'str'},
'max_count': {'key': 'maxCount', 'type': 'int'},
'min_count': {'key': 'minCount', 'type': 'int'},
'enable_auto_scaling': {'key': 'enableAutoScaling', 'type': 'bool'},
'type': {'key': 'type', 'type': 'str'},
'orchestrator_version': {'key': 'orchestratorVersion', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'availability_zones': {'key': 'availabilityZones', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(ManagedClusterAgentPoolProfileProperties, self).__init__(**kwargs)
self.count = kwargs.get('count', 1)
self.vm_size = kwargs.get('vm_size', None)
self.os_disk_size_gb = kwargs.get('os_disk_size_gb', None)
self.vnet_subnet_id = kwargs.get('vnet_subnet_id', None)
self.max_pods = kwargs.get('max_pods', None)
self.os_type = kwargs.get('os_type', "Linux")
self.max_count = kwargs.get('max_count', None)
self.min_count = kwargs.get('min_count', None)
self.enable_auto_scaling = kwargs.get('enable_auto_scaling', None)
self.type = kwargs.get('type', None)
self.orchestrator_version = kwargs.get('orchestrator_version', None)
self.provisioning_state = None
self.availability_zones = kwargs.get('availability_zones', None)
class ManagedClusterAgentPoolProfile(ManagedClusterAgentPoolProfileProperties):
_validation = {
'count': {'required': True, 'maximum': 100, 'minimum': 1},
'vm_size': {'required': True},
'provisioning_state': {'readonly': True},
'name': {'required': True, 'pattern': r'^[a-z][a-z0-9]{0,11}$'},
}
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'vm_size': {'key': 'vmSize', 'type': 'str'},
'os_disk_size_gb': {'key': 'osDiskSizeGB', 'type': 'int'},
'vnet_subnet_id': {'key': 'vnetSubnetID', 'type': 'str'},
'max_pods': {'key': 'maxPods', 'type': 'int'},
'os_type': {'key': 'osType', 'type': 'str'},
'max_count': {'key': 'maxCount', 'type': 'int'},
'min_count': {'key': 'minCount', 'type': 'int'},
'enable_auto_scaling': {'key': 'enableAutoScaling', 'type': 'bool'},
'type': {'key': 'type', 'type': 'str'},
'orchestrator_version': {'key': 'orchestratorVersion', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'availability_zones': {'key': 'availabilityZones', 'type': '[str]'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ManagedClusterAgentPoolProfile, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
class ManagedClusterPoolUpgradeProfile(Model):
_validation = {
'kubernetes_version': {'required': True},
'os_type': {'required': True},
}
_attribute_map = {
'kubernetes_version': {'key': 'kubernetesVersion', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'os_type': {'key': 'osType', 'type': 'str'},
'upgrades': {'key': 'upgrades', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(ManagedClusterPoolUpgradeProfile, self).__init__(**kwargs)
self.kubernetes_version = kwargs.get('kubernetes_version', None)
self.name = kwargs.get('name', None)
self.os_type = kwargs.get('os_type', "Linux")
self.upgrades = kwargs.get('upgrades', None)
class ManagedClusterServicePrincipalProfile(Model):
_validation = {
'client_id': {'required': True},
}
_attribute_map = {
'client_id': {'key': 'clientId', 'type': 'str'},
'secret': {'key': 'secret', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ManagedClusterServicePrincipalProfile, self).__init__(**kwargs)
self.client_id = kwargs.get('client_id', None)
self.secret = kwargs.get('secret', None)
class ManagedClusterUpgradeProfile(Model):
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'control_plane_profile': {'required': True},
'agent_pool_profiles': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'control_plane_profile': {'key': 'properties.controlPlaneProfile', 'type': 'ManagedClusterPoolUpgradeProfile'},
'agent_pool_profiles': {'key': 'properties.agentPoolProfiles', 'type': '[ManagedClusterPoolUpgradeProfile]'},
}
def __init__(self, **kwargs):
super(ManagedClusterUpgradeProfile, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.control_plane_profile = kwargs.get('control_plane_profile', None)
self.agent_pool_profiles = kwargs.get('agent_pool_profiles', None)
class OperationValue(Model):
_validation = {
'origin': {'readonly': True},
'name': {'readonly': True},
'operation': {'readonly': True},
'resource': {'readonly': True},
'description': {'readonly': True},
'provider': {'readonly': True},
}
_attribute_map = {
'origin': {'key': 'origin', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'operation': {'key': 'display.operation', 'type': 'str'},
'resource': {'key': 'display.resource', 'type': 'str'},
'description': {'key': 'display.description', 'type': 'str'},
'provider': {'key': 'display.provider', 'type': 'str'},
}
def __init__(self, **kwargs):
super(OperationValue, self).__init__(**kwargs)
self.origin = None
self.name = None
self.operation = None
self.resource = None
self.description = None
self.provider = None
class OrchestratorProfile(Model):
_validation = {
'orchestrator_type': {'required': True},
'orchestrator_version': {'required': True},
}
_attribute_map = {
'orchestrator_type': {'key': 'orchestratorType', 'type': 'str'},
'orchestrator_version': {'key': 'orchestratorVersion', 'type': 'str'},
}
def __init__(self, **kwargs):
super(OrchestratorProfile, self).__init__(**kwargs)
self.orchestrator_type = kwargs.get('orchestrator_type', None)
self.orchestrator_version = kwargs.get('orchestrator_version', None)
class TagsObject(Model):
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, **kwargs):
super(TagsObject, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
| true
| true
|
7904049aa69210bd9a6fd8dccbcd790dc4d0c7e4
| 2684
|
py
|
Python
|
oemof_examples/oemof.solph/v0.4.x/activity_costs/activity_costs.py
|
ekatef/oemof-examples
|
f16511d20008c30889a6e75a788a3a1a0bc632c2
|
[
"MIT"
] | 28
|
2018-11-10T12:14:04.000Z
|
2022-01-14T00:01:09.000Z
|
oemof_examples/oemof.solph/v0.4.x/activity_costs/activity_costs.py
|
ekatef/oemof-examples
|
f16511d20008c30889a6e75a788a3a1a0bc632c2
|
[
"MIT"
] | 28
|
2018-11-08T06:58:06.000Z
|
2022-02-22T18:58:17.000Z
|
oemof_examples/oemof.solph/v0.4.x/activity_costs/activity_costs.py
|
oemof/examples
|
4805d5cef03141a917fd8a9e1141acfa8cc9d781
|
[
"MIT"
] | 55
|
2018-11-09T09:50:36.000Z
|
2022-03-08T10:31:02.000Z
|
# -*- coding: utf-8 -*-
"""
General description
-------------------
This example illustrates the effect of activity_costs.
There are the following components:
- demand_heat: heat demand (constant, for the sake of simplicity)
- fireplace: wood firing, burns "for free" if somebody is around
- boiler: gas firing, consumes (paid) gas
Notice that activity_costs is an attribute of NonConvex. This is because
it relies on the activity status of a component, which is only available
for nonconvex flows.
Installation requirements
-------------------------
This example requires oemof.solph 0.4.x. Install by:
pip install 'oemof.solph>=0.4,<0.5'
"""
import numpy as np
import pandas as pd
from oemof import solph
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
##########################################################################
# Calculate parameters and initialize the energy system
##########################################################################
periods = 24
time = pd.date_range('1/1/2018', periods=periods, freq='H')
demand_heat = np.full(periods, 5)
demand_heat[:4] = 0
demand_heat[4:18] = 4
activity_costs = np.full(periods, 5)
activity_costs[18:] = 0
es = solph.EnergySystem(timeindex=time)
b_heat = solph.Bus(label='b_heat')
es.add(b_heat)
sink_heat = solph.Sink(
label='demand',
inputs={b_heat: solph.Flow(fix=demand_heat, nominal_value=1)})
fireplace = solph.Source(
label='fireplace',
outputs={b_heat: solph.Flow(nominal_value=3,
variable_costs=0,
nonconvex=solph.NonConvex(
activity_costs=activity_costs))})
boiler = solph.Source(
label='boiler',
outputs={b_heat: solph.Flow(nominal_value=10,
variable_costs=1)})
es.add(sink_heat, fireplace, boiler)
##########################################################################
# Optimise the energy system
##########################################################################
# create an optimization problem and solve it
om = solph.Model(es)
# solve model
om.solve(solver='cbc', solve_kwargs={'tee': True})
##########################################################################
# Check and plot the results
##########################################################################
results = solph.processing.results(om)
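# Editor's sketch (not in the original example): meta_results reports the
# objective value, which includes both the variable and the activity costs.
meta_results = solph.processing.meta_results(om)
print('Objective value:', meta_results.get('objective'))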
# plot data
if plt is not None:
data = solph.views.node(results, 'b_heat')['sequences']
ax = data.plot(kind='line', drawstyle='steps-post', grid=True, rot=0)
ax.set_xlabel('Time')
ax.set_ylabel('Heat (arb. units)')
plt.show()
| 27.387755
| 74
| 0.554024
|
import numpy as np
import pandas as pd
from oemof import solph
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
| true
| true
|
790405ca1ec43439286778dca338ff1f1288395f
| 473
|
py
|
Python
|
tests/mock_requests.py
|
OtGabaldon/multiSourceWordMaps
|
923caebbcd7cabce7dc4bb9ef9c845c4c9ea6ad5
|
[
"MIT"
] | null | null | null |
tests/mock_requests.py
|
OtGabaldon/multiSourceWordMaps
|
923caebbcd7cabce7dc4bb9ef9c845c4c9ea6ad5
|
[
"MIT"
] | null | null | null |
tests/mock_requests.py
|
OtGabaldon/multiSourceWordMaps
|
923caebbcd7cabce7dc4bb9ef9c845c4c9ea6ad5
|
[
"MIT"
] | null | null | null |
import os
class MockRequests:
def __init__(self):
return
def get(self, source):
source_no_http = source.replace("http://","")
test_website_path = f"{os.path.dirname(os.path.abspath(__file__))}/test_data/test_website/{source_no_http}"
with open(test_website_path,'r') as website_file:
return MockData(website_file.read())
class MockData:
def __init__(self,text):
self.text = text
| 26.277778
| 116
| 0.621564
|
import os
class MockRequests:
def __init__(self):
return
def get(self, source):
source_no_http = source.replace("http://","")
test_website_path = f"{os.path.dirname(os.path.abspath(__file__))}/test_data/test_website/{source_no_http}"
with open(test_website_path,'r') as website_file:
return MockData(website_file.read())
class MockData:
def __init__(self,text):
self.text = text
| true
| true
|
7904061c657812f2da8fb8d8888576d00ed82345
| 146
|
py
|
Python
|
virtual/bin/django-admin.py
|
Anitha987/Hood-Track
|
7ecaecbb68a56c27aa396fddcdcbacb185d9e007
|
[
"Unlicense"
] | null | null | null |
virtual/bin/django-admin.py
|
Anitha987/Hood-Track
|
7ecaecbb68a56c27aa396fddcdcbacb185d9e007
|
[
"Unlicense"
] | null | null | null |
virtual/bin/django-admin.py
|
Anitha987/Hood-Track
|
7ecaecbb68a56c27aa396fddcdcbacb185d9e007
|
[
"Unlicense"
] | null | null | null |
#!/home/anitha/Track/virtual/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| 24.333333
| 42
| 0.780822
|
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| true
| true
|
7904075b7c11dede08810f8bfc4e12b090cad734
| 10850
|
py
|
Python
|
src/data/datasets/mimic_cxr/section_parser.py
|
philip-mueller/lovt
|
91cf2094a0e140b8431b8e4ebadc56547a8df6b2
|
[
"MIT"
] | 3
|
2021-12-15T07:53:36.000Z
|
2022-01-05T17:02:45.000Z
|
src/data/datasets/mimic_cxr/section_parser.py
|
philip-mueller/lovt
|
91cf2094a0e140b8431b8e4ebadc56547a8df6b2
|
[
"MIT"
] | null | null | null |
src/data/datasets/mimic_cxr/section_parser.py
|
philip-mueller/lovt
|
91cf2094a0e140b8431b8e4ebadc56547a8df6b2
|
[
"MIT"
] | 3
|
2021-12-14T11:17:43.000Z
|
2021-12-16T07:35:43.000Z
|
"""
From https://zenodo.org/record/3539363
"""
import re
def section_text(text):
"""Splits text into sections.
Assumes text is in a radiology report format, e.g.:
COMPARISON: Chest radiograph dated XYZ.
IMPRESSION: ABC...
Given text like this, it will output text from each section,
where the section type is determined by the all caps header.
Returns a three element tuple:
sections - list containing the text of each section
section_names - a normalized version of the section name
section_idx - list of start indices of the text in the section
"""
p_section = re.compile(
r'\n ([A-Z ()/,-]+):\s', re.DOTALL)
sections = list()
section_names = list()
section_idx = list()
idx = 0
s = p_section.search(text, idx)
if s:
sections.append(text[0:s.start(1)])
section_names.append('preamble')
section_idx.append(0)
while s:
current_section = s.group(1).lower()
# get the start of the text for this section
idx_start = s.end()
# skip past the first newline to avoid some bad parses
idx_skip = text[idx_start:].find('\n')
if idx_skip == -1:
idx_skip = 0
s = p_section.search(text, idx_start + idx_skip)
if s is None:
idx_end = len(text)
else:
idx_end = s.start()
sections.append(text[idx_start:idx_end])
section_names.append(current_section)
section_idx.append(idx_start)
else:
sections.append(text)
section_names.append('full report')
section_idx.append(0)
section_names = normalize_section_names(section_names)
# remove empty sections
# this handles when the report starts with a finding-like statement
# .. but this statement is not a section, more like a report title
# e.g. p10/p10103318/s57408307
# CHEST, PA LATERAL:
#
# INDICATION: This is the actual section ....
# it also helps when there are multiple findings sections
# usually one is empty
for i in reversed(range(len(section_names))):
if section_names[i] in ('impression', 'findings'):
if sections[i].strip() == '':
sections.pop(i)
section_names.pop(i)
section_idx.pop(i)
    if 'impression' not in section_names and 'findings' not in section_names:
# create a new section for the final paragraph
if '\n \n' in sections[-1]:
sections.append('\n \n'.join(sections[-1].split('\n \n')[1:]))
sections[-2] = sections[-2].split('\n \n')[0]
section_names.append('last_paragraph')
section_idx.append(section_idx[-1] + len(sections[-2]))
return sections, section_names, section_idx
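# Editor's illustrative sketch (not part of the original Zenodo code): running
# section_text on a tiny synthetic report; the report text is hypothetical.
def _example_section_text():
    report = (
        "\n COMPARISON: Chest radiograph dated earlier.\n"
        "\n IMPRESSION: No acute process.\n"
    )
    sections, names, starts = section_text(report)
    return list(zip(names, sections, starts))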
def normalize_section_names(section_names):
# first, lower case all
section_names = [s.lower().strip() for s in section_names]
frequent_sections = {
"preamble": "preamble", # 227885
"impression": "impression", # 187759
"comparison": "comparison", # 154647
"indication": "indication", # 153730
"findings": "findings", # 149842
"examination": "examination", # 94094
"technique": "technique", # 81402
"history": "history", # 45624
"comparisons": "comparison", # 8686
"clinical history": "history", # 7121
"reason for examination": "indication", # 5845
"notification": "notification", # 5749
"reason for exam": "indication", # 4430
"clinical information": "history", # 4024
"exam": "examination", # 3907
"clinical indication": "indication", # 1945
"conclusion": "impression", # 1802
"chest, two views": "findings", # 1735
"recommendation(s)": "recommendations", # 1700
"type of examination": "examination", # 1678
"reference exam": "comparison", # 347
"patient history": "history", # 251
"addendum": "addendum", # 183
"comparison exam": "comparison", # 163
"date": "date", # 108
"comment": "comment", # 88
"findings and impression": "impression", # 87
"wet read": "wet read", # 83
"comparison film": "comparison", # 79
"recommendations": "recommendations", # 72
"findings/impression": "impression", # 47
"pfi": "history",
'recommendation': 'recommendations',
'wetread': 'wet read',
'ndication': 'impression', # 1
'impresson': 'impression', # 2
'imprression': 'impression', # 1
'imoression': 'impression', # 1
'impressoin': 'impression', # 1
'imprssion': 'impression', # 1
'impresion': 'impression', # 1
'imperssion': 'impression', # 1
'mpression': 'impression', # 1
'impession': 'impression', # 3
'findings/ impression': 'impression', # ,1
'finding': 'findings', # ,8
'findins': 'findings',
'findindgs': 'findings', # ,1
'findgings': 'findings', # ,1
'findngs': 'findings', # ,1
'findnings': 'findings', # ,1
'finidngs': 'findings', # ,2
'idication': 'indication', # ,1
'reference findings': 'findings', # ,1
'comparision': 'comparison', # ,2
'comparsion': 'comparison', # ,1
'comparrison': 'comparison', # ,1
'comparisions': 'comparison' # ,1
}
p_findings = [
'chest',
'portable',
'pa and lateral',
'lateral and pa',
'ap and lateral',
'lateral and ap',
'frontal and',
'two views',
'frontal view',
'pa view',
'ap view',
'one view',
'lateral view',
'bone window',
'frontal upright',
'frontal semi-upright',
'ribs',
'pa and lat'
]
p_findings = re.compile('({})'.format('|'.join(p_findings)))
main_sections = [
'impression', 'findings', 'history', 'comparison',
'addendum'
]
for i, s in enumerate(section_names):
if s in frequent_sections:
section_names[i] = frequent_sections[s]
continue
main_flag = False
for m in main_sections:
if m in s:
section_names[i] = m
main_flag = True
break
if main_flag:
continue
m = p_findings.search(s)
if m is not None:
section_names[i] = 'findings'
# if it looks like it is describing the entire study
# it's equivalent to findings
# group similar phrasings for impression
return section_names
def custom_mimic_cxr_rules():
custom_section_names = {
's50913680': 'recommendations', # files/p11/p11851243/s50913680.txt
's59363654': 'examination', # files/p12/p12128253/s59363654.txt
's59279892': 'technique', # files/p13/p13150370/s59279892.txt
's59768032': 'recommendations', # files/p13/p13249077/s59768032.txt
's57936451': 'indication', # files/p14/p14325424/s57936451.txt
's50058765': 'indication', # files/p14/p14731346/s50058765.txt
's53356173': 'examination', # files/p15/p15898350/s53356173.txt
's53202765': 'technique', # files/p16/p16076182/s53202765.txt
's50808053': 'technique', # files/p16/p16631485/s50808053.txt
's51966317': 'indication', # files/p10/p10817099/s51966317.txt
's50743547': 'examination', # files/p11/p11388341/s50743547.txt
's56451190': 'note', # files/p11/p11842879/s56451190.txt
's59067458': 'recommendations', # files/p11/p11984647/s59067458.txt
's59215320': 'examination', # files/p12/p12408912/s59215320.txt
's55124749': 'indication', # files/p12/p12428492/s55124749.txt
's54365831': 'indication', # files/p13/p13876470/s54365831.txt
's59087630': 'recommendations', # files/p14/p14267880/s59087630.txt
's58157373': 'recommendations', # files/p15/p15032392/s58157373.txt
's56482935': 'recommendations', # files/p15/p15388421/s56482935.txt
's58375018': 'recommendations', # files/p15/p15505556/s58375018.txt
's54654948': 'indication', # files/p17/p17090359/s54654948.txt
's55157853': 'examination', # files/p18/p18975498/s55157853.txt
's51491012': 'history', # files/p19/p19314266/s51491012.txt
}
custom_indices = {
's50525523': [201, 349], # files/p10/p10602608/s50525523.txt
's57564132': [233, 554], # files/p10/p10637168/s57564132.txt
's59982525': [313, 717], # files/p11/p11989982/s59982525.txt
's53488209': [149, 475], # files/p12/p12458657/s53488209.txt
's54875119': [234, 988], # files/p13/p13687044/s54875119.txt
's50196495': [59, 399], # files/p13/p13894879/s50196495.txt
's56579911': [59, 218], # files/p15/p15394326/s56579911.txt
's52648681': [292, 631], # files/p15/p15666238/s52648681.txt
's59889364': [172, 453], # files/p15/p15835529/s59889364.txt
's53514462': [73, 377], # files/p16/p16297706/s53514462.txt
's59505494': [59, 450], # files/p16/p16730991/s59505494.txt
's53182247': [59, 412], # files/p16/p16770442/s53182247.txt
's51410602': [47, 320], # files/p17/p17069955/s51410602.txt
's56412866': [522, 822], # files/p17/p17612000/s56412866.txt
's54986978': [59, 306], # files/p17/p17912487/s54986978.txt
's59003148': [262, 505], # files/p17/p17916384/s59003148.txt
's57150433': [61, 394], # files/p18/p18335791/s57150433.txt
's56760320': [219, 457], # files/p18/p18418794/s56760320.txt
's59562049': [158, 348], # files/p18/p18502016/s59562049.txt
's52674888': [145, 296], # files/p19/p19381919/s52674888.txt
's55258338': [192, 568], # files/p13/p13719117/s55258338.txt
's59330497': [140, 655], # files/p15/p15479218/s59330497.txt
's52119491': [179, 454], # files/p17/p17959278/s52119491.txt
# below have no findings at all in the entire report
's58235663': [0, 0], # files/p11/p11573679/s58235663.txt
's50798377': [0, 0], # files/p12/p12632853/s50798377.txt
's54168089': [0, 0], # files/p14/p14463099/s54168089.txt
's53071062': [0, 0], # files/p15/p15774521/s53071062.txt
's56724958': [0, 0], # files/p16/p16175671/s56724958.txt
's54231141': [0, 0], # files/p16/p16312859/s54231141.txt
's53607029': [0, 0], # files/p17/p17603668/s53607029.txt
's52035334': [0, 0], # files/p19/p19349312/s52035334.txt
}
return custom_section_names, custom_indices
| 39.59854
| 79
| 0.589677
|
import re
def section_text(text):
p_section = re.compile(
r'\n ([A-Z ()/,-]+):\s', re.DOTALL)
sections = list()
section_names = list()
section_idx = list()
idx = 0
s = p_section.search(text, idx)
if s:
sections.append(text[0:s.start(1)])
section_names.append('preamble')
section_idx.append(0)
while s:
current_section = s.group(1).lower()
idx_start = s.end()
idx_skip = text[idx_start:].find('\n')
if idx_skip == -1:
idx_skip = 0
s = p_section.search(text, idx_start + idx_skip)
if s is None:
idx_end = len(text)
else:
idx_end = s.start()
sections.append(text[idx_start:idx_end])
section_names.append(current_section)
section_idx.append(idx_start)
else:
sections.append(text)
section_names.append('full report')
section_idx.append(0)
section_names = normalize_section_names(section_names)
for i in reversed(range(len(section_names))):
if section_names[i] in ('impression', 'findings'):
if sections[i].strip() == '':
sections.pop(i)
section_names.pop(i)
section_idx.pop(i)
if ('impression' not in section_names) & ('findings' not in section_names):
if '\n \n' in sections[-1]:
sections.append('\n \n'.join(sections[-1].split('\n \n')[1:]))
sections[-2] = sections[-2].split('\n \n')[0]
section_names.append('last_paragraph')
section_idx.append(section_idx[-1] + len(sections[-2]))
return sections, section_names, section_idx
def normalize_section_names(section_names):
section_names = [s.lower().strip() for s in section_names]
frequent_sections = {
"preamble": "preamble",
"impression": "impression",
"comparison": "comparison",
"indication": "indication",
"findings": "findings",
"examination": "examination",
"technique": "technique",
"history": "history",
"comparisons": "comparison",
"clinical history": "history",
"reason for examination": "indication",
"notification": "notification",
"reason for exam": "indication",
"clinical information": "history",
"exam": "examination",
"clinical indication": "indication",
"conclusion": "impression",
"chest, two views": "findings",
"recommendation(s)": "recommendations",
"type of examination": "examination",
"reference exam": "comparison",
"patient history": "history",
"addendum": "addendum",
"comparison exam": "comparison",
"date": "date",
"comment": "comment",
"findings and impression": "impression",
"wet read": "wet read",
"comparison film": "comparison",
"recommendations": "recommendations",
"findings/impression": "impression",
"pfi": "history",
'recommendation': 'recommendations',
'wetread': 'wet read',
'ndication': 'impression',
'impresson': 'impression',
'imprression': 'impression',
'imoression': 'impression',
'impressoin': 'impression',
'imprssion': 'impression',
'impresion': 'impression',
'imperssion': 'impression',
'mpression': 'impression',
'impession': 'impression',
'findings/ impression': 'impression',
'finding': 'findings',
'findins': 'findings',
'findindgs': 'findings',
'findgings': 'findings',
'findngs': 'findings',
'findnings': 'findings',
'finidngs': 'findings',
'idication': 'indication',
'reference findings': 'findings',
'comparision': 'comparison',
'comparsion': 'comparison',
'comparrison': 'comparison',
'comparisions': 'comparison'
}
p_findings = [
'chest',
'portable',
'pa and lateral',
'lateral and pa',
'ap and lateral',
'lateral and ap',
'frontal and',
'two views',
'frontal view',
'pa view',
'ap view',
'one view',
'lateral view',
'bone window',
'frontal upright',
'frontal semi-upright',
'ribs',
'pa and lat'
]
p_findings = re.compile('({})'.format('|'.join(p_findings)))
main_sections = [
'impression', 'findings', 'history', 'comparison',
'addendum'
]
for i, s in enumerate(section_names):
if s in frequent_sections:
section_names[i] = frequent_sections[s]
continue
main_flag = False
for m in main_sections:
if m in s:
section_names[i] = m
main_flag = True
break
if main_flag:
continue
m = p_findings.search(s)
if m is not None:
section_names[i] = 'findings'
# group similar phrasings for impression
return section_names
def custom_mimic_cxr_rules():
custom_section_names = {
's50913680': 'recommendations', # files/p11/p11851243/s50913680.txt
's59363654': 'examination', # files/p12/p12128253/s59363654.txt
's59279892': 'technique', # files/p13/p13150370/s59279892.txt
's59768032': 'recommendations', # files/p13/p13249077/s59768032.txt
's57936451': 'indication', # files/p14/p14325424/s57936451.txt
's50058765': 'indication', # files/p14/p14731346/s50058765.txt
's53356173': 'examination', # files/p15/p15898350/s53356173.txt
's53202765': 'technique', # files/p16/p16076182/s53202765.txt
's50808053': 'technique', # files/p16/p16631485/s50808053.txt
's51966317': 'indication', # files/p10/p10817099/s51966317.txt
's50743547': 'examination', # files/p11/p11388341/s50743547.txt
's56451190': 'note', # files/p11/p11842879/s56451190.txt
's59067458': 'recommendations', # files/p11/p11984647/s59067458.txt
's59215320': 'examination', # files/p12/p12408912/s59215320.txt
's55124749': 'indication', # files/p12/p12428492/s55124749.txt
's54365831': 'indication', # files/p13/p13876470/s54365831.txt
's59087630': 'recommendations', # files/p14/p14267880/s59087630.txt
's58157373': 'recommendations', # files/p15/p15032392/s58157373.txt
's56482935': 'recommendations', # files/p15/p15388421/s56482935.txt
's58375018': 'recommendations', # files/p15/p15505556/s58375018.txt
's54654948': 'indication', # files/p17/p17090359/s54654948.txt
's55157853': 'examination', # files/p18/p18975498/s55157853.txt
's51491012': 'history', # files/p19/p19314266/s51491012.txt
}
custom_indices = {
's50525523': [201, 349], # files/p10/p10602608/s50525523.txt
's57564132': [233, 554], # files/p10/p10637168/s57564132.txt
's59982525': [313, 717], # files/p11/p11989982/s59982525.txt
's53488209': [149, 475], # files/p12/p12458657/s53488209.txt
's54875119': [234, 988], # files/p13/p13687044/s54875119.txt
's50196495': [59, 399], # files/p13/p13894879/s50196495.txt
's56579911': [59, 218], # files/p15/p15394326/s56579911.txt
's52648681': [292, 631], # files/p15/p15666238/s52648681.txt
's59889364': [172, 453], # files/p15/p15835529/s59889364.txt
's53514462': [73, 377], # files/p16/p16297706/s53514462.txt
's59505494': [59, 450], # files/p16/p16730991/s59505494.txt
's53182247': [59, 412], # files/p16/p16770442/s53182247.txt
's51410602': [47, 320], # files/p17/p17069955/s51410602.txt
's56412866': [522, 822], # files/p17/p17612000/s56412866.txt
's54986978': [59, 306], # files/p17/p17912487/s54986978.txt
's59003148': [262, 505], # files/p17/p17916384/s59003148.txt
's57150433': [61, 394], # files/p18/p18335791/s57150433.txt
's56760320': [219, 457], # files/p18/p18418794/s56760320.txt
's59562049': [158, 348], # files/p18/p18502016/s59562049.txt
's52674888': [145, 296], # files/p19/p19381919/s52674888.txt
's55258338': [192, 568], # files/p13/p13719117/s55258338.txt
's59330497': [140, 655], # files/p15/p15479218/s59330497.txt
's52119491': [179, 454], # files/p17/p17959278/s52119491.txt
# below have no findings at all in the entire report
's58235663': [0, 0], # files/p11/p11573679/s58235663.txt
's50798377': [0, 0], # files/p12/p12632853/s50798377.txt
's54168089': [0, 0], # files/p14/p14463099/s54168089.txt
's53071062': [0, 0], # files/p15/p15774521/s53071062.txt
's56724958': [0, 0], # files/p16/p16175671/s56724958.txt
's54231141': [0, 0], # files/p16/p16312859/s54231141.txt
's53607029': [0, 0], # files/p17/p17603668/s53607029.txt
's52035334': [0, 0], # files/p19/p19349312/s52035334.txt
}
return custom_section_names, custom_indices
| true
| true
|
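A quick usage sketch for the section_text parser above (my addition; the sample report string is invented, not taken from MIMIC-CXR):

report = (
    "\n EXAMINATION:  Chest radiograph"
    "\n INDICATION:  Shortness of breath"
    "\n FINDINGS:  Lungs are clear."
    "\n IMPRESSION:  No acute cardiopulmonary process."
)
sections, names, idx = section_text(report)
print(names)  # ['preamble', 'examination', 'indication', 'findings', 'impression']
for name, body in zip(names, sections):
    print(name, '->', body.strip())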
79040934076be8f5568f60b84038d03e8bde4326
| 5,074
|
py
|
Python
|
test/functional/test_framework/wallet_util.py
|
ludirium/ludirium
|
d2c6d7855ed98c62b6c9431e695d9b1a791255a6
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/wallet_util.py
|
ludirium/ludirium
|
d2c6d7855ed98c62b6c9431e695d9b1a791255a6
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/wallet_util.py
|
ludirium/ludirium
|
d2c6d7855ed98c62b6c9431e695d9b1a791255a6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Ludirium Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Useful util functions for testing the wallet"""
from collections import namedtuple
from test_framework.address import (
byte_to_base58,
key_to_p2pkh,
key_to_p2sh_p2wpkh,
key_to_p2wpkh,
script_to_p2sh,
script_to_p2sh_p2wsh,
script_to_p2wsh,
)
from test_framework.key import ECKey
from test_framework.script import (
CScript,
OP_2,
OP_3,
OP_CHECKMULTISIG,
)
from test_framework.script_util import (
key_to_p2pkh_script,
key_to_p2wpkh_script,
script_to_p2sh_script,
script_to_p2wsh_script,
)
from test_framework.util import hex_str_to_bytes
Key = namedtuple('Key', ['privkey',
'pubkey',
'p2pkh_script',
'p2pkh_addr',
'p2wpkh_script',
'p2wpkh_addr',
'p2sh_p2wpkh_script',
'p2sh_p2wpkh_redeem_script',
'p2sh_p2wpkh_addr'])
Multisig = namedtuple('Multisig', ['privkeys',
'pubkeys',
'p2sh_script',
'p2sh_addr',
'redeem_script',
'p2wsh_script',
'p2wsh_addr',
'p2sh_p2wsh_script',
'p2sh_p2wsh_addr'])
def get_key(node):
"""Generate a fresh key on node
    Returns a named tuple of privkey, pubkey and all addresses and scripts."""
addr = node.getnewaddress()
pubkey = node.getaddressinfo(addr)['pubkey']
return Key(privkey=node.dumpprivkey(addr),
pubkey=pubkey,
p2pkh_script=key_to_p2pkh_script(pubkey).hex(),
p2pkh_addr=key_to_p2pkh(pubkey),
p2wpkh_script=key_to_p2wpkh_script(pubkey).hex(),
p2wpkh_addr=key_to_p2wpkh(pubkey),
p2sh_p2wpkh_script=script_to_p2sh_script(key_to_p2wpkh_script(pubkey)).hex(),
p2sh_p2wpkh_redeem_script=key_to_p2wpkh_script(pubkey).hex(),
p2sh_p2wpkh_addr=key_to_p2sh_p2wpkh(pubkey))
def get_generate_key():
"""Generate a fresh key
    Returns a named tuple of privkey, pubkey and all addresses and scripts."""
eckey = ECKey()
eckey.generate()
privkey = bytes_to_wif(eckey.get_bytes())
pubkey = eckey.get_pubkey().get_bytes().hex()
return Key(privkey=privkey,
pubkey=pubkey,
p2pkh_script=key_to_p2pkh_script(pubkey).hex(),
p2pkh_addr=key_to_p2pkh(pubkey),
p2wpkh_script=key_to_p2wpkh_script(pubkey).hex(),
p2wpkh_addr=key_to_p2wpkh(pubkey),
p2sh_p2wpkh_script=script_to_p2sh_script(key_to_p2wpkh_script(pubkey)).hex(),
p2sh_p2wpkh_redeem_script=key_to_p2wpkh_script(pubkey).hex(),
p2sh_p2wpkh_addr=key_to_p2sh_p2wpkh(pubkey))
def get_multisig(node):
"""Generate a fresh 2-of-3 multisig on node
    Returns a named tuple of privkeys, pubkeys and all addresses and scripts."""
addrs = []
pubkeys = []
for _ in range(3):
addr = node.getaddressinfo(node.getnewaddress())
addrs.append(addr['address'])
pubkeys.append(addr['pubkey'])
script_code = CScript([OP_2] + [hex_str_to_bytes(pubkey) for pubkey in pubkeys] + [OP_3, OP_CHECKMULTISIG])
witness_script = script_to_p2wsh_script(script_code)
return Multisig(privkeys=[node.dumpprivkey(addr) for addr in addrs],
pubkeys=pubkeys,
p2sh_script=script_to_p2sh_script(script_code).hex(),
p2sh_addr=script_to_p2sh(script_code),
redeem_script=script_code.hex(),
p2wsh_script=witness_script.hex(),
p2wsh_addr=script_to_p2wsh(script_code),
p2sh_p2wsh_script=script_to_p2sh_script(witness_script).hex(),
p2sh_p2wsh_addr=script_to_p2sh_p2wsh(script_code))
def test_address(node, address, **kwargs):
"""Get address info for `address` and test whether the returned values are as expected."""
addr_info = node.getaddressinfo(address)
for key, value in kwargs.items():
if value is None:
if key in addr_info.keys():
raise AssertionError("key {} unexpectedly returned in getaddressinfo.".format(key))
elif addr_info[key] != value:
raise AssertionError("key {} value {} did not match expected value {}".format(key, addr_info[key], value))
def bytes_to_wif(b, compressed=True):
if compressed:
b += b'\x01'
return byte_to_base58(b, 239)
def generate_wif_key():
# Makes a WIF privkey for imports
k = ECKey()
k.generate()
return bytes_to_wif(k.get_bytes(), k.is_compressed)
| 39.640625
| 118
| 0.615294
|
from collections import namedtuple
from test_framework.address import (
byte_to_base58,
key_to_p2pkh,
key_to_p2sh_p2wpkh,
key_to_p2wpkh,
script_to_p2sh,
script_to_p2sh_p2wsh,
script_to_p2wsh,
)
from test_framework.key import ECKey
from test_framework.script import (
CScript,
OP_2,
OP_3,
OP_CHECKMULTISIG,
)
from test_framework.script_util import (
key_to_p2pkh_script,
key_to_p2wpkh_script,
script_to_p2sh_script,
script_to_p2wsh_script,
)
from test_framework.util import hex_str_to_bytes
Key = namedtuple('Key', ['privkey',
'pubkey',
'p2pkh_script',
'p2pkh_addr',
'p2wpkh_script',
'p2wpkh_addr',
'p2sh_p2wpkh_script',
'p2sh_p2wpkh_redeem_script',
'p2sh_p2wpkh_addr'])
Multisig = namedtuple('Multisig', ['privkeys',
'pubkeys',
'p2sh_script',
'p2sh_addr',
'redeem_script',
'p2wsh_script',
'p2wsh_addr',
'p2sh_p2wsh_script',
'p2sh_p2wsh_addr'])
def get_key(node):
addr = node.getnewaddress()
pubkey = node.getaddressinfo(addr)['pubkey']
return Key(privkey=node.dumpprivkey(addr),
pubkey=pubkey,
p2pkh_script=key_to_p2pkh_script(pubkey).hex(),
p2pkh_addr=key_to_p2pkh(pubkey),
p2wpkh_script=key_to_p2wpkh_script(pubkey).hex(),
p2wpkh_addr=key_to_p2wpkh(pubkey),
p2sh_p2wpkh_script=script_to_p2sh_script(key_to_p2wpkh_script(pubkey)).hex(),
p2sh_p2wpkh_redeem_script=key_to_p2wpkh_script(pubkey).hex(),
p2sh_p2wpkh_addr=key_to_p2sh_p2wpkh(pubkey))
def get_generate_key():
eckey = ECKey()
eckey.generate()
privkey = bytes_to_wif(eckey.get_bytes())
pubkey = eckey.get_pubkey().get_bytes().hex()
return Key(privkey=privkey,
pubkey=pubkey,
p2pkh_script=key_to_p2pkh_script(pubkey).hex(),
p2pkh_addr=key_to_p2pkh(pubkey),
p2wpkh_script=key_to_p2wpkh_script(pubkey).hex(),
p2wpkh_addr=key_to_p2wpkh(pubkey),
p2sh_p2wpkh_script=script_to_p2sh_script(key_to_p2wpkh_script(pubkey)).hex(),
p2sh_p2wpkh_redeem_script=key_to_p2wpkh_script(pubkey).hex(),
p2sh_p2wpkh_addr=key_to_p2sh_p2wpkh(pubkey))
def get_multisig(node):
addrs = []
pubkeys = []
for _ in range(3):
addr = node.getaddressinfo(node.getnewaddress())
addrs.append(addr['address'])
pubkeys.append(addr['pubkey'])
script_code = CScript([OP_2] + [hex_str_to_bytes(pubkey) for pubkey in pubkeys] + [OP_3, OP_CHECKMULTISIG])
witness_script = script_to_p2wsh_script(script_code)
return Multisig(privkeys=[node.dumpprivkey(addr) for addr in addrs],
pubkeys=pubkeys,
p2sh_script=script_to_p2sh_script(script_code).hex(),
p2sh_addr=script_to_p2sh(script_code),
redeem_script=script_code.hex(),
p2wsh_script=witness_script.hex(),
p2wsh_addr=script_to_p2wsh(script_code),
p2sh_p2wsh_script=script_to_p2sh_script(witness_script).hex(),
p2sh_p2wsh_addr=script_to_p2sh_p2wsh(script_code))
def test_address(node, address, **kwargs):
addr_info = node.getaddressinfo(address)
for key, value in kwargs.items():
if value is None:
if key in addr_info.keys():
raise AssertionError("key {} unexpectedly returned in getaddressinfo.".format(key))
elif addr_info[key] != value:
raise AssertionError("key {} value {} did not match expected value {}".format(key, addr_info[key], value))
def bytes_to_wif(b, compressed=True):
if compressed:
b += b'\x01'
return byte_to_base58(b, 239)
def generate_wif_key():
k = ECKey()
k.generate()
return bytes_to_wif(k.get_bytes(), k.is_compressed)
| true
| true
|
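A usage sketch for the helpers above (my addition; it assumes the test_framework package from the same source tree is importable):

from test_framework.wallet_util import bytes_to_wif, get_generate_key

key = get_generate_key()   # fresh key plus every derived script and address
print(key.pubkey)          # hex-encoded public key
print(key.p2wpkh_addr)     # bech32 address for that key
# bytes_to_wif appends 0x01 for compressed keys and base58-encodes the
# result with version byte 239 (the testnet/regtest private key prefix)
print(bytes_to_wif(b'\x01' * 32))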
790409f8a12c8f871efa01911ec036b9aeba5b8f
| 744
|
py
|
Python
|
py3canvas/tests/quiz_submission_files.py
|
tylerclair/py3canvas
|
7485d458606b65200f0ffa5bbe597a9d0bee189f
|
[
"MIT"
] | null | null | null |
py3canvas/tests/quiz_submission_files.py
|
tylerclair/py3canvas
|
7485d458606b65200f0ffa5bbe597a9d0bee189f
|
[
"MIT"
] | null | null | null |
py3canvas/tests/quiz_submission_files.py
|
tylerclair/py3canvas
|
7485d458606b65200f0ffa5bbe597a9d0bee189f
|
[
"MIT"
] | null | null | null |
"""QuizSubmissionFiles API Tests for Version 1.0.
This is a testing template for the generated QuizSubmissionFilesAPI Class.
"""
import unittest
import requests
import secrets
from py3canvas.apis.quiz_submission_files import QuizSubmissionFilesAPI
class TestQuizSubmissionFilesAPI(unittest.TestCase):
"""Tests for the QuizSubmissionFilesAPI."""
def setUp(self):
self.client = QuizSubmissionFilesAPI(
secrets.instance_address, secrets.access_token
)
def test_upload_file(self):
"""Integration test for the QuizSubmissionFilesAPI.upload_file method."""
# This method utilises the POST request method and will make changes to the Canvas instance. This needs consideration.
pass
| 32.347826
| 126
| 0.752688
|
import unittest
import requests
import secrets
from py3canvas.apis.quiz_submission_files import QuizSubmissionFilesAPI
class TestQuizSubmissionFilesAPI(unittest.TestCase):
def setUp(self):
self.client = QuizSubmissionFilesAPI(
secrets.instance_address, secrets.access_token
)
def test_upload_file(self):
pass
| true
| true
|
79040b3e983ee4bd243d74e0ee28efe1185f4293
| 926
|
py
|
Python
|
backoffice/migrations/0006_auto_20210225_1755.py
|
mono57/verger.stock-mgmt
|
f8de21738f435f9dc2d604e0aa8187a81d04c50a
|
[
"MIT"
] | null | null | null |
backoffice/migrations/0006_auto_20210225_1755.py
|
mono57/verger.stock-mgmt
|
f8de21738f435f9dc2d604e0aa8187a81d04c50a
|
[
"MIT"
] | 1
|
2021-02-23T15:54:30.000Z
|
2021-02-23T15:54:30.000Z
|
backoffice/migrations/0006_auto_20210225_1755.py
|
mono57/verger.stock-mgmt
|
f8de21738f435f9dc2d604e0aa8187a81d04c50a
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-02-25 17:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('backoffice', '0005_auto_20210225_1712'),
]
operations = [
migrations.AddField(
model_name='buyingentry',
name='partition',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='backoffice.partitionformulla'),
),
migrations.AlterField(
model_name='buyingentry',
name='buying',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='entries', to='backoffice.buying'),
),
migrations.AlterField(
model_name='buyingentry',
name='quantity',
field=models.IntegerField(verbose_name='Quantité achetée'),
),
]
| 30.866667
| 132
| 0.631749
|
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('backoffice', '0005_auto_20210225_1712'),
]
operations = [
migrations.AddField(
model_name='buyingentry',
name='partition',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='backoffice.partitionformulla'),
),
migrations.AlterField(
model_name='buyingentry',
name='buying',
field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='entries', to='backoffice.buying'),
),
migrations.AlterField(
model_name='buyingentry',
name='quantity',
field=models.IntegerField(verbose_name='Quantité achetée'),
),
]
| true
| true
|
79040c03d6dad6d08291960465ba46fab0ba9ac7
| 1,634
|
py
|
Python
|
venv/lib/python3.8/site-packages/vsts/build/v4_1/models/source_provider_attributes.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/vsts/build/v4_1/models/source_provider_attributes.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/vsts/build/v4_1/models/source_provider_attributes.py
|
amcclead7336/Enterprise_Data_Science_Final
|
ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28
|
[
"Unlicense",
"MIT"
] | 2
|
2021-05-23T16:46:31.000Z
|
2021-05-26T23:51:09.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class SourceProviderAttributes(Model):
"""SourceProviderAttributes.
:param name: The name of the source provider.
:type name: str
:param supported_capabilities: The capabilities supported by this source provider.
:type supported_capabilities: dict
:param supported_triggers: The types of triggers supported by this source provider.
:type supported_triggers: list of :class:`SupportedTrigger <build.v4_1.models.SupportedTrigger>`
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'supported_capabilities': {'key': 'supportedCapabilities', 'type': '{bool}'},
'supported_triggers': {'key': 'supportedTriggers', 'type': '[SupportedTrigger]'}
}
def __init__(self, name=None, supported_capabilities=None, supported_triggers=None):
super(SourceProviderAttributes, self).__init__()
self.name = name
self.supported_capabilities = supported_capabilities
self.supported_triggers = supported_triggers
| 48.058824
| 101
| 0.585679
|
from msrest.serialization import Model
class SourceProviderAttributes(Model):
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'supported_capabilities': {'key': 'supportedCapabilities', 'type': '{bool}'},
'supported_triggers': {'key': 'supportedTriggers', 'type': '[SupportedTrigger]'}
}
def __init__(self, name=None, supported_capabilities=None, supported_triggers=None):
super(SourceProviderAttributes, self).__init__()
self.name = name
self.supported_capabilities = supported_capabilities
self.supported_triggers = supported_triggers
| true
| true
|
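A trivial construction sketch for the model above (my addition; the attribute values are invented):

attrs = SourceProviderAttributes(
    name='git',
    supported_capabilities={'queryBranches': True},  # serialized as {bool} per _attribute_map
)
print(attrs.name)                # 'git'
print(attrs.supported_triggers)  # None, since nothing was passed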
79040d3424cd406b1eff8f8ac2307df05ba665ad
| 1,527
|
py
|
Python
|
regression/era/era.py
|
b8raoult/magics
|
eb2c86ec6e392e89c90044128dc671f22283d6ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 41
|
2018-12-07T23:10:50.000Z
|
2022-02-19T03:01:49.000Z
|
regression/era/era.py
|
b8raoult/magics
|
eb2c86ec6e392e89c90044128dc671f22283d6ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 59
|
2019-01-04T15:43:30.000Z
|
2022-03-31T09:48:15.000Z
|
regression/era/era.py
|
b8raoult/magics
|
eb2c86ec6e392e89c90044128dc671f22283d6ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 13
|
2019-01-07T14:36:33.000Z
|
2021-09-06T14:48:36.000Z
|
# (C) Copyright 1996-2016 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
# importing Magics module
from Magics.macro import *
ref = 'era'
# Setting the output file name
output = output(output_formats=['png'],
output_name_first_page_number='off',
output_name=ref)
# Setting the coordinates of the geographical area
projection = mmap(
subpage_x_length=24.,
subpage_upper_right_longitude=50.00,
subpage_upper_right_latitude=65.00,
subpage_lower_left_latitude=25.00,
subpage_lower_left_longitude=-20.0,
subpage_map_projection='cylindrical',
)
# Coastlines setting
coast = mcoast(map_grid='on', map_grid_colour='grey',
map_grid_thickness=2,
map_coastline_colour='RGB(0.4,0.4,0.4)',
map_coastline_thickness=3)
obs = mobs(
obsjson_input_filename = "small.json",
obs_template_file_name = "obs.template",
obs_identification = "on",
obs_size = 0.5,
obs_distance_apart = 0.
)
title = mtext(text_lines=["Observation plotting ..." ],
text_justification='left', text_font_size=0.8,
text_colour='charcoal')
# Draw the plot
plot(
output,
projection,
obs,
coast,
title,
)
| 25.032787
| 80
| 0.698756
|
from Magics.macro import *
ref = 'era'
output = output(output_formats=['png'],
output_name_first_page_number='off',
output_name=ref)
projection = mmap(
subpage_x_length=24.,
subpage_upper_right_longitude=50.00,
subpage_upper_right_latitude=65.00,
subpage_lower_left_latitude=25.00,
subpage_lower_left_longitude=-20.0,
subpage_map_projection='cylindrical',
)
coast = mcoast(map_grid='on', map_grid_colour='grey',
map_grid_thickness=2,
map_coastline_colour='RGB(0.4,0.4,0.4)',
map_coastline_thickness=3)
obs = mobs(
obsjson_input_filename = "small.json",
obs_template_file_name = "obs.template",
obs_identification = "on",
obs_size = 0.5,
obs_distance_apart = 0.
)
title = mtext(text_lines=["Observation plotting ..." ],
text_justification='left', text_font_size=0.8,
text_colour='charcoal')
plot(
output,
projection,
obs,
coast,
title,
)
| true
| true
|
79040d7b3d17c727a4cdd785008bae2aa56409a7
| 331
|
py
|
Python
|
HackerRank/Data Structures/Trees/height-of-a-binary-tree.py
|
danielfsousa/algorithms-solutions
|
038c0c0bf6d89ffb1ecea596e7d4bb9bd4154ff1
|
[
"MIT"
] | 1
|
2020-03-17T23:54:32.000Z
|
2020-03-17T23:54:32.000Z
|
HackerRank/Data Structures/Trees/height-of-a-binary-tree.py
|
danielfsousa/algorithms-solutions
|
038c0c0bf6d89ffb1ecea596e7d4bb9bd4154ff1
|
[
"MIT"
] | null | null | null |
HackerRank/Data Structures/Trees/height-of-a-binary-tree.py
|
danielfsousa/algorithms-solutions
|
038c0c0bf6d89ffb1ecea596e7d4bb9bd4154ff1
|
[
"MIT"
] | null | null | null |
# https://www.hackerrank.com/challenges/tree-height-of-a-binary-tree/problem
def height(root):
"""
DFS
v = Vertices
e = Edges
d = Depth
Time complexity: O(v + e)
Space complexity: O(d)
"""
if root:
return 1 + max(height(root.left), height(root.right))
else:
return -1
| 17.421053
| 76
| 0.567976
|
def height(root):
if root:
return 1 + max(height(root.left), height(root.right))
else:
return -1
| true
| true
|
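A runnable sketch for the height function above (my addition; the Node class is assumed, HackerRank supplies its own):

class Node:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None

root = Node(1)
root.left = Node(2)
root.left.left = Node(3)
print(height(root))  # 2: two edges on the longest root-to-leaf path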
79040e17c7d11f1e2d106744bb6d5fbf91907f6f
| 65
|
py
|
Python
|
example/mylib-2.0/mylib.py
|
mitsuhiko/multiversion
|
3e7c6e1b95b73be633f498d381345b49a877a857
|
[
"BSD-3-Clause"
] | 37
|
2015-11-05T03:40:40.000Z
|
2021-11-22T23:18:19.000Z
|
example/mylib-2.0/mylib.py
|
mitsuhiko/multiversion
|
3e7c6e1b95b73be633f498d381345b49a877a857
|
[
"BSD-3-Clause"
] | 1
|
2019-05-17T08:16:21.000Z
|
2019-05-17T08:21:07.000Z
|
example/mylib-2.0/mylib.py
|
mitsuhiko/multiversion
|
3e7c6e1b95b73be633f498d381345b49a877a857
|
[
"BSD-3-Clause"
] | 9
|
2015-05-11T03:40:55.000Z
|
2022-02-07T16:22:15.000Z
|
version = '2.0'
import mylib
print 'self import in 2.0:', mylib
| 13
| 34
| 0.676923
|
version = '2.0'
import mylib
print 'self import in 2.0:', mylib
| false
| true
|
79040e7428db2955d89ca4c36c7ff848e0db155a
| 3,958
|
py
|
Python
|
face-rec/config.py
|
csgcmai/lqp_face
|
db4ec672b352044692f8d1bcbfa181b35567b95c
|
[
"BSD-3-Clause"
] | 1
|
2020-04-06T16:31:56.000Z
|
2020-04-06T16:31:56.000Z
|
face-rec/config.py
|
csgcmai/lqp_face
|
db4ec672b352044692f8d1bcbfa181b35567b95c
|
[
"BSD-3-Clause"
] | null | null | null |
face-rec/config.py
|
csgcmai/lqp_face
|
db4ec672b352044692f8d1bcbfa181b35567b95c
|
[
"BSD-3-Clause"
] | null | null | null |
# Configuration file with default options.
# There are four main sections: General, Features, LQP and Learning, corresponding to different
# functionalities. You can disable the Features or Learning section (by commenting it out) according to your requirements.
[General]
# general options
idir=/home/hussain/datasets/LFW/lfwa # images directory path
odir=/scratch/testing/new-experiments/ # path where cropped_images, learned model and computed features will be stored
dataset=LFW # name of dataset to use; it can be either LFW or FERET [currently not supported]
width=80 # width of cropped images
height=150 # height of cropped images
padding=10 # (same as cellsize) use a padding of one cell on each side. This value must be same as the option cell-size has in the features section
xoffset=1 # offsets to be added (from the center position) to the crop window placed over the original aligned images
yoffset=-4
cbdataset=train-val # complete # This option is used only with LQP Features. It is used to choose subset of dataset for codebook learning e.g. in case of LFW it can be either view1 training validation ('train-val') subset or complete view1 set('complete')
ftype=LQP # Feature types. Choice can be LBP, LTP, LBP+LTP or LQP
usergb=False # if color images, use color information during feature computations.
[Features]
# options for feature computation
listfile="" # a list file containing list of cropped images to compute features
cellsize=10 # cellsize for the histogram grid
tol=5 # [5,7] # tolerance values used for LTP or LQP features (can pass a list, i.e. tol=[5, 7])
[LQP] #LQP Options
lqptype=2 # LQP type represents the LQP geometric structure.
          # Choices can be either Disk (2) or Hor+Ver+Diag+ADiag (9) strip.
lqpsize=7 # LQP size represents the radius (length of strip)
          # of the LQP disk (HVDA strip) (can pass a list, i.e. lqpsize=[5,7])
coding=4 # LQP encoding type can be: Binary (0), Ternary (1) or Split-Ternary (4)
cbsize=150 # Codebook size (number of visual words) used for
# LQP computation (can pass a list, i.e. cbsize=[100, 150]
cbfile="" # [Optional] A list file containing list of images for learning the codebook
[Learning]
# options for model learning
view=complete # view2 # complete # Choice of the dataset, options cans be view1: used for
# parameter tuning purposes; view2: used only for model
# evaluation; complete: a model parameters will be first
# tuned on view1 and results will be reported on view2
ttype=with-pca # Choice of training with or without PCA (for feature
               # evaluation). Available options are with-pca
               # (a PCA model is learned and features are compared in the PCA space)
               # or without-pca (features are compared in their original space)
featdir="" # Directory path where computed features have been stored, used if
# learning is being done without feature computation cycle.
dist=cosine # Distance metric for comparing features. Choices are cosine, chi-square and L2.
# For optimal results use cosine metric for comparing PCA reduced features and
# chi-squared for comparing non-reduced ones.
pcadim=[100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700, 1800, 1900, 2000] # Number of PCA components. You can pass a scalar or list, i.e.
# pcadim= 500. In case of a list, all the dimensions will be used
# for model learning (on view1) and finally only the best performing one will be
# kept. Note that a single model with max(pcadim) is learned in this case
# but evaluation is done using all the dimensions.
# Caution: providing a much higher dimension makes the learning slow and memory
# intensive
| 68.241379
| 256
| 0.698585
|
[General]
idir=/home/hussain/datasets/LFW/lfwa
odir=/scratch/testing/new-experiments/
dataset=LFW
width=80
height=150
padding=10
xoffset=1
yoffset=-4
cbdataset=train-val
| false
| true
|
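The record above is an INI-style configuration despite its .py extension. A minimal reading sketch (my addition; the file name is assumed):

import configparser

# the file relies on trailing '#' comments, so inline comment stripping must be enabled
cfg = configparser.ConfigParser(inline_comment_prefixes=('#',))
cfg.read('config.py')
print(cfg.get('General', 'dataset'))   # 'LFW'
print(cfg.getint('General', 'width'))  # 80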
79040ee42e9d48bfae8ed960aabe08c30bf0134f
| 1,135
|
py
|
Python
|
biotransformers/utils/deprecated.py
|
ahmed-dj/bio-transformers
|
f7a08d75c2496db2bbd42ca4fcb95a48fc43a325
|
[
"Apache-2.0"
] | null | null | null |
biotransformers/utils/deprecated.py
|
ahmed-dj/bio-transformers
|
f7a08d75c2496db2bbd42ca4fcb95a48fc43a325
|
[
"Apache-2.0"
] | null | null | null |
biotransformers/utils/deprecated.py
|
ahmed-dj/bio-transformers
|
f7a08d75c2496db2bbd42ca4fcb95a48fc43a325
|
[
"Apache-2.0"
] | null | null | null |
import functools
import warnings
def deprecated_alias(**aliases):
def deco(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
rename_kwargs(f.__name__, kwargs, aliases)
return f(*args, **kwargs)
return wrapper
return deco
def rename_kwargs(func_name, kwargs, aliases): # noqa
for alias, new in aliases.items():
if alias in kwargs:
if new in kwargs:
raise TypeError("{} received both {} and {}".format(func_name, alias, new))
warnings.warn("{} is deprecated; use {}".format(alias, new), DeprecationWarning, 3)
if alias == "device":
if kwargs[alias].__contains__("cuda"):
kwargs.pop(alias)
kwargs[new] = 1
elif kwargs[alias].__contains__("cpu"):
kwargs.pop(alias)
kwargs[new] = 0
else:
kwargs[new] = kwargs.pop(alias)
elif alias == "multi_gpu":
kwargs.pop(alias)
else:
kwargs[new] = kwargs.pop(alias)
| 29.868421
| 95
| 0.5163
|
import functools
import warnings
def deprecated_alias(**aliases):
def deco(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
rename_kwargs(f.__name__, kwargs, aliases)
return f(*args, **kwargs)
return wrapper
return deco
def rename_kwargs(func_name, kwargs, aliases):
for alias, new in aliases.items():
if alias in kwargs:
if new in kwargs:
raise TypeError("{} received both {} and {}".format(func_name, alias, new))
warnings.warn("{} is deprecated; use {}".format(alias, new), DeprecationWarning, 3)
if alias == "device":
if kwargs[alias].__contains__("cuda"):
kwargs.pop(alias)
kwargs[new] = 1
elif kwargs[alias].__contains__("cpu"):
kwargs.pop(alias)
kwargs[new] = 0
else:
kwargs[new] = kwargs.pop(alias)
elif alias == "multi_gpu":
kwargs.pop(alias)
else:
kwargs[new] = kwargs.pop(alias)
| true
| true
|
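A usage sketch for the deprecated_alias decorator above (my addition; the embed function and its parameters are hypothetical):

@deprecated_alias(device='num_gpus')
def embed(sequences, num_gpus=0):
    return num_gpus

# emits a DeprecationWarning and maps the cuda device string to num_gpus=1
assert embed(['MKTAYIAKQR'], device='cuda:0') == 1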
79040eed931a507c8701c847570903d6715a6f2e
| 9,615
|
py
|
Python
|
arcface/resnet_cbam.py
|
DerryHub/the-TaobaoLive-Commodity-Identify-Competition
|
7e5e5c4fbddd9949fe01810d58bd7994889c007c
|
[
"MIT"
] | 4
|
2020-08-15T14:49:37.000Z
|
2022-01-16T08:34:07.000Z
|
arcface/resnet_cbam.py
|
weilin-droid/the-TaobaoLive-Commodity-Identify-Competition
|
7e5e5c4fbddd9949fe01810d58bd7994889c007c
|
[
"MIT"
] | null | null | null |
arcface/resnet_cbam.py
|
weilin-droid/the-TaobaoLive-Commodity-Identify-Competition
|
7e5e5c4fbddd9949fe01810d58bd7994889c007c
|
[
"MIT"
] | 2
|
2021-05-26T05:16:09.000Z
|
2021-06-09T09:07:49.000Z
|
import torch
import torch.nn as nn
import math
from arcface.utils import l2_norm, Flatten, SentVec_TFIDF
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class ChannelAttention(nn.Module):
def __init__(self, in_planes, ratio=16):
super(ChannelAttention, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.max_pool = nn.AdaptiveMaxPool2d(1)
self.fc1 = nn.Conv2d(in_planes, in_planes // 16, 1, bias=False)
self.relu1 = nn.ReLU()
self.fc2 = nn.Conv2d(in_planes // 16, in_planes, 1, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))
max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))
out = avg_out + max_out
return self.sigmoid(out)
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
x = torch.cat([avg_out, max_out], dim=1)
x = self.conv1(x)
return self.sigmoid(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.ca = ChannelAttention(planes)
self.sa = SpatialAttention()
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.ca(out) * out
out = self.sa(out) * out
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.ca = ChannelAttention(planes * 4)
self.sa = SpatialAttention()
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.ca(out) * out
out = self.sa(out) * out
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
MODEL = {
50:{
'layers': [3, 4, 6, 3]
},
101:{
'layers': [3, 4, 23, 3]
},
152:{
'layers': [3, 8, 36, 3]
}
}
class ResNetCBAM(nn.Module):
def __init__(self, config):
super(ResNetCBAM, self).__init__()
embedding_size = config.embedding_size
drop_ratio = config.drop_ratio
model_dic = MODEL[config.num_layers_c]
layers = model_dic['layers']
# embedding_size = 2048
# drop_ratio = 0.1
# layers = [3, 4, 23, 3]
# self.sentvec = SentVec_TFIDF(embedding_size=embedding_size, root_dir='data/')
block = Bottleneck
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1)
# self.avgpool = nn.AvgPool2d(4, stride=1)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# self.fc = nn.Linear(512* block.expansion, 1000)
self.bn_last = nn.BatchNorm1d(embedding_size)
self.bn_last.bias.requires_grad_(False)
# self.output_layer = nn.Sequential(
# nn.BatchNorm2d(512 * block.expansion),
# nn.Dropout(drop_ratio),
# Flatten(),
# nn.Linear(512 * block.expansion, embedding_size),
# nn.BatchNorm1d(embedding_size))
# self.last_layer = nn.Sequential(
# nn.Linear(2*embedding_size, embedding_size),
# nn.BatchNorm1d(embedding_size)
# )
'''if not config.resume:
self._initialize_weights()
'''
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
import scipy.stats as stats
X = stats.truncnorm(-2, 2, scale=0.01)
values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)
values = values.view(m.weight.size())
with torch.no_grad():
m.weight.copy_(values)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
# print(x.size())
x = self.layer2(x)
# print(x.size())
x = self.layer3(x)
# print(x.size())
x = self.layer4(x)
# print(x.size())
x = self.avgpool(x)
# x = self.output_layer(x)
# sent = self.sentvec(text)
# x = torch.cat((x, sent), dim=1)
# x = self.last_layer(x)
x = torch.flatten(x, 1)
if self.training:
return x, self.bn_last(x)
else:
return l2_norm(self.bn_last(x))
if __name__ == "__main__":
    # ResNetCBAM expects a config object, not a string; build a minimal one
    # (values assumed to match the 101-layer checkpoint: 2048-dim embedding)
    from types import SimpleNamespace
    config = SimpleNamespace(embedding_size=2048, drop_ratio=0.1, num_layers_c=101)
    net = ResNetCBAM(config)
net.load_state_dict(torch.load('trained_models/resnet_cbam_101.pth'))
# del net.output_layer
# net.bn_last = nn.BatchNorm1d(2048)
# l = [3, 4, 6, 3]
# for i in range(3):
# net.layer1[i].ca = ChannelAttention(64 * 4)
# net.layer1[i].sa = SpatialAttention()
# for i in range(4):
# net.layer2[i].ca = ChannelAttention(64 * 8)
# net.layer2[i].sa = SpatialAttention()
# for i in range(6):
# net.layer3[i].ca = ChannelAttention(64 * 16)
# net.layer3[i].sa = SpatialAttention()
# for i in range(3):
# net.layer4[i].ca = ChannelAttention(64 * 32)
# net.layer4[i].sa = SpatialAttention()
# # net.sentvec = SentVec_TFIDF(embedding_size=512, root_dir='data/')
# net.output_layer = nn.Sequential(
# nn.BatchNorm2d(512* 4),
# nn.Dropout(0.1),
# Flatten(),
# nn.Linear(512 * 4, 4096),
# nn.BatchNorm1d(4096))
# del net.fc
torch.save(net.state_dict(), 'trained_models/resnet_cbam_101.pth')
a = torch.randn(5,3,224,224)
b = net(a)
print(b[0].size())
| 33.618881
| 87
| 0.562142
|
import torch
import torch.nn as nn
import math
from arcface.utils import l2_norm, Flatten, SentVec_TFIDF
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class ChannelAttention(nn.Module):
def __init__(self, in_planes, ratio=16):
super(ChannelAttention, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.max_pool = nn.AdaptiveMaxPool2d(1)
self.fc1 = nn.Conv2d(in_planes, in_planes // 16, 1, bias=False)
self.relu1 = nn.ReLU()
self.fc2 = nn.Conv2d(in_planes // 16, in_planes, 1, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))
max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))
out = avg_out + max_out
return self.sigmoid(out)
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
x = torch.cat([avg_out, max_out], dim=1)
x = self.conv1(x)
return self.sigmoid(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.ca = ChannelAttention(planes)
self.sa = SpatialAttention()
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.ca(out) * out
out = self.sa(out) * out
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.ca = ChannelAttention(planes * 4)
self.sa = SpatialAttention()
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.ca(out) * out
out = self.sa(out) * out
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
MODEL = {
50:{
'layers': [3, 4, 6, 3]
},
101:{
'layers': [3, 4, 23, 3]
},
152:{
'layers': [3, 8, 36, 3]
}
}
class ResNetCBAM(nn.Module):
def __init__(self, config):
super(ResNetCBAM, self).__init__()
embedding_size = config.embedding_size
drop_ratio = config.drop_ratio
model_dic = MODEL[config.num_layers_c]
layers = model_dic['layers']
block = Bottleneck
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.bn_last = nn.BatchNorm1d(embedding_size)
self.bn_last.bias.requires_grad_(False)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
import scipy.stats as stats
X = stats.truncnorm(-2, 2, scale=0.01)
values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)
values = values.view(m.weight.size())
with torch.no_grad():
m.weight.copy_(values)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
if self.training:
return x, self.bn_last(x)
else:
return l2_norm(self.bn_last(x))
if __name__ == "__main__":
    from types import SimpleNamespace
    config = SimpleNamespace(embedding_size=2048, drop_ratio=0.1, num_layers_c=101)
    net = ResNetCBAM(config)
net.load_state_dict(torch.load('trained_models/resnet_cbam_101.pth'))
torch.save(net.state_dict(), 'trained_models/resnet_cbam_101.pth')
a = torch.randn(5,3,224,224)
b = net(a)
print(b[0].size())
| true
| true
|
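A shape-check sketch for the attention blocks above (my addition; the tensor sizes are arbitrary):

import torch

ca = ChannelAttention(64)
sa = SpatialAttention()
x = torch.randn(2, 64, 56, 56)
x = ca(x) * x   # channel reweighting, shape unchanged
x = sa(x) * x   # spatial reweighting, shape unchanged
print(x.shape)  # torch.Size([2, 64, 56, 56])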
79040fd264c62a3212a6d1e39120e3bd8b887c0c
| 15,407
|
py
|
Python
|
TableData.py
|
mokko/PyTableData
|
06890c8981bd1ca1de1a87c9c15f21c35b0fda8b
|
[
"Apache-2.0"
] | null | null | null |
TableData.py
|
mokko/PyTableData
|
06890c8981bd1ca1de1a87c9c15f21c35b0fda8b
|
[
"Apache-2.0"
] | null | null | null |
TableData.py
|
mokko/PyTableData
|
06890c8981bd1ca1de1a87c9c15f21c35b0fda8b
|
[
"Apache-2.0"
] | null | null | null |
import os
'''
TableData deals with data that comes from MS Excel, csv, xml. More precisely, it expects
a single table which has headings in the first row. It converts between these formats and usually
keeps information identical on a round trip between those formats.
TableData also allows for simple transformations, like dropping a column.
CONVENTIONS
*cid is column no or column id
*rid is row no or row id
*cell refers to the content of a cell; a cell is represented by cid|rid, as two integers or (not sure yet) a tuple or a list
*cname is the column name (in row 0)
NOTE
* (x|y) not rows x cols
* Currently internal cells do have a type, which may be flattened to str if output is type agnostic.
* cid and rid begins with 0, so first cell is 0|0, but ncols and nrows start at 1. Strangely enough, sometimes that is convenient.
* interface prefers cname over cid
LIMITATIONS
Data is stored in memory (in a two dimensional list of lists), so max. size depends on available memory (ram).
WHAT NOT TO DO
I will NOT allow conversion INTO Excel xsl format, only reading from it.
I will not abstract this thing too far. I write it for my current Excel version and the csv flavor that I
need (e.g. csv is escaped only for values that contain commas). I don't need multiple Excel sheets,
formatting in Excel, lots of types in Excel.
UNICODE
I am going for UTF-8 encoding, but not sure I have it everywhere yet. xlrd is internally in UTF16LE, I believe.
Roundtrip Exceptions
*date
XML Format made by TableData is
<tdx>
<row>
<cnameA>cell value</cnameA>
<cnameB>cell value</cnameB>
...
</row>
</tdx>
The first row will have all columns, even empty ones. The other rows usually omit empty elements with empty values.
'''
class TableData:
def verbose (self, msg):
if self._verbose:
print (msg)
def _uniqueColumns (self):
'''
raise exception if column names (cnames) are not unique
'''
if len(set(self.table[0])) != len(self.table[0]):
raise Exception('Column names not unique')
def __init__ (self, ingester, infile, verbose=None):
self._verbose=verbose
if ingester == 'xml':
self.XMLParser(infile)
elif ingester == 'xls':
self.XLRDParser(infile)
elif ingester == 'csv':
self.CSVParser(infile)
elif ingester == 'json':
self.JSONParser(infile)
#todo: modern excel
else:
raise Exception ('Ingester %s not found' % ingester)
self._uniqueColumns()
#
# INGESTERS (xml, csv)
#
def load_table (path, verbose=None):
'''
File extension aware ingester
td=TableData.load_table(path)
        This is an alternative to __init__. Is this pythonic enough?
'''
ext=os.path.splitext(path)[1][1:]
return TableData (ext, path,verbose)
def XLRDParser (self, infile):
'''
        Parses an old excel file into a tableData object. Only the first sheet.
        Don't use this directly; use
        td=TableData('xls', infile)
        td=TableData.load_table(infile)
        instead
xlrd uses UTF16. What comes out of here?
TO DO:
1. better tests for
-Unicode issues not tested
-Excel data fields change appearance
2. conversion/transformation stuff
'''
import xlrd
import xlrd.sheet
from xlrd.sheet import ctype_text
self.table=[] # will hold sheet in memory as list of list
self.verbose ('xlrd infile %s' % infile)
#if not os.path.isfile(infile):
# raise Exception ('Input file not found')
wb = xlrd.open_workbook(filename=infile, on_demand=True)
sheet= wb.sheet_by_index(0)
        #I'm assuming here that the first row consists only of text cells
        #start at r=0 because we want to preserve the column names
        for r in range(0, sheet.nrows):
row=[]
for c in range(sheet.ncols):
cell = sheet.cell(r, c)
cellTypeStr = ctype_text.get(cell.ctype, 'unknown type')
val=cell.value
#convert cell types -> dates look changed, but may not be (seconds since epoch)!
if cellTypeStr == "number":
val=int(float(val))
elif cellTypeStr == "xldate":
val=xlrd.xldate.xldate_as_datetime(val, 0)
#Warn if comma -> to check if escaped correctly -> quoting works
#if ',' in str(val):
# self.verbose ("%i/%i contains a comma" % (c,r) )
row.append(val)
self.table.append(row)
wb.unload_sheet(0) #unload xlrd sheet to save memory
def CSVParser (self,infile):
import csv
self.table=[] # will hold sheet in memory as list of list
self.verbose ('csvParser: ' + str(infile))
with open(infile, mode='r', newline='') as csvfile:
incsv = csv.reader(csvfile, dialect='excel')
for row in incsv:
self.table.append(row)
#self.verbose (str(row))
    def XMLParser (self,infile):
        #It is practically impossible to reconstruct the full list of columns from an xml file
        #if xmlWriter leaves out empty elements. Instead, I write them at least for the first row.
        self.table=[] # will hold sheet in memory as list of list; overwrite
        self.verbose ('xml infile %s' % infile)
        import xml.etree.ElementTree as ET
        tree = ET.parse(infile)
        for row in tree.iter("row"):
            cnames=[]
            col=[]
            for e in row.iter():
                if e.tag !='row':
                    if len(self.table) == 0:
                        #need to create the header row from the first row in xml
                        cnames.append(e.tag)
                    col.append(e.text)
            if len(self.table) == 0:
                self.table.append(cnames)
            self.table.append(col) #append every data row, not only the first one
def JSONParser (self, infile):
self.table=[] # will hold sheet in memory as list of list; overwrite
import json
self.verbose ('json infile %s' % infile)
json_data = open(infile, 'r').read()
self.table = json.loads(json_data)
##
## read table data, but NO manipulations
##
def ncols(self):
'''
Returns integer with number of columns in table data
'''
return len(self.table[0])
def nrows (self):
'''
Returns integer with number of rows in table data
'''
return len(self.table)
def cell (self, col,row):
'''
Return a cell for col,row.
td.cell(col,row)
        Prints a message and exits if col or row are out of range.
What happens on empty cell?
I stick to x|y format, although row|col might be more pythonic.
Empty cell is '' not None.
'''
try:
return self.table[row][col]
        except IndexError:
            self.verbose ("%i|%i doesn't exist" % (col, row))
            exit (1)
def cindex (self,needle):
'''
Returns the column index (c) for column name 'needle'.
Throws 'not in list' if 'needle' is not a column name (cname).
'''
return self.table[0].index(needle)
def colExists (self, cname):
try:
self.table[0].index(cname)
return True
except:
return False
def search (self, needle):
'''
Returns list of cells [cid,rid] that contain the needle.
r=td.search(needle) # (1,1)
tuples, lists? I am not quite sure!
'''
results=[]
for rid in range(0, self.nrows()):
for cid in range(0, self.ncols()):
cell=self.cell(cid, rid)
#self.verbose ('ce:'+str(cell))
if str(needle) in str(cell):
#self.verbose ("%i/%i:%s->%s" % (cid, rid, cell, needle))
results.append ((cid,rid))
return results
    def search_col (self, cname, needle):
        '''
        Returns a list of row ids that contain the needle in the given column.
        td.search_col(cname, needle)
        '''
        results=[] #a tuple has no append, so use a list
        c=self.cindex(cname) #cindex is a method and needs self
        for rid in range(0, self.nrows()):
            if needle in self.cell(c,rid):
                results.append(rid)
        return results
def show (self):
'''
print representation of table
Really print? Why not.
'''
for row in self.table:
print (row)
print ('Table size is %i x %i (cols x rows)' % (self.ncols(), self.nrows()))
##
## SIMPLE UNCONDITIONAL TRANSFORMATIONS
##
def delRow (self, r):
'''
Drop a row by number.
Need to remake the index to cover the hole.
'''
#r always means rid
self.table.pop(r)
#print ('row %i deleted' % r)
def delCol (self, cname):
'''
Drop a column by cname
(Not tested.)
'''
c=self.cindex (cname)
for r in range(0, self.nrows()):
self.table[r].pop(c)
def addCol (self,name):
'''
Add a new column called name at the end of the row.
Cells with be empty.
Returns the cid of the new column, same as cindex(cname).
'''
#update
self.table[0].append(name)
self._uniqueColumns()
for rid in range(1, self.nrows()):
self.table[rid].append('') # append empty cells for all rows
return len(self.table[0])-1 # len starts counting at 1, but I want 0
    def clean_whitespace (self,cname):
        '''
        Replace line breaks and double spaces with single spaces in column cname.
        '''
        cid=self.cindex(cname)
        for rid in range(1, self.nrows()): #was td.nrows(); td is undefined inside the class
            self.table[rid][cid]=self.table[rid][cid].replace('\r\n', ' ').replace('  ', ' ')
##
## MORE COMPLEX MANIPULATION
##
    def delCellAIfColBEq (self,cnameA, cnameB, needle):
        '''
        Empty the cell in column cnameA in every row where the value in column cnameB equals needle.
        untested
        '''
        colA=self.cindex(cnameA)
        colB=self.cindex(cnameB)
        for rid in range(1, self.nrows()):
            if self.table[rid][colB] == needle:
                self.verbose ('delCellAifColBEq A:%s, B:%s, needle %s' % (cnameA, cnameB, needle))
                self.table[rid][colA]='' #was selt.table, a typo
def delCellAIfColBContains (self,col_a, col_b, needle): pass
    def delRowIfColContains (self, cname, needle):
        '''
        Delete every row whose cell in column cname contains the value 'needle'.
        Should we use cname or c (colId)?
        '''
        #can't delete rows while looping forward, so walk backwards from the last row
        col=self.cindex(cname)
        r=self.nrows()-1 #last row id; nrows counts from 1, row ids from 0
        while r >= 1: #stop before row 0, which holds the column names
            cell=self.cell (col, r) #cell() expects (col, row)
            if needle in str(cell):
                #print ('delRowIfColContains: needle %s found in row %i'% (needle, r))
                self.delRow(r)
            r -= 1
def delRowIfColEq (self,col, needle): pass
def renameCol (self, cnameOld, cnameNew):
'''
renames column cnameOld into cnameNew
'''
c=self.cindex(cnameOld)
self.table[0][c]=cnameNew
    def default_per_col (self, cname, default_value):
        '''
        Default value: if a cell in column cname is empty, replace it with default_value
        self.default_per_col ('status', 'filled')
        '''
        cid=self.cindex(cname) #was td.cindex; td is undefined inside the class
        for rid in range(1, self.nrows()):
            if not self.cell (cid,rid):
                self.table[rid][cid]=default_value
###
### converting to outside world
###
def _outTest(self,out):
if os.path.exists(out):
self.verbose('Output exists already, will be overwritten: %s' %out)
def write (self, out):
'''
write to file with extension-awareness
'''
ext=os.path.splitext(out)[1][1:].lower()
if (ext == 'xml'):
self.writeXML (out)
elif (ext == 'csv'):
self.writeCSV (out)
elif (ext == 'json'):
self.writeJSON (out)
else:
print ('Format %s not recognized' % ext)
def writeCSV (self,outfile):
'''
writes data in tableData object to outfile in csv format
Values with commas are quoted.
'''
import csv
self._outTest(outfile)
with open(outfile, mode='w', newline='', encoding='utf-8') as csvfile:
out = csv.writer(csvfile, dialect='excel')
for r in range(0, self.nrows()):
row=self.table[r]
out.writerow(row)
self.verbose ('csv written to %s' % outfile)
def writeXML (self,out):
'''
writes table data to file out in xml format
'''
import xml.etree.ElementTree as ET
from xml.sax.saxutils import escape
root = ET.Element("tdx") #table data xml
self._outTest(out)
def _indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
_indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
#don't need cnames here, so start at 1, but then write all columns in first row
for r in range(1, self.nrows()):
doc = ET.SubElement(root, "row")
for c in range(0, self.ncols()):
cell = self.cell(c,r)
#print ('x,y: %i/%i: %s->%s ' % (r, c, self.columns[c], cell))
#for round trip I need empty cells, at least in the first row
if cell or r == 1:
ET.SubElement(doc, self.table[0][c]).text=escape(str(cell))
tree = ET.ElementTree(root)
_indent(root)
tree.write(out, encoding='UTF-8', xml_declaration=True)
self.verbose ('xml written to %s' % out)
def writeJSON (self, out):
'''
Writes table data in json to file out
JSON doesn't have date type, hence default=str
'''
import json
self._outTest(out)
        with open(out, 'w') as outfile:
json.dump(self.table, outfile, default=str)
self.verbose ('json written to %s' % out)
if __name__ == '__main__': pass
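# A hedged usage sketch of TableData (the file and column names here are
# hypothetical, not taken from the original repo):
#
#   td = TableData('csv', 'input.csv', verbose=True)  # ingester picked explicitly
#   td.renameCol('Name', 'name')                      # header-row rename
#   td.delRowIfColContains('status', 'obsolete')      # drop matching data rows
#   td.write('output.json')                           # extension selects writeJSON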
| 31.507157
| 130
| 0.541247
|
import os
class TableData:
def verbose (self, msg):
if self._verbose:
print (msg)
def _uniqueColumns (self):
if len(set(self.table[0])) != len(self.table[0]):
raise Exception('Column names not unique')
def __init__ (self, ingester, infile, verbose=None):
self._verbose=verbose
if ingester == 'xml':
self.XMLParser(infile)
elif ingester == 'xls':
self.XLRDParser(infile)
elif ingester == 'csv':
self.CSVParser(infile)
elif ingester == 'json':
self.JSONParser(infile)
else:
raise Exception ('Ingester %s not found' % ingester)
self._uniqueColumns()
    @staticmethod
    def load_table (path, verbose=None):
ext=os.path.splitext(path)[1][1:]
return TableData (ext, path,verbose)
def XLRDParser (self, infile):
import xlrd
import xlrd.sheet
from xlrd.sheet import ctype_text
self.table=[]
self.verbose ('xlrd infile %s' % infile)
wb = xlrd.open_workbook(filename=infile, on_demand=True)
sheet= wb.sheet_by_index(0)
#start at r=0 because we want to preserve the columns
for r in range(0, sheet.nrows): #no
row=[]
for c in range(sheet.ncols):
cell = sheet.cell(r, c)
cellTypeStr = ctype_text.get(cell.ctype, 'unknown type')
val=cell.value
#convert cell types -> dates look changed, but may not be (seconds since epoch)!
if cellTypeStr == "number":
val=int(float(val))
elif cellTypeStr == "xldate":
val=xlrd.xldate.xldate_as_datetime(val, 0)
#Warn if comma -> to check if escaped correctly -> quoting works
#if ',' in str(val):
# self.verbose ("%i/%i contains a comma" % (c,r) )
row.append(val)
self.table.append(row)
wb.unload_sheet(0) #unload xlrd sheet to save memory
def CSVParser (self,infile):
import csv
self.table=[] # will hold sheet in memory as list of list
self.verbose ('csvParser: ' + str(infile))
with open(infile, mode='r', newline='') as csvfile:
incsv = csv.reader(csvfile, dialect='excel')
for row in incsv:
self.table.append(row)
#self.verbose (str(row))
def XMLParser (self,infile):
#It is practically impossible to reconstruct the full list of columns from xml file
#if xmlWriter leaves out empty elements. Instead, I write them at least for first row.
self.table=[] # will hold sheet in memory as list of list; overwrite
self.verbose ('xml infile %s' % infile)
import xml.etree.ElementTree as ET
tree = ET.parse(infile)
for row in tree.iter("row"):
c=0
cnames=[]
col=[]
for e in row.iter():
if e.tag !='row':
#self.verbose ('%s %s' % (e.tag, e.text))
if len(self.table) == 0:
#need to create 2 rows from first row in xml
cnames.append(e.tag)
col.append(e.text)
if len(self.table) == 0:
self.table.append(cnames)
self.table.append(col)
#self.verbose (self.table)
def JSONParser (self, infile):
self.table=[] # will hold sheet in memory as list of list; overwrite
import json
self.verbose ('json infile %s' % infile)
        with open(infile, 'r') as f:
            self.table = json.load(f)
##
## read table data, but NO manipulations
##
def ncols(self):
return len(self.table[0])
def nrows (self):
return len(self.table)
def cell (self, col,row):
try:
return self.table[row][col]
except:
            self.verbose ("%i|%i doesn't exist" % (col, row))
exit (1)
def cindex (self,needle):
return self.table[0].index(needle)
def colExists (self, cname):
try:
self.table[0].index(cname)
return True
except:
return False
def search (self, needle):
results=[]
for rid in range(0, self.nrows()):
for cid in range(0, self.ncols()):
cell=self.cell(cid, rid)
#self.verbose ('ce:'+str(cell))
if str(needle) in str(cell):
#self.verbose ("%i/%i:%s->%s" % (cid, rid, cell, needle))
results.append ((cid,rid))
return results
def search_col (self, cname, needle):
        results=[]
        c=self.cindex(cname)
        for rid in range(0, self.nrows()):
            if needle in str(self.cell(c,rid)):
                results.append(rid)
        return results
def show (self):
for row in self.table:
print (row)
print ('Table size is %i x %i (cols x rows)' % (self.ncols(), self.nrows()))
##
## SIMPLE UNCONDITIONAL TRANSFORMATIONS
##
def delRow (self, r):
#r always means rid
self.table.pop(r)
#print ('row %i deleted' % r)
def delCol (self, cname):
c=self.cindex (cname)
for r in range(0, self.nrows()):
self.table[r].pop(c)
def addCol (self,name):
#update
self.table[0].append(name)
self._uniqueColumns()
for rid in range(1, self.nrows()):
self.table[rid].append('') # append empty cells for all rows
return len(self.table[0])-1 # len starts counting at 1, but I want 0
def clean_whitespace (self,cname):
cid=self.cindex(cname)
        for rid in range(1, self.nrows()):
            self.table[rid][cid]=self.table[rid][cid].replace('\r\n', ' ').replace('  ', ' ')
##
## MORE COMPLEX MANIPULATION
##
def delCellAIfColBEq (self,cnameA, cnameB, needle):
colA=self.cindex(cnameA)
colB=self.cindex(cnameB)
for rid in range(1, self.nrows()):
if self.table[rid][colB] == needle:
self.verbose ('delCellAifColBEq A:%s, B:%s, needle %s' % (cnameA, cnameB, needle))
                self.table[rid][colA]=''
def delCellAIfColBContains (self,col_a, col_b, needle): pass
def delRowIfColContains (self, cname, needle):
        #can't delete rows while looping forward over them,
        #so walk backwards from the last row; row 0 is the header
        col=self.cindex(cname)
        r=self.nrows()-1
        while r >= 1:
#print ('AA%i/%i: ' % (r,col))
            cell=self.cell (col, r)
if needle in str(cell):
#print ('DD:%i/%s:%s' % (r, cname, cell))
#print ('delRowIfColEq: needle %s found in row %i'% (needle, r))
self.delRow(r)
r -=1
def delRowIfColEq (self,col, needle): pass
def renameCol (self, cnameOld, cnameNew):
c=self.cindex(cnameOld)
self.table[0][c]=cnameNew
    def default_per_col (self, cname, default_value):
        cid=self.cindex(cname)
        for rid in range(1, self.nrows()):
            if not self.cell (cid,rid):
                self.table[rid][cid]=default_value
###
### converting to outside world
###
def _outTest(self,out):
if os.path.exists(out):
self.verbose('Output exists already, will be overwritten: %s' %out)
def write (self, out):
ext=os.path.splitext(out)[1][1:].lower()
if (ext == 'xml'):
self.writeXML (out)
elif (ext == 'csv'):
self.writeCSV (out)
elif (ext == 'json'):
self.writeJSON (out)
else:
print ('Format %s not recognized' % ext)
def writeCSV (self,outfile):
import csv
self._outTest(outfile)
with open(outfile, mode='w', newline='', encoding='utf-8') as csvfile:
out = csv.writer(csvfile, dialect='excel')
for r in range(0, self.nrows()):
row=self.table[r]
out.writerow(row)
self.verbose ('csv written to %s' % outfile)
def writeXML (self,out):
import xml.etree.ElementTree as ET
from xml.sax.saxutils import escape
root = ET.Element("tdx") #table data xml
self._outTest(out)
def _indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
_indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
#don't need cnames here, so start at 1, but then write all columns in first row
for r in range(1, self.nrows()):
doc = ET.SubElement(root, "row")
for c in range(0, self.ncols()):
cell = self.cell(c,r)
if cell or r == 1:
ET.SubElement(doc, self.table[0][c]).text=escape(str(cell))
tree = ET.ElementTree(root)
_indent(root)
tree.write(out, encoding='UTF-8', xml_declaration=True)
self.verbose ('xml written to %s' % out)
def writeJSON (self, out):
import json
self._outTest(out)
        with open(out, 'w') as outfile:
json.dump(self.table, outfile, default=str)
self.verbose ('json written to %s' % out)
if __name__ == '__main__': pass
| true
| true
|
790411e7bc5f3e702dbb4fea9c70d0af99e23705
| 3,075
|
py
|
Python
|
examine_vectors.py
|
ajitrajasekharan/bert_mask
|
33c7067134f2696b849fdb273443306026c5527d
|
[
"MIT"
] | 24
|
2019-12-21T15:08:29.000Z
|
2022-01-18T16:41:41.000Z
|
examine_vectors.py
|
ajitrajasekharan/bert_mask
|
33c7067134f2696b849fdb273443306026c5527d
|
[
"MIT"
] | 1
|
2021-09-06T08:57:35.000Z
|
2021-09-06T09:52:52.000Z
|
examine_vectors.py
|
ajitrajasekharan/bert_mask
|
33c7067134f2696b849fdb273443306026c5527d
|
[
"MIT"
] | 7
|
2019-12-17T17:10:59.000Z
|
2022-03-02T20:19:05.000Z
|
import torch
from transformers import *
import pdb
import operator
from collections import OrderedDict
import sys
# OPTIONAL: if you want to have more information on what's happening, activate the logger as follows
import logging
logging.basicConfig(level=logging.INFO)
PATH='bert-base-cased'
# Load pre-trained model tokenizer (vocabulary)
tokenizer = BertTokenizer.from_pretrained(PATH,do_lower_case=False)
model = BertForMaskedLM.from_pretrained(PATH)
model.eval()
def get_sent():
print("Enter sentence:")
sent = input()
if (not sent.endswith(".")):
print("Appending period to do dummy masking")
sent = sent + " ."
    return '[CLS] ' + sent + ' [SEP]'
def print_tokens(tokenized_text):
dstr = ""
for i in range(len(tokenized_text)):
dstr += " " + str(i) + ":"+tokenized_text[i]
print(dstr)
print()
def get_pos():
    while True:
        try:
            masked_index = int(input())
        except ValueError:
            print("Enter valid number: (0 to quit)")
            continue
        if (masked_index == 0):
            print("Quitting")
            sys.exit()
        return masked_index
while (True):
text = get_sent()
tokenized_text = tokenizer.tokenize(text)
print_tokens(tokenized_text)
#pdb.set_trace()
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
# Create the segments tensors.
segments_ids = [0] * len(tokenized_text)
masked_index = len(tokenized_text) - 2
tokenized_text[masked_index] = "[MASK]"
indexed_tokens[masked_index] = 103
results_dict = {}
# Convert inputs to PyTorch tensors
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
with torch.no_grad():
predictions = model(tokens_tensor, segments_tensors)
while True:
print_tokens(tokenized_text)
print("Enter any term position neighbor:")
masked_index = get_pos()
results_dict = {}
for i in range(len(predictions[0][0,masked_index])):
tok = tokenizer.convert_ids_to_tokens([i])[0]
results_dict[tok] = float(predictions[0][0,masked_index][i].tolist())
k = 0
hist_d = {}
sorted_d = OrderedDict(sorted(results_dict.items(), key=lambda kv: kv[1], reverse=True))
first = True
max_val = 0
for i in sorted_d:
if (first):
max_val = sorted_d[i]
first = False
val = round(float(sorted_d[i])/max_val,1)
if (val in hist_d):
hist_d[val] += 1
else:
hist_d[val] = 1
k += 1
if (k <= 20):
print(i,sorted_d[i])
fp = open("top_k.txt","w")
hist_d_sorted = OrderedDict(sorted(hist_d.items(), key=lambda kv: kv[0], reverse=False))
for i in hist_d_sorted:
fp.write(str(i) + " " + str(hist_d_sorted[i]) + "\n")
fp.close()
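# A minimal, hedged sketch of the same masked-token lookup via the higher-level
# fill-mask pipeline (assumes a transformers version that ships pipeline();
# the model name matches PATH above, the sentence is just an illustration):
#
#   from transformers import pipeline
#   fill = pipeline('fill-mask', model='bert-base-cased')
#   for cand in fill('The capital of France is [MASK].'):
#       print(cand['token_str'], round(cand['score'], 4))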
| 30.147059
| 100
| 0.59935
|
import torch
from transformers import *
import pdb
import operator
from collections import OrderedDict
import sys
import logging
logging.basicConfig(level=logging.INFO)
PATH='bert-base-cased'
# Load pre-trained model tokenizer (vocabulary)
tokenizer = BertTokenizer.from_pretrained(PATH,do_lower_case=False)
model = BertForMaskedLM.from_pretrained(PATH)
model.eval()
def get_sent():
print("Enter sentence:")
sent = input()
if (not sent.endswith(".")):
print("Appending period to do dummy masking")
sent = sent + " ."
    return '[CLS] ' + sent + ' [SEP]'
def print_tokens(tokenized_text):
dstr = ""
for i in range(len(tokenized_text)):
dstr += " " + str(i) + ":"+tokenized_text[i]
print(dstr)
print()
def get_pos():
    while True:
        try:
            masked_index = int(input())
        except ValueError:
            print("Enter valid number: (0 to quit)")
            continue
        if (masked_index == 0):
            print("Quitting")
            sys.exit()
        return masked_index
while (True):
text = get_sent()
tokenized_text = tokenizer.tokenize(text)
print_tokens(tokenized_text)
#pdb.set_trace()
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
# Create the segments tensors.
segments_ids = [0] * len(tokenized_text)
masked_index = len(tokenized_text) - 2
tokenized_text[masked_index] = "[MASK]"
indexed_tokens[masked_index] = 103
results_dict = {}
# Convert inputs to PyTorch tensors
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
with torch.no_grad():
predictions = model(tokens_tensor, segments_tensors)
while True:
print_tokens(tokenized_text)
print("Enter any term position neighbor:")
masked_index = get_pos()
results_dict = {}
for i in range(len(predictions[0][0,masked_index])):
tok = tokenizer.convert_ids_to_tokens([i])[0]
results_dict[tok] = float(predictions[0][0,masked_index][i].tolist())
k = 0
hist_d = {}
sorted_d = OrderedDict(sorted(results_dict.items(), key=lambda kv: kv[1], reverse=True))
first = True
max_val = 0
for i in sorted_d:
if (first):
max_val = sorted_d[i]
first = False
val = round(float(sorted_d[i])/max_val,1)
if (val in hist_d):
hist_d[val] += 1
else:
hist_d[val] = 1
k += 1
if (k <= 20):
print(i,sorted_d[i])
fp = open("top_k.txt","w")
hist_d_sorted = OrderedDict(sorted(hist_d.items(), key=lambda kv: kv[0], reverse=False))
for i in hist_d_sorted:
fp.write(str(i) + " " + str(hist_d_sorted[i]) + "\n")
fp.close()
| true
| true
|
790412db427107f1867883f7ce1885b564a56b43
| 1,227
|
py
|
Python
|
cgan/utils.py
|
GodWriter/GAN-Pytorch
|
42e0657ae4844c9644a2c382de6af977733d9074
|
[
"Apache-2.0"
] | 8
|
2020-07-20T13:18:18.000Z
|
2021-03-22T07:14:03.000Z
|
cgan/utils.py
|
GodWriter/GAN-Pytorch
|
42e0657ae4844c9644a2c382de6af977733d9074
|
[
"Apache-2.0"
] | null | null | null |
cgan/utils.py
|
GodWriter/GAN-Pytorch
|
42e0657ae4844c9644a2c382de6af977733d9074
|
[
"Apache-2.0"
] | 1
|
2020-08-12T07:33:05.000Z
|
2020-08-12T07:33:05.000Z
|
import os
import imageio
import numpy as np
from PIL import Image
from torch.autograd import Variable
from torchvision.utils import save_image
def create_gif(image_path):
frames = []
gif_name = os.path.join("images", 'mnist1.gif')
image_list = os.listdir(image_path)
    image_list.sort()
for image_name in image_list:
frames.append(imageio.imread(os.path.join(image_path, image_name)))
imageio.mimsave(gif_name, frames, 'GIF', duration=0.1)
def resize_img(path):
names = os.listdir(path)
for name in names:
img_path = os.path.join(path, name)
img = Image.open(img_path)
img = img.resize((172, 172))
img.save(img_path)
def sample_image(opt, n_row, batches_done, generator, FloatTensor, LongTensor):
z = Variable(FloatTensor(np.random.normal(0, 1, (n_row ** 2, opt.latent_dim))))
labels = np.array([num for _ in range(n_row) for num in range(n_row)])
labels = Variable(LongTensor(labels))
gen_imgs = generator(z, labels)
save_image(gen_imgs.data, "images/%d.png" % batches_done, nrow=n_row, normalize=True)
if __name__ == "__main__":
image_path = "images/example1"
resize_img(image_path)
create_gif(image_path)
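# Note on sample_image above: the labels array enumerates the digits 0..n_row-1
# once per grid row, so each row of the saved image shows one conditioned class.
# A hedged call sketch (opt, generator and the tensor types come from the
# training script, which is not part of this file):
#
#   sample_image(opt, n_row=10, batches_done=step,
#                generator=generator, FloatTensor=FloatTensor, LongTensor=LongTensor)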
| 26.106383
| 90
| 0.691932
|
import os
import imageio
import numpy as np
from PIL import Image
from torch.autograd import Variable
from torchvision.utils import save_image
def create_gif(image_path):
frames = []
gif_name = os.path.join("images", 'mnist1.gif')
image_list = os.listdir(image_path)
    image_list.sort()
for image_name in image_list:
frames.append(imageio.imread(os.path.join(image_path, image_name)))
imageio.mimsave(gif_name, frames, 'GIF', duration=0.1)
def resize_img(path):
names = os.listdir(path)
for name in names:
img_path = os.path.join(path, name)
img = Image.open(img_path)
img = img.resize((172, 172))
img.save(img_path)
def sample_image(opt, n_row, batches_done, generator, FloatTensor, LongTensor):
z = Variable(FloatTensor(np.random.normal(0, 1, (n_row ** 2, opt.latent_dim))))
labels = np.array([num for _ in range(n_row) for num in range(n_row)])
labels = Variable(LongTensor(labels))
gen_imgs = generator(z, labels)
save_image(gen_imgs.data, "images/%d.png" % batches_done, nrow=n_row, normalize=True)
if __name__ == "__main__":
image_path = "images/example1"
resize_img(image_path)
create_gif(image_path)
| true
| true
|
79041390966a2807262471c30d556211b6a6eaf9
| 14,354
|
py
|
Python
|
char_PTB/char_ptb.py
|
CookieBox26/trellisnet
|
356b1d9828012cc4929728a492b771c44161e089
|
[
"MIT"
] | 442
|
2018-10-17T14:59:26.000Z
|
2022-03-13T11:24:42.000Z
|
TrellisNet/char_PTB/char_ptb.py
|
chr6192/trellisnet
|
ec1de0a5ee09ef5a4c5bca4c83456dec8cbdf4c8
|
[
"MIT"
] | 7
|
2018-12-07T18:44:04.000Z
|
2020-11-19T11:25:23.000Z
|
TrellisNet/char_PTB/char_ptb.py
|
chr6192/trellisnet
|
ec1de0a5ee09ef5a4c5bca4c83456dec8cbdf4c8
|
[
"MIT"
] | 70
|
2018-10-18T01:18:16.000Z
|
2021-08-16T13:59:48.000Z
|
import argparse
import torch.optim as optim
import sys
from utils import *
from data import data_generator
import time
import math
from setproctitle import setproctitle
import warnings
sys.path.append("../")
from model import TrellisNetModel
warnings.filterwarnings("ignore") # Suppress the RunTimeWarning on unicode
parser = argparse.ArgumentParser(description='PyTorch TrellisNet Language Model')
parser.add_argument('--dataset', type=str, default='ptb',
help='dataset to use')
parser.add_argument('--name', type=str, default='Trellis_charPTB',
help='name of the process')
parser.add_argument('--emsize', type=int, default=200,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=1050,
help='number of hidden units per layer')
parser.add_argument('--nout', type=int, default=200,
help='number of output units')
parser.add_argument('--lr', type=float, default=2e-3,
help='initial learning rate (default: 2e-3)')
parser.add_argument('--clip', type=float, default=0.2,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=400,
help='upper epoch limit (default: 400)')
parser.add_argument('--batch_size', type=int, default=24, metavar='N',
help='batch size')
# For most of the time, you should change these two together
parser.add_argument('--nlevels', type=int, default=140,
help='levels of the network')
parser.add_argument('--horizon', type=int, default=140,
help='The effective history size')
parser.add_argument('--dropout', type=float, default=0.1,
help='output dropout (0 = no dropout)')
parser.add_argument('--dropouti', type=float, default=0.1,
help='input dropout (0 = no dropout)')
parser.add_argument('--wdrop', type=float, default=0.26,
help='dropout applied to weights (0 = no dropout)')
parser.add_argument('--emb_dropout', type=float, default=0.02,
help='dropout applied to embedding layer (0 = no dropout)')
parser.add_argument('--dropouth', type=float, default=0.29,
help='dropout applied to hidden layers (0 = no dropout)')
parser.add_argument('--wdecay', type=float, default=8e-7,
help='weight decay')
parser.add_argument('--tied', action='store_false',
help='tie the word embedding and softmax weights (default: True)')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--anneal', type=int, default=5,
help='learning rate annealing criteria (default: 5)')
parser.add_argument('--cuda', action='store_false',
help='use CUDA (default: True)')
parser.add_argument('--wnorm', action='store_false',
help='use weight normalization (default: True)')
parser.add_argument('--temporalwdrop', action='store_false',
help='only drop the temporal weights (default: True)')
parser.add_argument('--optim', type=str, default='Adam',
help='optimizer to use (default: Adam)')
parser.add_argument('--repack', action='store_false',
help='use repackaging (default: True)')
parser.add_argument('--eval', action='store_true',
help='evaluation only mode')
parser.add_argument('--aux', type=float, default=0.3,
help='use auxiliary loss (default: 0.3), -1 means no auxiliary loss used')
parser.add_argument('--aux_freq', type=float, default=80,
help='auxiliary loss frequency (default: 80)')
parser.add_argument('--seq_len', type=int, default=0,
help='total sequence length; if this is 0 then it defaults to args.horizon (default: 0)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='report interval')
parser.add_argument('--when', nargs='+', type=int, default=[220, 350],
help='When to decay the learning rate')
parser.add_argument('--ksize', type=int, default=2,
help='conv kernel size (default: 2)')
parser.add_argument('--dilation', nargs='+', type=int, default=[1],
help='dilation rate (default: [1])')
parser.add_argument('--n_experts', type=int, default=0,
help='number of softmax experts (default: 0)')
parser.add_argument('--load', type=str, default='',
help='path to load the model')
parser.add_argument('--load_weight', type=str, default='',
help='path to load the model weights (please only use --load or --load_weight)')
args = parser.parse_args()
args.save = args.name + ".pt"
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
setproctitle(args.name)
torch.set_default_tensor_type('torch.FloatTensor')
if torch.cuda.is_available():
torch.set_default_tensor_type('torch.cuda.FloatTensor')
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
###############################################################################
# Load data
###############################################################################
file, file_len, valfile, valfile_len, testfile, testfile_len, corpus = data_generator(args)
ntokens = len(corpus.dictionary)
eval_batch_size = 10
test_batch_size = 10
train_data = batchify(char_tensor(corpus, file), args.batch_size, args)
val_data = batchify(char_tensor(corpus, valfile), eval_batch_size, args)
test_data = batchify(char_tensor(corpus, testfile), eval_batch_size, args)
print(train_data.size(), val_data.size())
class Logger(object):
def __init__(self):
self.terminal = sys.stdout
self.log = open("logs/" + args.name + ".log", "a")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
    def flush(self):
        # needed for python 3 compatibility: flush both the log file and stdout
        self.log.flush()
        self.terminal.flush()
sys.stdout = Logger()
###############################################################################
# Build the model
###############################################################################
if len(args.load) > 0:
print("Loaded model\n")
model = torch.load(args.load)
else:
model = TrellisNetModel(ntoken=ntokens,
ninp=args.emsize,
nhid=args.nhid,
nout=args.nout,
nlevels=args.nlevels,
kernel_size=args.ksize,
dilation=args.dilation,
dropout=args.dropout,
dropouti=args.dropouti,
dropouth=args.dropouth,
emb_dropout=args.emb_dropout,
wdrop=args.wdrop,
temporalwdrop=args.temporalwdrop,
tie_weights=args.tied,
repack=args.repack,
wnorm=args.wnorm,
aux=(args.aux > 0),
aux_frequency=args.aux_freq,
load=args.load_weight)
if args.cuda:
model.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = getattr(optim, args.optim)(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
###############################################################################
# Training code
###############################################################################
def evaluate(data_source):
model.eval()
with torch.no_grad():
total_loss = 0
hidden = model.init_hidden(eval_batch_size)
eff_history_mode = (args.seq_len > args.horizon and not args.repack)
if eff_history_mode:
validseqlen = args.seq_len - args.horizon
seq_len = args.seq_len
else:
validseqlen = args.horizon
seq_len = args.horizon
processed_data_size = 0
for i in range(0, data_source.size(0) - 1, validseqlen):
eff_history = args.horizon if eff_history_mode else 0
if i + eff_history >= data_source.size(0) - 1: continue
data, targets = get_batch(data_source, i, seq_len, evaluation=True)
if args.repack:
hidden = repackage_hidden(hidden)
else:
hidden = model.init_hidden(eval_batch_size)
data = data.t()
net = nn.DataParallel(model) if data.size(0) > 10 else model
(_, _, decoded), hidden, all_decoded = net(data, hidden)
decoded = decoded.transpose(0, 1)
targets = targets[eff_history:].contiguous().view(-1)
final_decoded = decoded[eff_history:].contiguous().view(-1, ntokens)
loss = criterion(final_decoded, targets)
loss = loss.data
total_loss += (data.size(1) - eff_history) * loss
processed_data_size += data.size(1) - eff_history
decoded = None
final_decoded = None
targets = None
all_decoded = None # This is for auxiliary losses; not used in evaluation
return total_loss.item() / processed_data_size
def train(epoch):
model.train()
total_loss = 0
total_aux_losses = 0
start_time = time.time()
ntokens = len(corpus.dictionary)
hidden = model.init_hidden(args.batch_size)
eff_history_mode = (args.seq_len > args.horizon and not args.repack)
if eff_history_mode:
validseqlen = args.seq_len - args.horizon
seq_len = args.seq_len
else:
validseqlen = args.horizon
seq_len = args.horizon
for batch, i in enumerate(range(0, train_data.size(0) - 1, validseqlen)):
        # When not using repackaging mode, we DISCARD the first args.horizon outputs
        # in backprop (they are the "effective history").
eff_history = args.horizon if eff_history_mode else 0
if i + eff_history >= train_data.size(0) - 1: continue
data, targets = get_batch(train_data, i, seq_len)
if args.repack:
hidden = repackage_hidden(hidden)
else:
hidden = model.init_hidden(args.batch_size)
optimizer.zero_grad()
data = data.t()
net = nn.DataParallel(model) if data.size(0) > 10 else model
(_, _, decoded), hidden, all_decoded = net(data, hidden)
decoded = decoded.transpose(0, 1)
targets = targets[eff_history:].contiguous().view(-1)
final_decoded = decoded[eff_history:].contiguous().view(-1, ntokens)
# Loss 1: CE loss
raw_loss = criterion(final_decoded, targets)
# Loss 2: Aux loss
aux_losses = 0
if args.aux > 0:
all_decoded = all_decoded[:, :, eff_history:].permute(1, 2, 0, 3).contiguous()
aux_size = all_decoded.size(0)
all_decoded = all_decoded.view(aux_size, -1, ntokens)
aux_losses = args.aux * sum([criterion(all_decoded[i], targets) for i in range(aux_size)])
# Combine losses
loss = raw_loss + aux_losses
loss.backward()
if args.clip > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
optimizer.step()
total_loss += raw_loss.data
        if args.aux > 0:
            total_aux_losses += aux_losses.data
        if batch % args.log_interval == 0 and batch > 0:
            cur_loss = total_loss.item() / args.log_interval
            cur_aux_loss = total_aux_losses.item() / args.log_interval if args.aux > 0 else 0
elapsed = time.time() - start_time
print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.5f} | ms/batch {:5.2f} | '
'raw_loss {:5.3f} | aux_loss {:5.2f} | bpc {:5.3f}'.format(
epoch, batch, len(train_data) // validseqlen, lr,
elapsed * 1000 / args.log_interval, cur_loss, cur_aux_loss, cur_loss / math.log(2)))
total_loss = 0
total_aux_losses = 0
start_time = time.time()
sys.stdout.flush()
decoded = None
targets = None
final_decoded = None
all_decoded = None
def inference(epoch):
val_loss = evaluate(val_data)
print('-' * 89)
print('| End of epoch {:3d} | valid loss {:5.3f} | valid bpc {:8.3f}'.format(
epoch, val_loss, val_loss / math.log(2)))
test_loss = evaluate(test_data)
print('| End of epoch {:3d} | test loss {:5.3f} | test bpc {:8.3f}'.format(
epoch, test_loss, test_loss / math.log(2)))
print('-' * 89)
return val_loss, test_loss
if args.eval:
print("Eval only mode")
inference(-1)
sys.exit(0)
lr = args.lr
best_val_loss = None
all_val_losses = []
all_test_losses = []
try:
for epoch in range(1, args.epochs + 1):
        train(epoch)
val_loss, test_loss = inference(epoch)
if not best_val_loss or val_loss < best_val_loss:
print("Saving model (new best validation) in " + args.save)
save(model, args)
best_val_loss = val_loss
if epoch in args.when:
print("\n" + "*" * 89)
if lr > 1e-5:
print("Annealing learning rate")
lr = lr / 10.
for param_group in optimizer.param_groups:
param_group['lr'] = lr
all_val_losses.append(val_loss)
all_test_losses.append(test_loss)
sys.stdout.flush()
except KeyboardInterrupt:
print('-' * 89)
print("Saving before quit...")
save(model, args)
# Load the best saved model
with open(args.save, 'rb') as f:
model = torch.load(f)
model.save_weights('weights/pretrained_charptb.pkl')
# Run on test data
test_loss = evaluate(test_data)
print('=' * 89)
print('| End of training | test loss {:5.3f} | test bpc {:8.3f}'.format(
test_loss, test_loss / math.log(2)))
print('=' * 89)
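# Worked sketch of the effective-history bookkeeping used in train()/evaluate()
# (the numbers are hypothetical command-line values, not the defaults):
#
#   with --seq_len 210 --horizon 140 and repackaging turned off:
#     validseqlen = 210 - 140 = 70   # stride between training windows
#     eff_history = 140              # leading outputs discarded as warm-up context
#   so each window backpropagates loss only through targets[140:], i.e. 70 positions.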
| 39.111717
| 109
| 0.585063
|
import argparse
import torch.optim as optim
import sys
from utils import *
from data import data_generator
import time
import math
from setproctitle import setproctitle
import warnings
sys.path.append("../")
from model import TrellisNetModel
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser(description='PyTorch TrellisNet Language Model')
parser.add_argument('--dataset', type=str, default='ptb',
help='dataset to use')
parser.add_argument('--name', type=str, default='Trellis_charPTB',
help='name of the process')
parser.add_argument('--emsize', type=int, default=200,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=1050,
help='number of hidden units per layer')
parser.add_argument('--nout', type=int, default=200,
help='number of output units')
parser.add_argument('--lr', type=float, default=2e-3,
help='initial learning rate (default: 2e-3)')
parser.add_argument('--clip', type=float, default=0.2,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=400,
help='upper epoch limit (default: 400)')
parser.add_argument('--batch_size', type=int, default=24, metavar='N',
help='batch size')
parser.add_argument('--nlevels', type=int, default=140,
help='levels of the network')
parser.add_argument('--horizon', type=int, default=140,
help='The effective history size')
parser.add_argument('--dropout', type=float, default=0.1,
help='output dropout (0 = no dropout)')
parser.add_argument('--dropouti', type=float, default=0.1,
help='input dropout (0 = no dropout)')
parser.add_argument('--wdrop', type=float, default=0.26,
help='dropout applied to weights (0 = no dropout)')
parser.add_argument('--emb_dropout', type=float, default=0.02,
help='dropout applied to embedding layer (0 = no dropout)')
parser.add_argument('--dropouth', type=float, default=0.29,
help='dropout applied to hidden layers (0 = no dropout)')
parser.add_argument('--wdecay', type=float, default=8e-7,
help='weight decay')
parser.add_argument('--tied', action='store_false',
help='tie the word embedding and softmax weights (default: True)')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--anneal', type=int, default=5,
help='learning rate annealing criteria (default: 5)')
parser.add_argument('--cuda', action='store_false',
help='use CUDA (default: True)')
parser.add_argument('--wnorm', action='store_false',
help='use weight normalization (default: True)')
parser.add_argument('--temporalwdrop', action='store_false',
help='only drop the temporal weights (default: True)')
parser.add_argument('--optim', type=str, default='Adam',
help='optimizer to use (default: Adam)')
parser.add_argument('--repack', action='store_false',
help='use repackaging (default: True)')
parser.add_argument('--eval', action='store_true',
help='evaluation only mode')
parser.add_argument('--aux', type=float, default=0.3,
help='use auxiliary loss (default: 0.3), -1 means no auxiliary loss used')
parser.add_argument('--aux_freq', type=float, default=80,
help='auxiliary loss frequency (default: 80)')
parser.add_argument('--seq_len', type=int, default=0,
help='total sequence length; if this is 0 then it defaults to args.horizon (default: 0)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='report interval')
parser.add_argument('--when', nargs='+', type=int, default=[220, 350],
help='When to decay the learning rate')
parser.add_argument('--ksize', type=int, default=2,
help='conv kernel size (default: 2)')
parser.add_argument('--dilation', nargs='+', type=int, default=[1],
help='dilation rate (default: [1])')
parser.add_argument('--n_experts', type=int, default=0,
help='number of softmax experts (default: 0)')
parser.add_argument('--load', type=str, default='',
help='path to load the model')
parser.add_argument('--load_weight', type=str, default='',
help='path to load the model weights (please only use --load or --load_weight)')
args = parser.parse_args()
args.save = args.name + ".pt"
torch.manual_seed(args.seed)
setproctitle(args.name)
torch.set_default_tensor_type('torch.FloatTensor')
if torch.cuda.is_available():
torch.set_default_tensor_type('torch.cuda.FloatTensor')
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
torch.cuda.manual_seed(args.seed)
| true
| true
|
7904143cc3239879e6410db14e5d448d160ace9f
| 842
|
py
|
Python
|
main.py
|
caxenie/oom-oop-intro
|
e70cd2ab8067b4d81001b7491deab9e029eefd3d
|
[
"MIT"
] | 1
|
2022-02-23T20:18:35.000Z
|
2022-02-23T20:18:35.000Z
|
main.py
|
caxenie/oom-oop-intro
|
e70cd2ab8067b4d81001b7491deab9e029eefd3d
|
[
"MIT"
] | null | null | null |
main.py
|
caxenie/oom-oop-intro
|
e70cd2ab8067b4d81001b7491deab9e029eefd3d
|
[
"MIT"
] | null | null | null |
from eingabe import EinsatzstoffeEingabe
from ausgabe import *
from verarbeitung import *
from plannung import *
if __name__ == '__main__':
eingabe1 = EinsatzstoffeEingabe(100000, "Erz", "Indien")
eingabe2 = EinsatzstoffeEingabe(59000, "Kochen", "Rumänien")
ausgabe1 = ProzessAusgabe(100, 200, "Schienen")
ausgabe2 = ProzessAusgabe(300, 1200, "Rohre")
verarbeitung0 = StahlVerarbeitung(1, "Walzwerk", 4)
verarbeitung1 = VorVerarbeitung(1, "Walzwerk", 2, ausgabe1)
verarbeitung2 = HauptVerarbeitung(1, "Stahlwerk", 3, ausgabe2)
verarbeitung0.getSchrittDauer()
verarbeitung1.getSchrittDauer()
verarbeitung2.getSchrittDauer()
plannung1 = StahlProzessPlannung(2, "Vorverarbeitung", verarbeitung1)
plannung2 = StahlProzessPlannung(2, "Hauptverarbeitung", verarbeitung2)
| 35.083333
| 76
| 0.723278
|
from eingabe import EinsatzstoffeEingabe
from ausgabe import *
from verarbeitung import *
from plannung import *
if __name__ == '__main__':
eingabe1 = EinsatzstoffeEingabe(100000, "Erz", "Indien")
eingabe2 = EinsatzstoffeEingabe(59000, "Kochen", "Rumänien")
ausgabe1 = ProzessAusgabe(100, 200, "Schienen")
ausgabe2 = ProzessAusgabe(300, 1200, "Rohre")
verarbeitung0 = StahlVerarbeitung(1, "Walzwerk", 4)
verarbeitung1 = VorVerarbeitung(1, "Walzwerk", 2, ausgabe1)
verarbeitung2 = HauptVerarbeitung(1, "Stahlwerk", 3, ausgabe2)
verarbeitung0.getSchrittDauer()
verarbeitung1.getSchrittDauer()
verarbeitung2.getSchrittDauer()
plannung1 = StahlProzessPlannung(2, "Vorverarbeitung", verarbeitung1)
plannung2 = StahlProzessPlannung(2, "Hauptverarbeitung", verarbeitung2)
| true
| true
|
7904146020cf731e027e576e42561b71ee9d92c4
| 19,173
|
py
|
Python
|
config/jupyter/jupyter_notebook_config.py
|
alonsoir/pipeline
|
1480df925a840b6e45f163caf78d50829eb94ff0
|
[
"Apache-2.0"
] | 1
|
2019-01-16T05:52:53.000Z
|
2019-01-16T05:52:53.000Z
|
config/jupyter/jupyter_notebook_config.py
|
TrinathY/pipeline
|
a08cabfefd3521a880d11f1e4b02286287118b6c
|
[
"Apache-2.0"
] | null | null | null |
config/jupyter/jupyter_notebook_config.py
|
TrinathY/pipeline
|
a08cabfefd3521a880d11f1e4b02286287118b6c
|
[
"Apache-2.0"
] | null | null | null |
# Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Configurable configuration
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# SingletonConfigurable configuration
#------------------------------------------------------------------------------
# A configurable that only allows one instance.
#
# This class is for classes that should only have one instance of itself or
# *any* subclass. To create and retrieve such a class use the
# :meth:`SingletonConfigurable.instance` method.
#------------------------------------------------------------------------------
# Application configuration
#------------------------------------------------------------------------------
# This is an application.
# The date format used by logging formatters for %(asctime)s
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The Logging format template
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Set the log level by value or name.
# c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp configuration
#------------------------------------------------------------------------------
# Base class for Jupyter applications
# Answer yes to any prompts.
c.JupyterApp.answer_yes = True
# Full path of a config file.
# c.JupyterApp.config_file = u''
# Generate default config file.
# c.JupyterApp.generate_config = False
# Specify a config file to load.
# c.JupyterApp.config_file_name = u''
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# The number of additional ports to try if the specified port is not available.
c.NotebookApp.port_retries = 0
# Extra variables to supply to jinja templates when rendering.
# c.NotebookApp.jinja_template_vars = traitlets.Undefined
# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''
# Supply extra arguments that will be passed to Jinja environment.
# c.NotebookApp.jinja_environment_options = traitlets.Undefined
# The IP address the notebook server will listen on.
c.NotebookApp.ip = '*'
# DEPRECATED use base_url
# c.NotebookApp.base_project_url = '/'
# Python modules to load as notebook server extensions. This is an experimental
# API, and may change in future releases.
# c.NotebookApp.server_extensions = traitlets.Undefined
# Note: These extensions require the ~/.jupyter path to exist; otherwise, errors will occur on startup
c.NotebookApp.server_extensions=['ipyparallel.nbextension']
# The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# c.NotebookApp.cookie_secret = ''
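# For example (a sketch; the path is hypothetical): generate 64 random bytes once
# and point cookie_secret_file (below) at them:
#   import os
#   open('/root/.jupyter/cookie_secret', 'wb').write(os.urandom(64))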
# The default URL to redirect to from `/`
# c.NotebookApp.default_url = '/tree'
# The port the notebook server will listen on.
c.NotebookApp.port = 8754
# The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of IPython and the next stable one.
# c.NotebookApp.kernel_spec_manager_class = <class 'jupyter_client.kernelspec.KernelSpecManager'>
# Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
c.NotebookApp.allow_origin = '*'
# The notebook manager class to use.
# c.NotebookApp.contents_manager_class = <class 'notebook.services.contents.filemanager.FileContentsManager'>
# Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
# c.NotebookApp.allow_origin_pat = ''
# The full path to an SSL/TLS certificate file.
# c.NotebookApp.certfile = u''
# The logout handler class to use.
# c.NotebookApp.logout_handler_class = <class 'notebook.auth.logout.LogoutHandler'>
# The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
c.NotebookApp.base_url = '/'
# The session manager class to use.
# c.NotebookApp.session_manager_class = <class 'notebook.services.sessions.sessionmanager.SessionManager'>
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
# c.NotebookApp.tornado_settings = traitlets.Undefined
# The directory to use for notebooks and kernels.
c.NotebookApp.notebook_dir = u'/root/pipeline/myapps/jupyter/'
# The kernel manager class to use.
# c.NotebookApp.kernel_manager_class = <class 'notebook.services.kernels.kernelmanager.MappingKernelManager'>
# The file where the cookie secret is stored.
# c.NotebookApp.cookie_secret_file = u''
# Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
# c.NotebookApp.ssl_options = traitlets.Undefined
#
# c.NotebookApp.file_to_run = ''
# DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
# c.NotebookApp.pylab = 'disabled'
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library IPython uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# Reraise exceptions encountered loading server extensions?
# c.NotebookApp.reraise_server_extension_failures = False
# The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
# c.NotebookApp.websocket_url = ''
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
c.NotebookApp.open_browser = False
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# c.NotebookApp.password = u''
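# For example (sketch; the hash shown is a placeholder, not a real value):
#   In [1]: from notebook.auth import passwd
#   In [2]: passwd()
#   Out[2]: 'sha1:<salt>:<hashed-password>'
# c.NotebookApp.password = u'sha1:<salt>:<hashed-password>'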
# extra paths to look for Javascript notebook extensions
# c.NotebookApp.extra_nbextensions_path = traitlets.Undefined
# Set the Access-Control-Allow-Credentials: true header
# c.NotebookApp.allow_credentials = False
# Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
# c.NotebookApp.extra_static_paths = traitlets.Undefined
# The login handler class to use.
# c.NotebookApp.login_handler_class = <class 'notebook.auth.login.LoginHandler'>
# Whether or not to trust the X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
# c.NotebookApp.trust_xheaders = False
# Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
# c.NotebookApp.extra_template_paths = traitlets.Undefined
# The config manager class to use
# c.NotebookApp.config_manager_class = <class 'notebook.services.config.manager.ConfigManager'>
# The full path to a private key file for usage with SSL/TLS.
# c.NotebookApp.keyfile = u''
# DEPRECATED, use tornado_settings
# c.NotebookApp.webapp_settings = traitlets.Undefined
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = u''
#------------------------------------------------------------------------------
# LoggingConfigurable configuration
#------------------------------------------------------------------------------
# A parent class for Configurables that log.
#
# Subclasses have a log trait, and the default behavior is to get the logger
# from the currently running Application.
#------------------------------------------------------------------------------
# ConnectionFileMixin configuration
#------------------------------------------------------------------------------
# Mixin for configurable classes that work with connection files
# set the stdin (ROUTER) port [default: random]
# c.ConnectionFileMixin.stdin_port = 0
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.ConnectionFileMixin.ip = u''
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.ConnectionFileMixin.connection_file = ''
# set the control (ROUTER) port [default: random]
# c.ConnectionFileMixin.control_port = 0
# set the heartbeat port [default: random]
# c.ConnectionFileMixin.hb_port = 0
# set the shell (ROUTER) port [default: random]
# c.ConnectionFileMixin.shell_port = 0
#
# c.ConnectionFileMixin.transport = 'tcp'
# set the iopub (PUB) port [default: random]
# c.ConnectionFileMixin.iopub_port = 0
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the Jupyter command
# line.
# c.KernelManager.kernel_cmd = traitlets.Undefined
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Username for the Session. Default is your system username.
# c.Session.username = u'username'
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = traitlets.Undefined
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The UUID identifying this session.
# c.Session.session = u''
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# execution key, for signing messages.
# c.Session.key = ''
# Debug output in the Session
# c.Session.debug = False
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# path to file containing execution key.
# c.Session.keyfile = ''
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
#------------------------------------------------------------------------------
# MultiKernelManager configuration
#------------------------------------------------------------------------------
# A class for managing multiple kernels.
# The name of the default kernel to start
# c.MultiKernelManager.default_kernel_name = 'python2'
# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebook mapping and HTTP error handling
#
# c.MappingKernelManager.root_dir = u''
#------------------------------------------------------------------------------
# ContentsManager configuration
#------------------------------------------------------------------------------
# Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
# The base name used when creating untitled files.
# c.ContentsManager.untitled_file = 'untitled'
# Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
# c.ContentsManager.pre_save_hook = None
#
# c.ContentsManager.checkpoints_class = <class 'notebook.services.contents.checkpoints.Checkpoints'>
# Glob patterns to hide in file and directory listings.
# c.ContentsManager.hide_globs = traitlets.Undefined
# The base name used when creating untitled notebooks.
# c.ContentsManager.untitled_notebook = 'Untitled'
# The base name used when creating untitled directories.
# c.ContentsManager.untitled_directory = 'Untitled Folder'
#
# c.ContentsManager.checkpoints = traitlets.Undefined
#
# c.ContentsManager.checkpoints_kwargs = traitlets.Undefined
#------------------------------------------------------------------------------
# FileContentsManager configuration
#------------------------------------------------------------------------------
# DEPRECATED, use post_save_hook
# c.FileContentsManager.save_script = False
#
# c.FileContentsManager.root_dir = u''
# Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written - model: the model
# representing the file - contents_manager: this ContentsManager instance
# c.FileContentsManager.post_save_hook = None
#------------------------------------------------------------------------------
# NotebookNotary configuration
#------------------------------------------------------------------------------
# A class for computing and verifying notebook signatures.
# The number of notebook signatures to cache. When the number of signatures
# exceeds this value, the oldest 25% of signatures will be culled.
# c.NotebookNotary.cache_size = 65535
# The secret key with which notebooks are signed.
# c.NotebookNotary.secret = ''
# The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter runtime directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
# c.NotebookNotary.db_file = u''
# The hashing algorithm used to sign notebooks.
# c.NotebookNotary.algorithm = 'sha256'
# The file where the secret key is stored.
# c.NotebookNotary.secret_file = u''
#------------------------------------------------------------------------------
# KernelSpecManager configuration
#------------------------------------------------------------------------------
# Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
# c.KernelSpecManager.whitelist = traitlets.Undefined
| 36.942197
| 109
| 0.668388
|
c.JupyterApp.answer_yes = True
c.NotebookApp.port_retries = 0
c.NotebookApp.ip = '*'
c.NotebookApp.server_extensions=['ipyparallel.nbextension']
c.NotebookApp.port = 8754
c.NotebookApp.allow_origin = '*'
c.NotebookApp.base_url = '/'
c.NotebookApp.notebook_dir = u'/root/pipeline/myapps/jupyter/'
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
# c.NotebookApp.websocket_url = ''
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
c.NotebookApp.open_browser = False
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# c.NotebookApp.password = u''
# extra paths to look for Javascript notebook extensions
# c.NotebookApp.extra_nbextensions_path = traitlets.Undefined
# Set the Access-Control-Allow-Credentials: true header
# c.NotebookApp.allow_credentials = False
# Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
# c.NotebookApp.extra_static_paths = traitlets.Undefined
# The login handler class to use.
# c.NotebookApp.login_handler_class = <class 'notebook.auth.login.LoginHandler'>
# Whether or not to trust the X-Scheme/X-Forwarded-Proto and
# X-Real-Ip/X-Forwarded-For headers sent by the upstream reverse proxy.
# Necessary if the proxy handles SSL.
# c.NotebookApp.trust_xheaders = False
# Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
# c.NotebookApp.extra_template_paths = traitlets.Undefined
# The config manager class to use
# c.NotebookApp.config_manager_class = <class 'notebook.services.config.manager.ConfigManager'>
# The full path to a private key file for usage with SSL/TLS.
# c.NotebookApp.keyfile = u''
# DEPRECATED, use tornado_settings
# c.NotebookApp.webapp_settings = traitlets.Undefined
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = u''
#------------------------------------------------------------------------------
# LoggingConfigurable configuration
#------------------------------------------------------------------------------
# A parent class for Configurables that log.
#
# Subclasses have a log trait, and the default behavior is to get the logger
# from the currently running Application.
#------------------------------------------------------------------------------
# ConnectionFileMixin configuration
#------------------------------------------------------------------------------
# Mixin for configurable classes that work with connection files
# set the stdin (ROUTER) port [default: random]
# c.ConnectionFileMixin.stdin_port = 0
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.ConnectionFileMixin.ip = u''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
#------------------------------------------------------------------------------
# MultiKernelManager configuration
#------------------------------------------------------------------------------
# A class for managing multiple kernels.
# The name of the default kernel to start
# c.MultiKernelManager.default_kernel_name = 'python2'
# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebook mapping and HTTP error handling
#
# c.MappingKernelManager.root_dir = u''
#------------------------------------------------------------------------------
# ContentsManager configuration
#------------------------------------------------------------------------------
# Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
# The base name used when creating untitled files.
# c.ContentsManager.untitled_file = 'untitled'
# Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
# c.ContentsManager.pre_save_hook = None
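#
# A minimal illustrative sketch (not part of the stock config file): a
# pre-save hook that strips cell outputs so only clean notebooks are written
# to disk. Uncomment to try it.
#
# def scrub_output_pre_save(model, **kwargs):
#     """Remove outputs and execution counts before saving a notebook."""
#     if model['type'] != 'notebook':
#         return
#     if model['content']['nbformat'] != 4:
#         return  # only nbformat v4 has this cell layout
#     for cell in model['content']['cells']:
#         if cell['cell_type'] == 'code':
#             cell['outputs'] = []
#             cell['execution_count'] = None
#
# c.ContentsManager.pre_save_hook = scrub_output_pre_save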
#
# c.ContentsManager.checkpoints_class = <class 'notebook.services.contents.checkpoints.Checkpoints'>
# Glob patterns to hide in file and directory listings.
# c.ContentsManager.hide_globs = traitlets.Undefined
# The base name used when creating untitled notebooks.
# c.ContentsManager.untitled_notebook = 'Untitled'
# The base name used when creating untitled directories.
# c.ContentsManager.untitled_directory = 'Untitled Folder'
#
# c.ContentsManager.checkpoints = traitlets.Undefined
#
# c.ContentsManager.checkpoints_kwargs = traitlets.Undefined
#------------------------------------------------------------------------------
# FileContentsManager configuration
#------------------------------------------------------------------------------
# DEPRECATED, use post_save_hook
# c.FileContentsManager.save_script = False
#
# c.FileContentsManager.root_dir = u''
# Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
#   - path: the filesystem path to the file just written
#   - model: the model representing the file
#   - contents_manager: this ContentsManager instance
# c.FileContentsManager.post_save_hook = None
#------------------------------------------------------------------------------
# NotebookNotary configuration
#------------------------------------------------------------------------------
# A class for computing and verifying notebook signatures.
# The number of notebook signatures to cache. When the number of signatures
# exceeds this value, the oldest 25% of signatures will be culled.
# c.NotebookNotary.cache_size = 65535
# The secret key with which notebooks are signed.
# c.NotebookNotary.secret = ''
# The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter runtime directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
# c.NotebookNotary.db_file = u''
# The hashing algorithm used to sign notebooks.
# c.NotebookNotary.algorithm = 'sha256'
# The file where the secret key is stored.
# c.NotebookNotary.secret_file = u''
#------------------------------------------------------------------------------
# KernelSpecManager configuration
#------------------------------------------------------------------------------
# Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
# c.KernelSpecManager.whitelist = traitlets.Undefined
| true
| true
|
79041484b8c6a9a76e5f13c17b77c7b297ee0ad2
| 17,044
|
py
|
Python
|
lib/python2.7/site-packages/django_filters/filterset.py
|
ervinpepic/E-commerce
|
2c15255d1730728cf35c166b9f88cffcb99f5323
|
[
"MIT"
] | null | null | null |
lib/python2.7/site-packages/django_filters/filterset.py
|
ervinpepic/E-commerce
|
2c15255d1730728cf35c166b9f88cffcb99f5323
|
[
"MIT"
] | null | null | null |
lib/python2.7/site-packages/django_filters/filterset.py
|
ervinpepic/E-commerce
|
2c15255d1730728cf35c166b9f88cffcb99f5323
|
[
"MIT"
] | 2
|
2019-04-29T14:16:10.000Z
|
2020-07-23T12:04:17.000Z
|
from __future__ import absolute_import
from __future__ import unicode_literals
import types
import copy
from django import forms
from django.forms.forms import NON_FIELD_ERRORS
from django.core.validators import EMPTY_VALUES
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from sys import version_info
try:
from django.db.models.constants import LOOKUP_SEP
except ImportError: # pragma: nocover
# Django < 1.5 fallback
from django.db.models.sql.constants import LOOKUP_SEP # noqa
try:
from collections import OrderedDict
except ImportError: # pragma: nocover
# Django < 1.5 fallback
from django.utils.datastructures import SortedDict as OrderedDict # noqa
try:
from django.db.models.related import RelatedObject as ForeignObjectRel
except ImportError: # pragma: nocover
# Django >= 1.8 replaces RelatedObject with ForeignObjectRel
from django.db.models.fields.related import ForeignObjectRel
from .filters import (Filter, CharFilter, BooleanFilter,
ChoiceFilter, DateFilter, DateTimeFilter, TimeFilter, ModelChoiceFilter,
ModelMultipleChoiceFilter, NumberFilter)
ORDER_BY_FIELD = 'o'
# There is a bug with deepcopy in 2.6, patch if we are running python < 2.7
# http://bugs.python.org/issue1515
if version_info < (2, 7, 0):
def _deepcopy_method(x, memo):
return type(x)(x.im_func, copy.deepcopy(x.im_self, memo), x.im_class)
copy._deepcopy_dispatch[types.MethodType] = _deepcopy_method
class STRICTNESS(object):
"""
    Values of False & True chosen for backward compatibility reasons.
Originally, these were the only options.
"""
IGNORE = False
RETURN_NO_RESULTS = True
RAISE_VALIDATION_ERROR = "RAISE"
def get_declared_filters(bases, attrs, with_base_filters=True):
filters = []
for filter_name, obj in list(attrs.items()):
if isinstance(obj, Filter):
obj = attrs.pop(filter_name)
if getattr(obj, 'name', None) is None:
obj.name = filter_name
filters.append((filter_name, obj))
filters.sort(key=lambda x: x[1].creation_counter)
if with_base_filters:
for base in bases[::-1]:
if hasattr(base, 'base_filters'):
filters = list(base.base_filters.items()) + filters
else:
for base in bases[::-1]:
if hasattr(base, 'declared_filters'):
filters = list(base.declared_filters.items()) + filters
return OrderedDict(filters)
def get_model_field(model, f):
parts = f.split(LOOKUP_SEP)
opts = model._meta
for name in parts[:-1]:
try:
rel = opts.get_field_by_name(name)[0]
except FieldDoesNotExist:
return None
if isinstance(rel, ForeignObjectRel):
if hasattr(rel, "related_model"):
# django >= 1.8 (ForeignObjectRel)
opts = rel.related_model._meta
else:
# django < 1.8 (RelatedObject)
opts = rel.opts
else:
model = rel.rel.to
opts = model._meta
try:
rel, model, direct, m2m = opts.get_field_by_name(parts[-1])
except FieldDoesNotExist:
return None
return rel
def filters_for_model(model, fields=None, exclude=None, filter_for_field=None,
filter_for_reverse_field=None):
field_dict = OrderedDict()
opts = model._meta
if fields is None:
fields = [f.name for f in sorted(opts.fields + opts.many_to_many)
if not isinstance(f, models.AutoField)]
# Loop through the list of fields.
for f in fields:
# Skip the field if excluded.
if exclude is not None and f in exclude:
continue
field = get_model_field(model, f)
# Do nothing if the field doesn't exist.
if field is None:
field_dict[f] = None
continue
if isinstance(field, ForeignObjectRel):
filter_ = filter_for_reverse_field(field, f)
if filter_:
field_dict[f] = filter_
# If fields is a dictionary, it must contain lists.
elif isinstance(fields, dict):
# Create a filter for each lookup type.
for lookup_type in fields[f]:
filter_ = filter_for_field(field, f, lookup_type)
if filter_:
filter_name = f
# Don't add "exact" to filter names
if lookup_type != 'exact':
filter_name = f + LOOKUP_SEP + lookup_type
field_dict[filter_name] = filter_
# If fields is a list, it contains strings.
else:
filter_ = filter_for_field(field, f)
if filter_:
field_dict[f] = filter_
return field_dict
def get_full_clean_override(together):
def full_clean(form):
def add_error(message):
try:
form.add_error(None, message)
except AttributeError:
form._errors[NON_FIELD_ERRORS] = message
def all_valid(fieldset):
cleaned_data = form.cleaned_data
count = len([i for i in fieldset if cleaned_data.get(i)])
return 0 < count < len(fieldset)
super(form.__class__, form).full_clean()
message = 'Following fields must be together: %s'
if isinstance(together[0], (list, tuple)):
for each in together:
if all_valid(each):
return add_error(message % ','.join(each))
elif all_valid(together):
return add_error(message % ','.join(together))
return full_clean
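# Illustrative sketch (our own example, not part of this module): a FilterSet
# whose Meta uses ``together`` so that the two hypothetical range filters
# below must be supplied as a pair or not at all. ``Product`` is an assumed
# model with a ``price`` field.
#
# class ProductFilter(FilterSet):
#     price_min = NumberFilter(name='price', lookup_type='gte')
#     price_max = NumberFilter(name='price', lookup_type='lte')
#
#     class Meta:
#         model = Product
#         fields = []
#         together = ['price_min', 'price_max']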
class FilterSetOptions(object):
def __init__(self, options=None):
self.model = getattr(options, 'model', None)
self.fields = getattr(options, 'fields', None)
self.exclude = getattr(options, 'exclude', None)
self.order_by = getattr(options, 'order_by', False)
self.form = getattr(options, 'form', forms.Form)
self.together = getattr(options, 'together', None)
class FilterSetMetaclass(type):
def __new__(cls, name, bases, attrs):
try:
parents = [b for b in bases if issubclass(b, FilterSet)]
except NameError:
# We are defining FilterSet itself here
parents = None
declared_filters = get_declared_filters(bases, attrs, False)
new_class = super(
FilterSetMetaclass, cls).__new__(cls, name, bases, attrs)
if not parents:
return new_class
opts = new_class._meta = FilterSetOptions(
getattr(new_class, 'Meta', None))
if opts.model:
filters = filters_for_model(opts.model, opts.fields, opts.exclude,
new_class.filter_for_field,
new_class.filter_for_reverse_field)
filters.update(declared_filters)
else:
filters = declared_filters
if None in filters.values():
raise TypeError("Meta.fields contains a field that isn't defined "
"on this FilterSet")
new_class.declared_filters = declared_filters
new_class.base_filters = filters
return new_class
FILTER_FOR_DBFIELD_DEFAULTS = {
models.AutoField: {
'filter_class': NumberFilter
},
models.CharField: {
'filter_class': CharFilter
},
models.TextField: {
'filter_class': CharFilter
},
models.BooleanField: {
'filter_class': BooleanFilter
},
models.DateField: {
'filter_class': DateFilter
},
models.DateTimeField: {
'filter_class': DateTimeFilter
},
models.TimeField: {
'filter_class': TimeFilter
},
models.OneToOneField: {
'filter_class': ModelChoiceFilter,
'extra': lambda f: {
'queryset': f.rel.to._default_manager.complex_filter(
f.rel.limit_choices_to),
'to_field_name': f.rel.field_name,
}
},
models.ForeignKey: {
'filter_class': ModelChoiceFilter,
'extra': lambda f: {
'queryset': f.rel.to._default_manager.complex_filter(
f.rel.limit_choices_to),
'to_field_name': f.rel.field_name
}
},
models.ManyToManyField: {
'filter_class': ModelMultipleChoiceFilter,
'extra': lambda f: {
'queryset': f.rel.to._default_manager.complex_filter(
f.rel.limit_choices_to),
}
},
models.DecimalField: {
'filter_class': NumberFilter,
},
models.SmallIntegerField: {
'filter_class': NumberFilter,
},
models.IntegerField: {
'filter_class': NumberFilter,
},
models.PositiveIntegerField: {
'filter_class': NumberFilter,
},
models.PositiveSmallIntegerField: {
'filter_class': NumberFilter,
},
models.FloatField: {
'filter_class': NumberFilter,
},
models.NullBooleanField: {
'filter_class': BooleanFilter,
},
models.SlugField: {
'filter_class': CharFilter,
},
models.EmailField: {
'filter_class': CharFilter,
},
models.FilePathField: {
'filter_class': CharFilter,
},
models.URLField: {
'filter_class': CharFilter,
},
models.IPAddressField: {
'filter_class': CharFilter,
},
models.CommaSeparatedIntegerField: {
'filter_class': CharFilter,
},
}
class BaseFilterSet(object):
filter_overrides = {}
order_by_field = ORDER_BY_FIELD
    # What to do on validation errors
strict = STRICTNESS.RETURN_NO_RESULTS
def __init__(self, data=None, queryset=None, prefix=None, strict=None):
self.is_bound = data is not None
self.data = data or {}
if queryset is None:
queryset = self._meta.model._default_manager.all()
self.queryset = queryset
self.form_prefix = prefix
if strict is not None:
self.strict = strict
self.filters = copy.deepcopy(self.base_filters)
# propagate the model being used through the filters
for filter_ in self.filters.values():
filter_.model = self._meta.model
# Apply the parent to the filters, this will allow the filters to access the filterset
for filter_key, filter_ in six.iteritems(self.filters):
filter_.parent = self
def __iter__(self):
for obj in self.qs:
yield obj
def __len__(self):
return len(self.qs)
def __getitem__(self, key):
return self.qs[key]
@property
def qs(self):
if not hasattr(self, '_qs'):
valid = self.is_bound and self.form.is_valid()
if self.is_bound and not valid:
if self.strict == STRICTNESS.RAISE_VALIDATION_ERROR:
raise forms.ValidationError(self.form.errors)
elif bool(self.strict) == STRICTNESS.RETURN_NO_RESULTS:
self._qs = self.queryset.none()
return self._qs
# else STRICTNESS.IGNORE... ignoring
# start with all the results and filter from there
qs = self.queryset.all()
for name, filter_ in six.iteritems(self.filters):
value = None
if valid:
value = self.form.cleaned_data[name]
else:
raw_value = self.form[name].value()
try:
value = self.form.fields[name].clean(raw_value)
except forms.ValidationError:
if self.strict == STRICTNESS.RAISE_VALIDATION_ERROR:
raise
elif bool(self.strict) == STRICTNESS.RETURN_NO_RESULTS:
self._qs = self.queryset.none()
return self._qs
# else STRICTNESS.IGNORE... ignoring
if value is not None: # valid & clean data
qs = filter_.filter(qs, value)
if self._meta.order_by:
order_field = self.form.fields[self.order_by_field]
data = self.form[self.order_by_field].data
ordered_value = None
try:
ordered_value = order_field.clean(data)
except forms.ValidationError:
pass
if ordered_value in EMPTY_VALUES and self.strict:
ordered_value = self.form.fields[self.order_by_field].choices[0][0]
if ordered_value:
qs = qs.order_by(*self.get_order_by(ordered_value))
self._qs = qs
return self._qs
def count(self):
return self.qs.count()
@property
def form(self):
if not hasattr(self, '_form'):
fields = OrderedDict([
(name, filter_.field)
for name, filter_ in six.iteritems(self.filters)])
fields[self.order_by_field] = self.ordering_field
Form = type(str('%sForm' % self.__class__.__name__),
(self._meta.form,), fields)
if self._meta.together:
Form.full_clean = get_full_clean_override(self._meta.together)
if self.is_bound:
self._form = Form(self.data, prefix=self.form_prefix)
else:
self._form = Form(prefix=self.form_prefix)
return self._form
def get_ordering_field(self):
if self._meta.order_by:
if isinstance(self._meta.order_by, (list, tuple)):
if isinstance(self._meta.order_by[0], (list, tuple)):
# e.g. (('field', 'Display name'), ...)
choices = [(f[0], f[1]) for f in self._meta.order_by]
else:
choices = [(f, _('%s (descending)' % capfirst(f[1:])) if f[0] == '-' else capfirst(f))
for f in self._meta.order_by]
else:
# add asc and desc field names
# use the filter's label if provided
choices = []
for f, fltr in self.filters.items():
choices.extend([
(fltr.name or f, fltr.label or capfirst(f)),
("-%s" % (fltr.name or f), _('%s (descending)' % (fltr.label or capfirst(f))))
])
return forms.ChoiceField(label=_("Ordering"), required=False,
choices=choices)
@property
def ordering_field(self):
if not hasattr(self, '_ordering_field'):
self._ordering_field = self.get_ordering_field()
return self._ordering_field
def get_order_by(self, order_choice):
return [order_choice]
@classmethod
def filter_for_field(cls, f, name, lookup_type='exact'):
filter_for_field = dict(FILTER_FOR_DBFIELD_DEFAULTS)
filter_for_field.update(cls.filter_overrides)
default = {
'name': name,
'label': capfirst(f.verbose_name),
'lookup_type': lookup_type
}
if f.choices:
default['choices'] = f.choices
return ChoiceFilter(**default)
data = filter_for_field.get(f.__class__)
if data is None:
# could be a derived field, inspect parents
for class_ in f.__class__.mro():
# skip if class_ is models.Field or object
# 1st item in mro() is original class
if class_ in (f.__class__, models.Field, object):
continue
data = filter_for_field.get(class_)
if data:
break
if data is None:
return
filter_class = data.get('filter_class')
default.update(data.get('extra', lambda f: {})(f))
if filter_class is not None:
return filter_class(**default)
@classmethod
def filter_for_reverse_field(cls, f, name):
rel = f.field.rel
queryset = f.field.model._default_manager.all()
default = {
'name': name,
'label': capfirst(rel.related_name),
'queryset': queryset,
}
if rel.multiple:
return ModelMultipleChoiceFilter(**default)
else:
return ModelChoiceFilter(**default)
class FilterSet(six.with_metaclass(FilterSetMetaclass, BaseFilterSet)):
pass
def filterset_factory(model):
meta = type(str('Meta'), (object,), {'model': model})
filterset = type(str('%sFilterSet' % model._meta.object_name),
(FilterSet,), {'Meta': meta})
return filterset
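# Illustrative usage sketch (our own example): ``filterset_factory`` builds a
# FilterSet class on the fly for a model. ``Book`` and the query data below
# are hypothetical.
#
# BookFilterSet = filterset_factory(Book)
# f = BookFilterSet(data={'title': 'Dune'}, queryset=Book.objects.all())
# results = f.qs  # queryset filtered by the bound form data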
| 33.884692
| 106
| 0.582962
|
from __future__ import absolute_import
from __future__ import unicode_literals
import types
import copy
from django import forms
from django.forms.forms import NON_FIELD_ERRORS
from django.core.validators import EMPTY_VALUES
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from sys import version_info
try:
from django.db.models.constants import LOOKUP_SEP
except ImportError:
from django.db.models.sql.constants import LOOKUP_SEP
try:
from collections import OrderedDict
except ImportError:
from django.utils.datastructures import SortedDict as OrderedDict
try:
from django.db.models.related import RelatedObject as ForeignObjectRel
except ImportError:
from django.db.models.fields.related import ForeignObjectRel
from .filters import (Filter, CharFilter, BooleanFilter,
ChoiceFilter, DateFilter, DateTimeFilter, TimeFilter, ModelChoiceFilter,
ModelMultipleChoiceFilter, NumberFilter)
ORDER_BY_FIELD = 'o'
if version_info < (2, 7, 0):
def _deepcopy_method(x, memo):
return type(x)(x.im_func, copy.deepcopy(x.im_self, memo), x.im_class)
copy._deepcopy_dispatch[types.MethodType] = _deepcopy_method
class STRICTNESS(object):
IGNORE = False
RETURN_NO_RESULTS = True
RAISE_VALIDATION_ERROR = "RAISE"
def get_declared_filters(bases, attrs, with_base_filters=True):
filters = []
for filter_name, obj in list(attrs.items()):
if isinstance(obj, Filter):
obj = attrs.pop(filter_name)
if getattr(obj, 'name', None) is None:
obj.name = filter_name
filters.append((filter_name, obj))
filters.sort(key=lambda x: x[1].creation_counter)
if with_base_filters:
for base in bases[::-1]:
if hasattr(base, 'base_filters'):
filters = list(base.base_filters.items()) + filters
else:
for base in bases[::-1]:
if hasattr(base, 'declared_filters'):
filters = list(base.declared_filters.items()) + filters
return OrderedDict(filters)
def get_model_field(model, f):
parts = f.split(LOOKUP_SEP)
opts = model._meta
for name in parts[:-1]:
try:
rel = opts.get_field_by_name(name)[0]
except FieldDoesNotExist:
return None
if isinstance(rel, ForeignObjectRel):
if hasattr(rel, "related_model"):
opts = rel.related_model._meta
else:
opts = rel.opts
else:
model = rel.rel.to
opts = model._meta
try:
rel, model, direct, m2m = opts.get_field_by_name(parts[-1])
except FieldDoesNotExist:
return None
return rel
def filters_for_model(model, fields=None, exclude=None, filter_for_field=None,
filter_for_reverse_field=None):
field_dict = OrderedDict()
opts = model._meta
if fields is None:
fields = [f.name for f in sorted(opts.fields + opts.many_to_many)
if not isinstance(f, models.AutoField)]
for f in fields:
if exclude is not None and f in exclude:
continue
field = get_model_field(model, f)
if field is None:
field_dict[f] = None
continue
if isinstance(field, ForeignObjectRel):
filter_ = filter_for_reverse_field(field, f)
if filter_:
field_dict[f] = filter_
# If fields is a dictionary, it must contain lists.
elif isinstance(fields, dict):
# Create a filter for each lookup type.
for lookup_type in fields[f]:
filter_ = filter_for_field(field, f, lookup_type)
if filter_:
filter_name = f
# Don't add "exact" to filter names
if lookup_type != 'exact':
filter_name = f + LOOKUP_SEP + lookup_type
field_dict[filter_name] = filter_
else:
filter_ = filter_for_field(field, f)
if filter_:
field_dict[f] = filter_
return field_dict
def get_full_clean_override(together):
def full_clean(form):
def add_error(message):
try:
form.add_error(None, message)
except AttributeError:
form._errors[NON_FIELD_ERRORS] = message
def all_valid(fieldset):
cleaned_data = form.cleaned_data
count = len([i for i in fieldset if cleaned_data.get(i)])
return 0 < count < len(fieldset)
super(form.__class__, form).full_clean()
message = 'Following fields must be together: %s'
if isinstance(together[0], (list, tuple)):
for each in together:
if all_valid(each):
return add_error(message % ','.join(each))
elif all_valid(together):
return add_error(message % ','.join(together))
return full_clean
class FilterSetOptions(object):
def __init__(self, options=None):
self.model = getattr(options, 'model', None)
self.fields = getattr(options, 'fields', None)
self.exclude = getattr(options, 'exclude', None)
self.order_by = getattr(options, 'order_by', False)
self.form = getattr(options, 'form', forms.Form)
self.together = getattr(options, 'together', None)
class FilterSetMetaclass(type):
def __new__(cls, name, bases, attrs):
try:
parents = [b for b in bases if issubclass(b, FilterSet)]
except NameError:
parents = None
declared_filters = get_declared_filters(bases, attrs, False)
new_class = super(
FilterSetMetaclass, cls).__new__(cls, name, bases, attrs)
if not parents:
return new_class
opts = new_class._meta = FilterSetOptions(
getattr(new_class, 'Meta', None))
if opts.model:
filters = filters_for_model(opts.model, opts.fields, opts.exclude,
new_class.filter_for_field,
new_class.filter_for_reverse_field)
filters.update(declared_filters)
else:
filters = declared_filters
if None in filters.values():
raise TypeError("Meta.fields contains a field that isn't defined "
"on this FilterSet")
new_class.declared_filters = declared_filters
new_class.base_filters = filters
return new_class
FILTER_FOR_DBFIELD_DEFAULTS = {
models.AutoField: {
'filter_class': NumberFilter
},
models.CharField: {
'filter_class': CharFilter
},
models.TextField: {
'filter_class': CharFilter
},
models.BooleanField: {
'filter_class': BooleanFilter
},
models.DateField: {
'filter_class': DateFilter
},
models.DateTimeField: {
'filter_class': DateTimeFilter
},
models.TimeField: {
'filter_class': TimeFilter
},
models.OneToOneField: {
'filter_class': ModelChoiceFilter,
'extra': lambda f: {
'queryset': f.rel.to._default_manager.complex_filter(
f.rel.limit_choices_to),
'to_field_name': f.rel.field_name,
}
},
models.ForeignKey: {
'filter_class': ModelChoiceFilter,
'extra': lambda f: {
'queryset': f.rel.to._default_manager.complex_filter(
f.rel.limit_choices_to),
'to_field_name': f.rel.field_name
}
},
models.ManyToManyField: {
'filter_class': ModelMultipleChoiceFilter,
'extra': lambda f: {
'queryset': f.rel.to._default_manager.complex_filter(
f.rel.limit_choices_to),
}
},
models.DecimalField: {
'filter_class': NumberFilter,
},
models.SmallIntegerField: {
'filter_class': NumberFilter,
},
models.IntegerField: {
'filter_class': NumberFilter,
},
models.PositiveIntegerField: {
'filter_class': NumberFilter,
},
models.PositiveSmallIntegerField: {
'filter_class': NumberFilter,
},
models.FloatField: {
'filter_class': NumberFilter,
},
models.NullBooleanField: {
'filter_class': BooleanFilter,
},
models.SlugField: {
'filter_class': CharFilter,
},
models.EmailField: {
'filter_class': CharFilter,
},
models.FilePathField: {
'filter_class': CharFilter,
},
models.URLField: {
'filter_class': CharFilter,
},
models.IPAddressField: {
'filter_class': CharFilter,
},
models.CommaSeparatedIntegerField: {
'filter_class': CharFilter,
},
}
class BaseFilterSet(object):
filter_overrides = {}
order_by_field = ORDER_BY_FIELD
    # What to do on validation errors
strict = STRICTNESS.RETURN_NO_RESULTS
def __init__(self, data=None, queryset=None, prefix=None, strict=None):
self.is_bound = data is not None
self.data = data or {}
if queryset is None:
queryset = self._meta.model._default_manager.all()
self.queryset = queryset
self.form_prefix = prefix
if strict is not None:
self.strict = strict
self.filters = copy.deepcopy(self.base_filters)
# propagate the model being used through the filters
for filter_ in self.filters.values():
filter_.model = self._meta.model
# Apply the parent to the filters, this will allow the filters to access the filterset
for filter_key, filter_ in six.iteritems(self.filters):
filter_.parent = self
def __iter__(self):
for obj in self.qs:
yield obj
def __len__(self):
return len(self.qs)
def __getitem__(self, key):
return self.qs[key]
@property
def qs(self):
if not hasattr(self, '_qs'):
valid = self.is_bound and self.form.is_valid()
if self.is_bound and not valid:
if self.strict == STRICTNESS.RAISE_VALIDATION_ERROR:
raise forms.ValidationError(self.form.errors)
elif bool(self.strict) == STRICTNESS.RETURN_NO_RESULTS:
self._qs = self.queryset.none()
return self._qs
# else STRICTNESS.IGNORE... ignoring
# start with all the results and filter from there
qs = self.queryset.all()
for name, filter_ in six.iteritems(self.filters):
value = None
if valid:
value = self.form.cleaned_data[name]
else:
raw_value = self.form[name].value()
try:
value = self.form.fields[name].clean(raw_value)
except forms.ValidationError:
if self.strict == STRICTNESS.RAISE_VALIDATION_ERROR:
raise
elif bool(self.strict) == STRICTNESS.RETURN_NO_RESULTS:
self._qs = self.queryset.none()
return self._qs
# else STRICTNESS.IGNORE... ignoring
if value is not None: # valid & clean data
qs = filter_.filter(qs, value)
if self._meta.order_by:
order_field = self.form.fields[self.order_by_field]
data = self.form[self.order_by_field].data
ordered_value = None
try:
ordered_value = order_field.clean(data)
except forms.ValidationError:
pass
if ordered_value in EMPTY_VALUES and self.strict:
ordered_value = self.form.fields[self.order_by_field].choices[0][0]
if ordered_value:
qs = qs.order_by(*self.get_order_by(ordered_value))
self._qs = qs
return self._qs
def count(self):
return self.qs.count()
@property
def form(self):
if not hasattr(self, '_form'):
fields = OrderedDict([
(name, filter_.field)
for name, filter_ in six.iteritems(self.filters)])
fields[self.order_by_field] = self.ordering_field
Form = type(str('%sForm' % self.__class__.__name__),
(self._meta.form,), fields)
if self._meta.together:
Form.full_clean = get_full_clean_override(self._meta.together)
if self.is_bound:
self._form = Form(self.data, prefix=self.form_prefix)
else:
self._form = Form(prefix=self.form_prefix)
return self._form
def get_ordering_field(self):
if self._meta.order_by:
if isinstance(self._meta.order_by, (list, tuple)):
if isinstance(self._meta.order_by[0], (list, tuple)):
# e.g. (('field', 'Display name'), ...)
choices = [(f[0], f[1]) for f in self._meta.order_by]
else:
choices = [(f, _('%s (descending)' % capfirst(f[1:])) if f[0] == '-' else capfirst(f))
for f in self._meta.order_by]
else:
# add asc and desc field names
# use the filter's label if provided
choices = []
for f, fltr in self.filters.items():
choices.extend([
(fltr.name or f, fltr.label or capfirst(f)),
("-%s" % (fltr.name or f), _('%s (descending)' % (fltr.label or capfirst(f))))
])
return forms.ChoiceField(label=_("Ordering"), required=False,
choices=choices)
@property
def ordering_field(self):
if not hasattr(self, '_ordering_field'):
self._ordering_field = self.get_ordering_field()
return self._ordering_field
def get_order_by(self, order_choice):
return [order_choice]
@classmethod
def filter_for_field(cls, f, name, lookup_type='exact'):
filter_for_field = dict(FILTER_FOR_DBFIELD_DEFAULTS)
filter_for_field.update(cls.filter_overrides)
default = {
'name': name,
'label': capfirst(f.verbose_name),
'lookup_type': lookup_type
}
if f.choices:
default['choices'] = f.choices
return ChoiceFilter(**default)
data = filter_for_field.get(f.__class__)
if data is None:
for class_ in f.__class__.mro():
if class_ in (f.__class__, models.Field, object):
continue
data = filter_for_field.get(class_)
if data:
break
if data is None:
return
filter_class = data.get('filter_class')
default.update(data.get('extra', lambda f: {})(f))
if filter_class is not None:
return filter_class(**default)
@classmethod
def filter_for_reverse_field(cls, f, name):
rel = f.field.rel
queryset = f.field.model._default_manager.all()
default = {
'name': name,
'label': capfirst(rel.related_name),
'queryset': queryset,
}
if rel.multiple:
return ModelMultipleChoiceFilter(**default)
else:
return ModelChoiceFilter(**default)
class FilterSet(six.with_metaclass(FilterSetMetaclass, BaseFilterSet)):
pass
def filterset_factory(model):
meta = type(str('Meta'), (object,), {'model': model})
filterset = type(str('%sFilterSet' % model._meta.object_name),
(FilterSet,), {'Meta': meta})
return filterset
| true
| true
|
790414eed7a4aee29cbcf69ab462c7f4242d3ab6
| 6,327
|
py
|
Python
|
sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2021_04_01_preview/_key_vault_management_client.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2021_04_01_preview/_key_vault_management_client.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2021_04_01_preview/_key_vault_management_client.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from ._configuration import KeyVaultManagementClientConfiguration
from .operations import VaultsOperations
from .operations import PrivateEndpointConnectionsOperations
from .operations import PrivateLinkResourcesOperations
from .operations import ManagedHsmsOperations
from .operations import MHSMPrivateEndpointConnectionsOperations
from .operations import MHSMPrivateLinkResourcesOperations
from .operations import Operations
from . import models
class KeyVaultManagementClient(object):
"""The Azure management API provides a RESTful set of web services that interact with Azure Key Vault.
:ivar vaults: VaultsOperations operations
:vartype vaults: azure.mgmt.keyvault.v2021_04_01_preview.operations.VaultsOperations
:ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
:vartype private_endpoint_connections: azure.mgmt.keyvault.v2021_04_01_preview.operations.PrivateEndpointConnectionsOperations
:ivar private_link_resources: PrivateLinkResourcesOperations operations
:vartype private_link_resources: azure.mgmt.keyvault.v2021_04_01_preview.operations.PrivateLinkResourcesOperations
:ivar managed_hsms: ManagedHsmsOperations operations
:vartype managed_hsms: azure.mgmt.keyvault.v2021_04_01_preview.operations.ManagedHsmsOperations
:ivar mhsm_private_endpoint_connections: MHSMPrivateEndpointConnectionsOperations operations
:vartype mhsm_private_endpoint_connections: azure.mgmt.keyvault.v2021_04_01_preview.operations.MHSMPrivateEndpointConnectionsOperations
:ivar mhsm_private_link_resources: MHSMPrivateLinkResourcesOperations operations
:vartype mhsm_private_link_resources: azure.mgmt.keyvault.v2021_04_01_preview.operations.MHSMPrivateLinkResourcesOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.keyvault.v2021_04_01_preview.operations.Operations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: Subscription credentials which uniquely identify the Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
base_url=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
if not base_url:
base_url = 'https://management.azure.com'
self._config = KeyVaultManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.vaults = VaultsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_link_resources = PrivateLinkResourcesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.managed_hsms = ManagedHsmsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.mhsm_private_endpoint_connections = MHSMPrivateEndpointConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.mhsm_private_link_resources = MHSMPrivateLinkResourcesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, http_request, **kwargs):
# type: (HttpRequest, Any) -> HttpResponse
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.HttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> KeyVaultManagementClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
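# Illustrative usage sketch (our own example, not part of the generated file):
# constructing the client with ``DefaultAzureCredential`` from the
# azure-identity package and listing vaults. The subscription id is a
# placeholder.
#
# from azure.identity import DefaultAzureCredential
# from azure.mgmt.keyvault.v2021_04_01_preview import KeyVaultManagementClient
#
# credential = DefaultAzureCredential()
# client = KeyVaultManagementClient(credential, "<subscription-id>")
# for vault in client.vaults.list_by_subscription():
#     print(vault.name)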
| 52.725
| 172
| 0.739687
|
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from ._configuration import KeyVaultManagementClientConfiguration
from .operations import VaultsOperations
from .operations import PrivateEndpointConnectionsOperations
from .operations import PrivateLinkResourcesOperations
from .operations import ManagedHsmsOperations
from .operations import MHSMPrivateEndpointConnectionsOperations
from .operations import MHSMPrivateLinkResourcesOperations
from .operations import Operations
from . import models
class KeyVaultManagementClient(object):
def __init__(
self,
credential,
subscription_id,
base_url=None,
**kwargs
):
if not base_url:
base_url = 'https://management.azure.com'
self._config = KeyVaultManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.vaults = VaultsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_link_resources = PrivateLinkResourcesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.managed_hsms = ManagedHsmsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.mhsm_private_endpoint_connections = MHSMPrivateEndpointConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.mhsm_private_link_resources = MHSMPrivateLinkResourcesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, http_request, **kwargs):
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
def close(self):
self._client.close()
def __enter__(self):
self._client.__enter__()
return self
def __exit__(self, *exc_details):
self._client.__exit__(*exc_details)
| true
| true
|
790415bee65970c117ada3a9e0c7250a6b0058c9
| 6,067
|
py
|
Python
|
venv/Lib/site-packages/tensorflow/contrib/periodic_resample/python/ops/gen_periodic_resample_op.py
|
ishatserka/MachineLearningAndDataAnalysisCoursera
|
e82e772df2f4aec162cb34ac6127df10d14a625a
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/tensorflow/contrib/periodic_resample/python/ops/gen_periodic_resample_op.py
|
ishatserka/MachineLearningAndDataAnalysisCoursera
|
e82e772df2f4aec162cb34ac6127df10d14a625a
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/tensorflow/contrib/periodic_resample/python/ops/gen_periodic_resample_op.py
|
ishatserka/MachineLearningAndDataAnalysisCoursera
|
e82e772df2f4aec162cb34ac6127df10d14a625a
|
[
"MIT"
] | 1
|
2019-11-29T12:28:33.000Z
|
2019-11-29T12:28:33.000Z
|
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
"""
import collections as _collections
from tensorflow.python.eager import execute as _execute
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.tf_export import tf_export
@tf_export('PeriodicResample')
def periodic_resample(values, shape, name=None):
r"""Periodically resample elements of a tensor to conform to `shape`.
This function implements a slightly more generic version of the subpixel
convolutions found in this [paper](https://arxiv.org/abs/1609.05158).
The formula for computing the elements in the `output` tensor is as follows:
`T` = `values` tensor of rank `R`
`S` = desired `shape` of output tensor (vector of length `R`)
`P` = `output` tensor of rank `R`
\((T_1,\ldots,T_R)\) = shape(`T`)
\([S_1,\ldots,S_q,\ldots,S_R]\) = elements of vector `S`
A single element in `S` is left unspecified (denoted \(S_q=-1\)).
Let \(f_i\) denote the (possibly non-integer) factor that relates the original
dimension to the desired dimensions, \(S_i=f_i T_i\), for \(i\neq q\) where
\(f_i>0\).
Define the following:
\(g_i=\lceil f_i\rceil\)
\(t=\prod_i T_i\)
\(s=\prod_{i\neq q} S_i\)
  \(S_q\) can then be defined by \(S_q=\lfloor t/s\rfloor\).
The elements of the resulting tensor are defined as
\(P_{s_1,\ldots,s_R}=T_{h_1,\ldots,h_q,\ldots,h_R}\).
The \(h_i\) (\(i\neq q\)) are defined by \(h_i=\lfloor s_i/g_i\rfloor\).
\(h_q=S_q\sum_{j\neq q}^{q-1}G_j \mathrm{mod}(s_j,g_j) + s_q\), where
\(G_j=\prod_{i}^{j-1}g_i\) (\(G_0=1\)).
One drawback of this method is that whenever the output dimensions are slightly
less than integer multiples of the input dimensions, many of the tensor elements
are repeated in an inefficient way. This is resolved by specifying that all
desired dimensions are integer multiples of the input tensor.
For example:
```prettyprint
`input` is [[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
tf.periodic_resample(input, [6, None]) ==> [[ 0 1]
[ 2 3]
[ 4 5]
[ 6 7]
[ 8 9]
[10 11]]
```
Args:
values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`, `uint32`, `uint64`, `bfloat16`.
The tensor of rank `R` to periodic_resample
shape: A `tf.TensorShape` or list of `ints`.
A 1-D tensor representing the desired shape of the output tensor.
Exactly one element of this tensor must have the value `None` which represents
that this dimension of `values` can be adjusted downward in order to
accommodate increases in other dimensions. The specified sizes of the
    non-adjustable dimensions must be at least as large as in the `values` tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `values`.
Periodically resampled tensor that has dimensions specified as in
`shape` except that the dimension specified as `None` will be minimally
decreased as necessary.
"""
shape = _execute.make_shape(shape, "shape")
_ctx = _context.context()
if _ctx.in_graph_mode():
_, _, _op = _op_def_lib._apply_op_helper(
"PeriodicResample", values=values, shape=shape, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "shape", _op.get_attr("shape"))
else:
_attr_T, (values,) = _execute.args_to_matching_eager([values], _ctx)
_inputs_flat = [values]
_attrs = ("T", _attr_T, "shape", shape)
_result = _execute.execute(b"PeriodicResample", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"PeriodicResample", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
# op {
# name: "PeriodicResample"
# input_arg {
# name: "values"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT64
# type: DT_INT32
# type: DT_UINT8
# type: DT_UINT16
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# type: DT_BFLOAT16
# }
# }
# }
# attr {
# name: "shape"
# type: "shape"
# }
# }
_op_def_lib = _InitOpDefLibrary(b"\n^\n\020PeriodicResample\022\013\n\006values\"\001T\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\t\003\004\021\005\006\010\022\013\014\r\023\026\027\016\"\016\n\005shape\022\005shape")
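# Illustrative usage sketch (our own example, graph-mode TF 1.x, since this is
# a generated contrib op): resample a 3x4 integer tensor to shape [6, None],
# matching the docstring example above.
#
# import tensorflow as tf
# from tensorflow.contrib.periodic_resample import periodic_resample
#
# with tf.Session() as sess:
#     values = tf.reshape(tf.range(12), [3, 4])
#     resampled = periodic_resample(values, [6, None])
#     print(sess.run(resampled))  # six rows of two elements each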
| 38.891026
| 247
| 0.647602
|
import collections as _collections
from tensorflow.python.eager import execute as _execute
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.tf_export import tf_export
@tf_export('PeriodicResample')
def periodic_resample(values, shape, name=None):
shape = _execute.make_shape(shape, "shape")
_ctx = _context.context()
if _ctx.in_graph_mode():
_, _, _op = _op_def_lib._apply_op_helper(
"PeriodicResample", values=values, shape=shape, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "shape", _op.get_attr("shape"))
else:
_attr_T, (values,) = _execute.args_to_matching_eager([values], _ctx)
_inputs_flat = [values]
_attrs = ("T", _attr_T, "shape", shape)
_result = _execute.execute(b"PeriodicResample", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"PeriodicResample", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
_op_def_lib = _InitOpDefLibrary(b"\n^\n\020PeriodicResample\022\013\n\006values\"\001T\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\t\003\004\021\005\006\010\022\013\014\r\023\026\027\016\"\016\n\005shape\022\005shape")
| true
| true
|
7904170d3c8142ffcd70d73d75dd5cdaa82082ab
| 1,122
|
py
|
Python
|
scripts/pivot_cluster_day.py
|
isabella232/allsongsconsidered-poll
|
f4b63effcf57c6b6680eac9f11a55cd0541e358c
|
[
"MIT"
] | 3
|
2018-01-04T12:07:28.000Z
|
2018-04-10T02:10:27.000Z
|
scripts/pivot_cluster_day.py
|
nprapps/allsongsconsidered-poll
|
f4b63effcf57c6b6680eac9f11a55cd0541e358c
|
[
"MIT"
] | 1
|
2021-02-24T06:47:12.000Z
|
2021-02-24T06:47:12.000Z
|
scripts/pivot_cluster_day.py
|
isabella232/allsongsconsidered-poll
|
f4b63effcf57c6b6680eac9f11a55cd0541e358c
|
[
"MIT"
] | 3
|
2018-08-03T22:10:04.000Z
|
2022-03-23T11:33:55.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import sys
import numpy as np
import pandas as pd
def run(args):
data = pd.read_csv(sys.stdin)
    # Find the maximum rank value and increase it by one to use as a
    # fill_value on the pivot with cluster by day:
    # notfound_value = grouped['rank'].max()+1
    # Create the pivot table and fill non-existing entries with a high
    # number, e.g. 200.
pivot = pd.pivot_table(data,
values='rank',
index='Cluster ID',
columns=['day'],
fill_value=args.notfound_value,
aggfunc=np.sum)
# Write output
pivot.to_csv(sys.stdout)
if __name__ == '__main__':
# Parse command-line arguments.
parser = argparse.ArgumentParser(
description="Pivot table by cluster and day of the poll")
parser.add_argument('--notfound_value',
type=int,
help="value to assign to N/A values on pivot table",
required=True)
args = parser.parse_args()
run(args)
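# Illustrative invocation sketch (our own example): the script reads the CSV
# from stdin and writes the pivot to stdout; the file names are placeholders.
#
#   python pivot_cluster_day.py --notfound_value 200 < clusters.csv > pivot.csv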
| 29.526316
| 76
| 0.564171
|
import argparse
import sys
import numpy as np
import pandas as pd
def run(args):
data = pd.read_csv(sys.stdin)
    pivot = pd.pivot_table(data,
                           values='rank',
                           index='Cluster ID',
                           columns=['day'],
                           fill_value=args.notfound_value,
                           aggfunc=np.sum)
pivot.to_csv(sys.stdout)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Pivot table by cluster and day of the poll")
parser.add_argument('--notfound_value',
type=int,
help="value to assign to N/A values on pivot table",
required=True)
args = parser.parse_args()
run(args)
| true
| true
|
7904176478811e02fa8f18ce975f9b7bee88c17a
| 488
|
py
|
Python
|
visitor_manage/src/migrations/0011_auto_20200622_1909.py
|
parth-27/Visitor-Management-System
|
575b7ad1a4eecf65e764399ea53e836b8cf1768d
|
[
"MIT"
] | 3
|
2020-06-30T15:46:56.000Z
|
2021-11-17T13:13:15.000Z
|
visitor_manage/src/migrations/0011_auto_20200622_1909.py
|
DipikaPawar12/Visitor-Management-System
|
9585d8613daa4f5a0b0a81cce6850f79db768a64
|
[
"MIT"
] | 1
|
2021-06-10T19:39:03.000Z
|
2021-06-10T19:39:03.000Z
|
visitor_manage/src/migrations/0011_auto_20200622_1909.py
|
DipikaPawar12/Visitor-Management-System
|
9585d8613daa4f5a0b0a81cce6850f79db768a64
|
[
"MIT"
] | 1
|
2020-06-30T15:49:07.000Z
|
2020-06-30T15:49:07.000Z
|
# Generated by Django 3.0.6 on 2020-06-22 13:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('src', '0010_temporaryuser'),
]
operations = [
migrations.RemoveField(
model_name='admin',
name='username',
),
migrations.AlterField(
model_name='temporaryuser',
name='mail',
field=models.EmailField(max_length=320),
),
]
| 21.217391
| 52
| 0.567623
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('src', '0010_temporaryuser'),
]
operations = [
migrations.RemoveField(
model_name='admin',
name='username',
),
migrations.AlterField(
model_name='temporaryuser',
name='mail',
field=models.EmailField(max_length=320),
),
]
| true
| true
|
7904181897d18bef74f6721d936b2d86fe42d63f
| 1,124
|
py
|
Python
|
reco_utils/recommender/geoimc/geoimc_utils.py
|
suhoy901/recommenders
|
8ec9f1950d694a5aeaa3d463ac23cad661a30a11
|
[
"MIT"
] | 28
|
2021-11-12T08:26:40.000Z
|
2022-03-27T07:21:24.000Z
|
reco_utils/recommender/geoimc/geoimc_utils.py
|
shobhit-agarwal/recommenders
|
8ec9f1950d694a5aeaa3d463ac23cad661a30a11
|
[
"MIT"
] | 5
|
2021-11-10T02:58:32.000Z
|
2022-03-21T16:13:11.000Z
|
reco_utils/recommender/geoimc/geoimc_utils.py
|
shobhit-agarwal/recommenders
|
8ec9f1950d694a5aeaa3d463ac23cad661a30a11
|
[
"MIT"
] | 9
|
2021-11-03T07:14:47.000Z
|
2022-02-22T13:42:04.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
from sklearn.decomposition import PCA
from reco_utils.dataset.download_utils import maybe_download
from IPython import embed
def length_normalize(matrix):
"""Length normalize the matrix
Args:
matrix (np.ndarray): Input matrix that needs to be normalized
Returns:
Normalized matrix
"""
norms = np.sqrt(np.sum(matrix**2, axis=1))
norms[norms == 0] = 1
return matrix / norms[:, np.newaxis]
def mean_center(matrix):
"""Performs mean centering across axis 0
Args:
matrix (np.ndarray): Input matrix that needs to be mean centered
"""
avg = np.mean(matrix, axis=0)
matrix -= avg
def reduce_dims(matrix, target_dim):
"""Reduce dimensionality of the data using PCA.
Args:
        matrix (np.ndarray): Matrix of the form (n_samples, n_features)
        target_dim (uint): Dimension to which n_features should be reduced.
"""
model = PCA(n_components=target_dim)
model.fit(matrix)
return model.transform(matrix)
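# Illustrative usage sketch (our own example): normalising and reducing a
# random feature matrix; the shapes below are arbitrary.
#
# X = np.random.rand(100, 50)
# Xn = length_normalize(X.copy())  # rows now have unit L2 norm
# mean_center(Xn)                  # in-place mean centering across axis 0
# X10 = reduce_dims(Xn, 10)        # (100, 10) PCA projection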
| 24.977778
| 78
| 0.685943
|
import numpy as np
from sklearn.decomposition import PCA
from reco_utils.dataset.download_utils import maybe_download
from IPython import embed
def length_normalize(matrix):
norms = np.sqrt(np.sum(matrix**2, axis=1))
norms[norms == 0] = 1
return matrix / norms[:, np.newaxis]
def mean_center(matrix):
avg = np.mean(matrix, axis=0)
matrix -= avg
def reduce_dims(matrix, target_dim):
model = PCA(n_components=target_dim)
model.fit(matrix)
return model.transform(matrix)
| true
| true
|
7904183312d4874c673a84bb789e7b6e2a22235a
| 20,353
|
py
|
Python
|
Chapter5_LossFunctions/DarkWorldsMetric.py
|
RKDSOne/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers
|
a3d172bd92a8eab8f2eb87ebe707b9a102eec747
|
[
"MIT"
] | 17
|
2016-11-19T14:11:06.000Z
|
2021-11-27T09:18:18.000Z
|
Chapter5_LossFunctions/DarkWorldsMetric.py
|
djv/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers
|
ff9ee06a677efa522939f25d95ac87b9804440dc
|
[
"MIT"
] | null | null | null |
Chapter5_LossFunctions/DarkWorldsMetric.py
|
djv/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers
|
ff9ee06a677efa522939f25d95ac87b9804440dc
|
[
"MIT"
] | 9
|
2015-10-08T09:40:12.000Z
|
2021-01-18T08:26:29.000Z
|
""" DarkWorldsMetricMountianOsteric.py
Custom evaluation metric for the 'Observing Dark Worlds' competition.
[Description of metric, or reference to documentation.]
Update: Made for the training set only so users can check their results from the training c
@Author: David Harvey
Created: 22 August 2012
"""
import numpy as np
import math as mt
import itertools as it
import csv as c
import getopt as gt
import sys as sys
import argparse as ap
import string as st
import random as rd
def calc_delta_r(x_predicted,y_predicted,x_true,y_true):
""" Compute the scalar distance between predicted halo centers
and the true halo centers. Predictions are matched to the closest
halo center.
    Notes: It takes in the predicted and true positions, and then loops over each possible configuration and finds the optimal one.
Arguments:
x_predicted, y_predicted: vector for predicted x- and y-positions (1 to 3 elements)
x_true, y_true: vector for known x- and y-positions (1 to 3 elements)
Returns:
radial_distance: vector containing the scalar distances between the predicted halo centres and the true halo centres (1 to 3 elements)
        true_halo_indexes: vector containing indexes of the input true halos which match the predicted halo indexes (1 to 3 elements)
        measured_halo_indexes: vector containing indexes of the predicted halo position with reference to the true halo position.
        e.g. if true_halo_indexes=[0,1] and measured_halo_indexes=[1,0] then the first x,y coordinates of the true halo position match the second input of the predicted x,y coordinates.
"""
    num_halos=len(x_true) #Only works for number of halos > 1
    num_configurations=mt.factorial(num_halos) #The number of possible different combinations
configurations=np.zeros([num_halos,num_configurations],int) #The array of combinations
#I will pass back
distances = np.zeros([num_configurations],float) #THe array of the distances
#for all possible combinations
radial_distance=[] #The vector of distances
#I will pass back
#Pick a combination of true and predicted
    a=['01','012'] #Input for the permutations: '01' for two halos, '012' for three
count=0 #For the index of the distances array
true_halo_indexes=[] #The tuples which will show the order of halos picked
predicted_halo_indexes=[]
    distances_perm=np.zeros([num_configurations,num_halos],float) #The distance between each
#true and predicted
#halo for every comb
true_halo_indexes_perm=[] #log of all the permutations of true halos used
predicted_halo_indexes_perm=[] #log of all the predicted permutations
for perm in it.permutations(a[num_halos-2],num_halos):
which_true_halos=[]
which_predicted_halos=[]
        for j in xrange(num_halos): #loop through all the true halos
distances_perm[count,j]=np.sqrt((x_true[j]-x_predicted[int(perm[j])])**2\
+(y_true[j]-y_predicted[int(perm[j])])**2)
                                            #This array logs the distance between true and
                                            #predicted halo for ALL configurations
            which_true_halos.append(j) #log the order in which I try each true halo
            which_predicted_halos.append(int(perm[j])) #log the order in which I try
#each predicted halo
        true_halo_indexes_perm.append(which_true_halos) #this is a tuple of tuples of
                                                        #all the different config
#true halo indexes
predicted_halo_indexes_perm.append(which_predicted_halos)
distances[count]=sum(distances_perm[count,0::]) #Find what the total distances
#are for each configuration
count=count+1
config = np.where(distances == min(distances))[0][0] #The configuration used is the one
#which has the smallest distance
radial_distance.append(distances_perm[config,0::]) #Find the tuple of distances that
#correspond to this smallest distance
true_halo_indexes=true_halo_indexes_perm[config] #Find the tuple of the index which refers
#to the smallest distance
predicted_halo_indexes=predicted_halo_indexes_perm[config]
return radial_distance,true_halo_indexes,predicted_halo_indexes
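# A standalone illustration (helper name and inputs invented; reuses the
# module's np/it imports) of the same brute-force idea: try every assignment
# of predictions to true halos and keep the smallest total distance.
def _match_halos_demo(pred, true):
    #returns the permutation p minimizing total distance, where p[j] is the
    #prediction index matched to true halo j
    n = len(true)
    return min(it.permutations(range(n)),
               key=lambda p: sum(np.hypot(*(true[j] - pred[p[j]])) for j in range(n)))
#_match_halos_demo(np.array([[10., 10.], [50., 40.]]),
#                  np.array([[48., 42.], [11., 9.]]))  # -> (1, 0)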
def calc_theta(x_predicted, y_predicted, x_true, y_true, x_ref, y_ref):
""" Calculate the angle the predicted position and the true position, where the zero degree corresponds to the line joing the true halo position and the reference point given.
Arguments:
x_predicted, y_predicted: vector for predicted x- and y-positions (1 to 3 elements)
x_true, y_true: vector for known x- and y-positions (1 to 3 elements)
Note that the input of these are matched up so that the first elements of each
vector are associated with one another
x_ref, y_ref: scalars of the x,y coordinate of reference point
Returns:
Theta: A vector containing the angles of the predicted halo w.r.t the true halo
with the vector joining the reference point and the halo as the zero line.
"""
num_halos=len(x_predicted)
theta=np.zeros([num_halos+1],float) #Set up the array which will pass back the values
phi = np.zeros([num_halos],float)
psi = np.arctan( (y_true-y_ref)/(x_true-x_ref) )
    # Angle at which the halo sits
    #with respect to the reference point
phi[x_true != x_ref] = np.arctan((y_predicted[x_true != x_predicted]-\
y_true[x_true != x_predicted])\
/(x_predicted[x_true != x_predicted]-\
x_true[x_true != x_predicted])) # Angle of the estimate
#wrt true halo centre
    #Before finding the angle with the zero line as the line joining the halo and the reference
#point I need to convert the angle produced by Python to an angle between 0 and 2pi
phi =convert_to_360(phi, x_predicted-x_true,\
y_predicted-y_true)
psi = convert_to_360(psi, x_true-x_ref,\
y_true-y_ref)
    theta = phi-psi #The angle with the baseline as the line joining the ref and the halo
theta[theta< 0.0]=theta[theta< 0.0]+2.0*mt.pi #If the angle of the true pos wrt the ref is
#greater than the angle of predicted pos
#and the true pos then add 2pi
return theta
def convert_to_360(angle, x_in, y_in):
""" Convert the given angle to the true angle in the range 0:2pi
Arguments:
        angle: the angle to convert
        x_in, y_in: the x and y coordinates used to determine the quadrant
                    the coordinate lies in, so as to add pi or 2pi
Returns:
theta: the angle in the range 0:2pi
"""
n = len(x_in)
for i in xrange(n):
if x_in[i] < 0 and y_in[i] > 0:
angle[i] = angle[i]+mt.pi
elif x_in[i] < 0 and y_in[i] < 0:
angle[i] = angle[i]+mt.pi
elif x_in[i] > 0 and y_in[i] < 0:
angle[i] = angle[i]+2.0*mt.pi
elif x_in[i] == 0 and y_in[i] == 0:
angle[i] = 0
elif x_in[i] == 0 and y_in[i] > 0:
angle[i] = mt.pi/2.
elif x_in[i] < 0 and y_in[i] == 0:
angle[i] = mt.pi
elif x_in[i] == 0 and y_in[i] < 0:
angle[i] = 3.*mt.pi/2.
return angle
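# A side note offered as an assumption, not something from the original:
# numpy's arctan2 performs the same quadrant correction as convert_to_360 in
# one call, including the axis special cases handled explicitly above.
def _to_0_2pi_demo(x, y):
    #quadrant-corrected angle in [0, 2*pi) for scalar x, y
    return np.mod(np.arctan2(y, x), 2.0 * mt.pi)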
def get_ref(x_halo,y_halo,weight):
""" Gets the reference point of the system of halos by weighted averaging the x and y
coordinates.
Arguments:
        x_halo, y_halo: vectors of length num_halos referring to the coordinates of the halos
weight: the weight which will be assigned to the position of the halo
num_halos: number of halos in the system
Returns:
x_ref, y_ref: The coordinates of the reference point for the metric
"""
#Find the weighted average of the x and y coordinates
x_ref = np.sum([x_halo*weight])/np.sum([weight])
y_ref = np.sum([y_halo*weight])/np.sum([weight])
return x_ref,y_ref
def main_score( nhalo_all, x_true_all, y_true_all, x_ref_all, y_ref_all, sky_prediction):
"""abstracts the score from the old command-line interface.
sky_prediction is a dx2 array of predicted x,y positions
-camdp"""
r=np.array([],dtype=float) # The array which I will log all the calculated radial distances
angle=np.array([],dtype=float) #The array which I will log all the calculated angles
#Load in the sky_ids from the true
    num_halos_total=0 #Keep track of how many halos are input into the metric
for selectskyinsolutions, sky in enumerate(sky_prediction): #Loop through each line in result.csv and analyse each one
nhalo=int(nhalo_all[selectskyinsolutions])#How many halos in the
#selected sky?
x_true=x_true_all[selectskyinsolutions][0:nhalo]
y_true=y_true_all[selectskyinsolutions][0:nhalo]
x_predicted=np.array([],dtype=float)
y_predicted=np.array([],dtype=float)
for i in xrange(nhalo):
            x_predicted=np.append(x_predicted,float(sky[0])) #get the predicted values
y_predicted=np.append(y_predicted,float(sky[1]))
#The solution file for the test data provides masses
        #to calculate the centre of mass whereas the Training_halo.csv
        #directly provides x_ref y_ref. So in the case of test data
        #we need to calculate the ref point from the masses using
        #get_ref()
x_ref=x_ref_all[selectskyinsolutions]
y_ref=y_ref_all[selectskyinsolutions]
num_halos_total=num_halos_total+nhalo
        #Single halo case, this needs to be separately calculated since
#x_ref = x_true
if nhalo == 1:
#What is the radial distance between the true and predicted position
r=np.append(r,np.sqrt( (x_predicted-x_true)**2 \
+ (y_predicted-y_true)**2))
#What is the angle between the predicted position and true halo position
if (x_predicted-x_true) != 0:
psi = np.arctan((y_predicted-y_true)/(x_predicted-x_true))
else: psi=0.
theta = convert_to_360([psi], [x_predicted-x_true], [y_predicted-y_true])
angle=np.append(angle,theta)
else:
            #r_index_index contains the radial distances of the predicted to
            #true positions. These are found by matching up the true halos to
            #the predicted halos such that the average of all the radial distances
            #is optimal. It also contains indexes of the halos used, which
            #show which halo has been matched to which.
r_index_index = calc_delta_r(x_predicted, y_predicted, x_true, \
y_true)
r=np.append(r,r_index_index[0][0])
halo_index= r_index_index[1] #The true halos indexes matched with the
predicted_index=r_index_index[2] #predicted halo index
angle=np.append(angle,calc_theta\
(x_predicted[predicted_index],\
y_predicted[predicted_index],\
x_true[halo_index],\
y_true[halo_index],x_ref,\
y_ref)) # Find the angles of the predicted
#position wrt to the halo and
# add to the vector angle
    # Find the average distance of the estimate from the halo position
av_r=sum(r)/len(r)
    #In order to quantify the orientation invariance we will express each angle
    # as a vector and find the average vector
    #R_bar^2 = (1/N Sum^N cos(theta))^2 + (1/N Sum^N sin(theta))^2
N = float(num_halos_total)
angle_vec = np.sqrt(( 1.0/N * sum(np.cos(angle)) )**2 + \
( 1.0/N * sum(np.sin(angle)) )**2)
    W1=1./1000. #Weight the av_r such that < 1 is a good score and > 1 isn't so good.
W2=1.
metric = W1*av_r + W2*angle_vec #Weighted metric, weights TBD
print 'Your average distance in pixels you are away from the true halo is', av_r
print 'Your average angular vector is', angle_vec
print 'Your score for the training data is', metric
return metric
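# A toy invocation sketch (all values invented): one sky with a single true
# halo at (100, 100) and a prediction at (110, 100), giving av_r = 10 pixels,
# angle_vec = 1 and a returned score of about 1.01.
def _main_score_demo():
    return main_score(np.array([1]),
                      np.array([[100., 0., 0.]]), np.array([[100., 0., 0.]]),
                      np.array([100.]), np.array([100.]),
                      [[110., 100.]])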
def main(user_fname, fname):
""" Script to compute the evaluation metric for the Observing Dark Worlds competition. You can run it on your training data to understand how well you have done with the training data.
"""
r=np.array([],dtype=float) # The array which I will log all the calculated radial distances
angle=np.array([],dtype=float) #The array which I will log all the calculated angles
#Load in the sky_ids from the true
true_sky_id=[]
sky_loader = c.reader(open(fname, 'rb')) #Load in the sky_ids from the solution file
for row in sky_loader:
true_sky_id.append(row[0])
#Load in the true values from the solution file
nhalo_all=np.loadtxt(fname,usecols=(1,),delimiter=',',skiprows=1)
x_true_all=np.loadtxt(fname,usecols=(4,6,8),delimiter=',',skiprows=1)
y_true_all=np.loadtxt(fname,usecols=(5,7,9),delimiter=',',skiprows=1)
x_ref_all=np.loadtxt(fname,usecols=(2,),delimiter=',',skiprows=1)
y_ref_all=np.loadtxt(fname,usecols=(3,),delimiter=',',skiprows=1)
    for row in sky_loader: #note: the reader above is already exhausted, so this loop is a no-op
        true_sky_id.append(row[1])
    num_halos_total=0 #Keep track of how many halos are input into the metric
sky_prediction = c.reader(open(user_fname, 'rb')) #Open the result.csv
try: #See if the input file from user has a header on it
#with open('JoyceTest/trivialUnitTest_Pred.txt', 'r') as f:
with open(user_fname, 'r') as f:
            header = float((f.readline()).split(',')[1]) #try to parse the
                                                         #first value as a
                                                         #float; if it succeeds,
                                                         #it's not a header
print 'THE INPUT FILE DOESNT APPEAR TO HAVE A HEADER'
except :
print 'THE INPUT FILE APPEARS TO HAVE A HEADER, SKIPPING THE FIRST LINE'
skip_header = sky_prediction.next()
for sky in sky_prediction: #Loop through each line in result.csv and analyse each one
sky_id = str(sky[0]) #Get the sky_id of the input
does_it_exist=true_sky_id.count(sky_id) #Is the input sky_id
#from user a real one?
if does_it_exist > 0: #If it does then find the matching solutions to the sky_id
selectskyinsolutions=true_sky_id.index(sky_id)-1
else: #Otherwise exit
print 'Sky_id does not exist, formatting problem: ',sky_id
sys.exit(2)
nhalo=int(nhalo_all[selectskyinsolutions])#How many halos in the
#selected sky?
x_true=x_true_all[selectskyinsolutions][0:nhalo]
y_true=y_true_all[selectskyinsolutions][0:nhalo]
x_predicted=np.array([],dtype=float)
y_predicted=np.array([],dtype=float)
for i in xrange(nhalo):
            x_predicted=np.append(x_predicted,float(sky[2*i+1])) #get the predicted values
y_predicted=np.append(y_predicted,float(sky[2*i+2]))
#The solution file for the test data provides masses
        #to calculate the centre of mass whereas the Training_halo.csv
        #directly provides x_ref y_ref. So in the case of test data
        #we need to calculate the ref point from the masses using
        #get_ref()
x_ref=x_ref_all[selectskyinsolutions]
y_ref=y_ref_all[selectskyinsolutions]
num_halos_total=num_halos_total+nhalo
        #Single halo case, this needs to be separately calculated since
#x_ref = x_true
if nhalo == 1:
#What is the radial distance between the true and predicted position
r=np.append(r,np.sqrt( (x_predicted-x_true)**2 \
+ (y_predicted-y_true)**2))
#What is the angle between the predicted position and true halo position
if (x_predicted-x_true) != 0:
psi = np.arctan((y_predicted-y_true)/(x_predicted-x_true))
else: psi=0.
theta = convert_to_360([psi], [x_predicted-x_true], [y_predicted-y_true])
angle=np.append(angle,theta)
else:
            #r_index_index contains the radial distances of the predicted to
            #true positions. These are found by matching up the true halos to
            #the predicted halos such that the average of all the radial distances
            #is optimal. It also contains indexes of the halos used, which
            #show which halo has been matched to which.
r_index_index = calc_delta_r(x_predicted, y_predicted, x_true, \
y_true)
r=np.append(r,r_index_index[0][0])
halo_index= r_index_index[1] #The true halos indexes matched with the
predicted_index=r_index_index[2] #predicted halo index
angle=np.append(angle,calc_theta\
(x_predicted[predicted_index],\
y_predicted[predicted_index],\
x_true[halo_index],\
y_true[halo_index],x_ref,\
y_ref)) # Find the angles of the predicted
#position wrt to the halo and
# add to the vector angle
    # Find the average distance of the estimate from the halo position
av_r=sum(r)/len(r)
    #In order to quantify the orientation invariance we will express each angle
    # as a vector and find the average vector
    #R_bar^2 = (1/N Sum^N cos(theta))^2 + (1/N Sum^N sin(theta))^2
N = float(num_halos_total)
angle_vec = np.sqrt(( 1.0/N * sum(np.cos(angle)) )**2 + \
( 1.0/N * sum(np.sin(angle)) )**2)
    W1=1./1000. #Weight the av_r such that < 1 is a good score and > 1 isn't so good.
W2=1.
metric = W1*av_r + W2*angle_vec #Weighted metric, weights TBD
print 'Your average distance in pixels you are away from the true halo is', av_r
print 'Your average angular vector is', angle_vec
print 'Your score for the training data is', metric
if __name__ == "__main__":
    #For help just type 'python DarkWorldsMetric.py -h'
parser = ap.ArgumentParser(description='Work out the Metric for your input file')
    parser.add_argument('inputfile',type=str,nargs=1,help='Input file of halo positions. Needs to be in the format SkyId,halo_x1,halo_y1,halo_x2,halo_y2,halo_x3,halo_y3 ')
parser.add_argument('reffile',type=str,nargs=1,help='This should point to Training_halos.csv')
args = parser.parse_args()
user_fname=args.inputfile[0]
filename = (args.reffile[0]).count('Training_halos.csv')
if filename == 0:
fname=args.reffile[0]+str('Training_halos.csv')
else:
fname=args.reffile[0]
main(user_fname, fname)
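# Example invocation (file names are placeholders), matching the argparse
# setup above; when the second argument does not already name
# Training_halos.csv, that file name is appended to it:
#     python DarkWorldsMetric.py my_predictions.csv ./data/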
| 46.896313
| 188
| 0.604284
|
""" DarkWorldsMetricMountianOsteric.py
Custom evaluation metric for the 'Observing Dark Worlds' competition.
[Description of metric, or reference to documentation.]
Update: Made for the training set only so users can check their results from the training c
@Author: David Harvey
Created: 22 August 2012
"""
import numpy as np
import math as mt
import itertools as it
import csv as c
import getopt as gt
import sys as sys
import argparse as ap
import string as st
import random as rd
def calc_delta_r(x_predicted,y_predicted,x_true,y_true):
""" Compute the scalar distance between predicted halo centers
and the true halo centers. Predictions are matched to the closest
halo center.
    Notes: It takes in the predicted and true positions, and then loops over each possible configuration and finds the optimal one.
Arguments:
x_predicted, y_predicted: vector for predicted x- and y-positions (1 to 3 elements)
x_true, y_true: vector for known x- and y-positions (1 to 3 elements)
Returns:
radial_distance: vector containing the scalar distances between the predicted halo centres and the true halo centres (1 to 3 elements)
        true_halo_indexes: vector containing indexes of the input true halos which match the predicted halo indexes (1 to 3 elements)
        measured_halo_indexes: vector containing indexes of the predicted halo position with reference to the true halo position.
        e.g. if true_halo_indexes=[0,1] and measured_halo_indexes=[1,0] then the first x,y coordinates of the true halo position match the second input of the predicted x,y coordinates.
"""
num_halos=len(x_true)
num_configurations=mt.factorial(num_halos)
configurations=np.zeros([num_halos,num_configurations],int)
distances = np.zeros([num_configurations],float)
radial_distance=[]
a=['01','012']
count=0
true_halo_indexes=[]
predicted_halo_indexes=[]
distances_perm=np.zeros([num_configurations,num_halos],float)
true_halo_indexes_perm=[]
predicted_halo_indexes_perm=[]
for perm in it.permutations(a[num_halos-2],num_halos):
which_true_halos=[]
which_predicted_halos=[]
for j in xrange(num_halos):
distances_perm[count,j]=np.sqrt((x_true[j]-x_predicted[int(perm[j])])**2\
+(y_true[j]-y_predicted[int(perm[j])])**2)
which_true_halos.append(j)
which_predicted_halos.append(int(perm[j]))
true_halo_indexes_perm.append(which_true_halos)
predicted_halo_indexes_perm.append(which_predicted_halos)
distances[count]=sum(distances_perm[count,0::])
count=count+1
config = np.where(distances == min(distances))[0][0]
radial_distance.append(distances_perm[config,0::])
true_halo_indexes=true_halo_indexes_perm[config]
predicted_halo_indexes=predicted_halo_indexes_perm[config]
return radial_distance,true_halo_indexes,predicted_halo_indexes
def calc_theta(x_predicted, y_predicted, x_true, y_true, x_ref, y_ref):
""" Calculate the angle the predicted position and the true position, where the zero degree corresponds to the line joing the true halo position and the reference point given.
Arguments:
x_predicted, y_predicted: vector for predicted x- and y-positions (1 to 3 elements)
x_true, y_true: vector for known x- and y-positions (1 to 3 elements)
Note that the input of these are matched up so that the first elements of each
vector are associated with one another
x_ref, y_ref: scalars of the x,y coordinate of reference point
Returns:
Theta: A vector containing the angles of the predicted halo w.r.t the true halo
with the vector joining the reference point and the halo as the zero line.
"""
num_halos=len(x_predicted)
theta=np.zeros([num_halos+1],float)
phi = np.zeros([num_halos],float)
psi = np.arctan( (y_true-y_ref)/(x_true-x_ref) )
phi[x_true != x_ref] = np.arctan((y_predicted[x_true != x_predicted]-\
y_true[x_true != x_predicted])\
/(x_predicted[x_true != x_predicted]-\
x_true[x_true != x_predicted]))
phi =convert_to_360(phi, x_predicted-x_true,\
y_predicted-y_true)
psi = convert_to_360(psi, x_true-x_ref,\
y_true-y_ref)
theta = phi-psi
theta[theta< 0.0]=theta[theta< 0.0]+2.0*mt.pi
return theta
def convert_to_360(angle, x_in, y_in):
""" Convert the given angle to the true angle in the range 0:2pi
Arguments:
        angle: the angle to convert
        x_in, y_in: the x and y coordinates used to determine the quadrant
                    the coordinate lies in, so as to add pi or 2pi
Returns:
theta: the angle in the range 0:2pi
"""
n = len(x_in)
for i in xrange(n):
if x_in[i] < 0 and y_in[i] > 0:
angle[i] = angle[i]+mt.pi
elif x_in[i] < 0 and y_in[i] < 0:
angle[i] = angle[i]+mt.pi
elif x_in[i] > 0 and y_in[i] < 0:
angle[i] = angle[i]+2.0*mt.pi
elif x_in[i] == 0 and y_in[i] == 0:
angle[i] = 0
elif x_in[i] == 0 and y_in[i] > 0:
angle[i] = mt.pi/2.
elif x_in[i] < 0 and y_in[i] == 0:
angle[i] = mt.pi
elif x_in[i] == 0 and y_in[i] < 0:
angle[i] = 3.*mt.pi/2.
return angle
def get_ref(x_halo,y_halo,weight):
""" Gets the reference point of the system of halos by weighted averaging the x and y
coordinates.
Arguments:
        x_halo, y_halo: vectors of length num_halos referring to the coordinates of the halos
weight: the weight which will be assigned to the position of the halo
num_halos: number of halos in the system
Returns:
x_ref, y_ref: The coordinates of the reference point for the metric
"""
x_ref = np.sum([x_halo*weight])/np.sum([weight])
y_ref = np.sum([y_halo*weight])/np.sum([weight])
return x_ref,y_ref
def main_score( nhalo_all, x_true_all, y_true_all, x_ref_all, y_ref_all, sky_prediction):
"""abstracts the score from the old command-line interface.
sky_prediction is a dx2 array of predicted x,y positions
-camdp"""
r=np.array([],dtype=float)
angle=np.array([],dtype=float)
num_halos_total=0
for selectskyinsolutions, sky in enumerate(sky_prediction):
nhalo=int(nhalo_all[selectskyinsolutions])
x_true=x_true_all[selectskyinsolutions][0:nhalo]
y_true=y_true_all[selectskyinsolutions][0:nhalo]
x_predicted=np.array([],dtype=float)
y_predicted=np.array([],dtype=float)
for i in xrange(nhalo):
x_predicted=np.append(x_predicted,float(sky[0]))
y_predicted=np.append(y_predicted,float(sky[1]))
x_ref=x_ref_all[selectskyinsolutions]
y_ref=y_ref_all[selectskyinsolutions]
num_halos_total=num_halos_total+nhalo
if nhalo == 1:
r=np.append(r,np.sqrt( (x_predicted-x_true)**2 \
+ (y_predicted-y_true)**2))
if (x_predicted-x_true) != 0:
psi = np.arctan((y_predicted-y_true)/(x_predicted-x_true))
else: psi=0.
theta = convert_to_360([psi], [x_predicted-x_true], [y_predicted-y_true])
angle=np.append(angle,theta)
else:
r_index_index = calc_delta_r(x_predicted, y_predicted, x_true, \
y_true)
r=np.append(r,r_index_index[0][0])
halo_index= r_index_index[1]
predicted_index=r_index_index[2]
angle=np.append(angle,calc_theta\
(x_predicted[predicted_index],\
y_predicted[predicted_index],\
x_true[halo_index],\
y_true[halo_index],x_ref,\
y_ref))
av_r=sum(r)/len(r)
N = float(num_halos_total)
angle_vec = np.sqrt(( 1.0/N * sum(np.cos(angle)) )**2 + \
( 1.0/N * sum(np.sin(angle)) )**2)
W1=1./1000.
W2=1.
metric = W1*av_r + W2*angle_vec
print 'Your average distance in pixels you are away from the true halo is', av_r
print 'Your average angular vector is', angle_vec
print 'Your score for the training data is', metric
return metric
def main(user_fname, fname):
""" Script to compute the evaluation metric for the Observing Dark Worlds competition. You can run it on your training data to understand how well you have done with the training data.
"""
r=np.array([],dtype=float)
angle=np.array([],dtype=float)
true_sky_id=[]
sky_loader = c.reader(open(fname, 'rb'))
for row in sky_loader:
true_sky_id.append(row[0])
nhalo_all=np.loadtxt(fname,usecols=(1,),delimiter=',',skiprows=1)
x_true_all=np.loadtxt(fname,usecols=(4,6,8),delimiter=',',skiprows=1)
y_true_all=np.loadtxt(fname,usecols=(5,7,9),delimiter=',',skiprows=1)
x_ref_all=np.loadtxt(fname,usecols=(2,),delimiter=',',skiprows=1)
y_ref_all=np.loadtxt(fname,usecols=(3,),delimiter=',',skiprows=1)
for row in sky_loader:
true_sky_id.append(row[1])
num_halos_total=0
sky_prediction = c.reader(open(user_fname, 'rb'))
try:
with open(user_fname, 'r') as f:
header = float((f.readline()).split(',')[1])
print 'THE INPUT FILE DOESNT APPEAR TO HAVE A HEADER'
except :
print 'THE INPUT FILE APPEARS TO HAVE A HEADER, SKIPPING THE FIRST LINE'
skip_header = sky_prediction.next()
for sky in sky_prediction:
sky_id = str(sky[0])
does_it_exist=true_sky_id.count(sky_id)
if does_it_exist > 0:
selectskyinsolutions=true_sky_id.index(sky_id)-1
else:
print 'Sky_id does not exist, formatting problem: ',sky_id
sys.exit(2)
nhalo=int(nhalo_all[selectskyinsolutions])
x_true=x_true_all[selectskyinsolutions][0:nhalo]
y_true=y_true_all[selectskyinsolutions][0:nhalo]
x_predicted=np.array([],dtype=float)
y_predicted=np.array([],dtype=float)
for i in xrange(nhalo):
x_predicted=np.append(x_predicted,float(sky[2*i+1]))
y_predicted=np.append(y_predicted,float(sky[2*i+2]))
x_ref=x_ref_all[selectskyinsolutions]
y_ref=y_ref_all[selectskyinsolutions]
num_halos_total=num_halos_total+nhalo
if nhalo == 1:
r=np.append(r,np.sqrt( (x_predicted-x_true)**2 \
+ (y_predicted-y_true)**2))
if (x_predicted-x_true) != 0:
psi = np.arctan((y_predicted-y_true)/(x_predicted-x_true))
else: psi=0.
theta = convert_to_360([psi], [x_predicted-x_true], [y_predicted-y_true])
angle=np.append(angle,theta)
else:
r_index_index = calc_delta_r(x_predicted, y_predicted, x_true, \
y_true)
r=np.append(r,r_index_index[0][0])
halo_index= r_index_index[1]
predicted_index=r_index_index[2]
angle=np.append(angle,calc_theta\
(x_predicted[predicted_index],\
y_predicted[predicted_index],\
x_true[halo_index],\
y_true[halo_index],x_ref,\
y_ref))
av_r=sum(r)/len(r)
N = float(num_halos_total)
angle_vec = np.sqrt(( 1.0/N * sum(np.cos(angle)) )**2 + \
( 1.0/N * sum(np.sin(angle)) )**2)
W1=1./1000.
W2=1.
metric = W1*av_r + W2*angle_vec
print 'Your average distance in pixels you are away from the true halo is', av_r
print 'Your average angular vector is', angle_vec
print 'Your score for the training data is', metric
if __name__ == "__main__":
parser = ap.ArgumentParser(description='Work out the Metric for your input file')
    parser.add_argument('inputfile',type=str,nargs=1,help='Input file of halo positions. Needs to be in the format SkyId,halo_x1,halo_y1,halo_x2,halo_y2,halo_x3,halo_y3 ')
parser.add_argument('reffile',type=str,nargs=1,help='This should point to Training_halos.csv')
args = parser.parse_args()
user_fname=args.inputfile[0]
filename = (args.reffile[0]).count('Training_halos.csv')
if filename == 0:
fname=args.reffile[0]+str('Training_halos.csv')
else:
fname=args.reffile[0]
main(user_fname, fname)
| false
| true
|
79041988748b5e99d242d1b0996a0f5f1221f105
| 936
|
py
|
Python
|
tools/mo/openvino/tools/mo/front/mxnet/eye_ext.py
|
opencv/dldt
|
c0a2c98a457a08e8853abc18f5bd462169d0b354
|
[
"Apache-2.0"
] | 1,127
|
2018-10-15T14:36:58.000Z
|
2020-04-20T09:29:44.000Z
|
tools/mo/openvino/tools/mo/front/mxnet/eye_ext.py
|
opencv/dldt
|
c0a2c98a457a08e8853abc18f5bd462169d0b354
|
[
"Apache-2.0"
] | 439
|
2018-10-20T04:40:35.000Z
|
2020-04-19T05:56:25.000Z
|
tools/mo/openvino/tools/mo/front/mxnet/eye_ext.py
|
opencv/dldt
|
c0a2c98a457a08e8853abc18f5bd462169d0b354
|
[
"Apache-2.0"
] | 414
|
2018-10-17T05:53:46.000Z
|
2020-04-16T17:29:53.000Z
|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.ops.eye import MXEye
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
class EyeExtractor(FrontExtractorOp):
op = '_eye'
enabled = True
@classmethod
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
num_rows = attrs.int("N")
num_columns = attrs.int("M", num_rows)
if num_columns is None or num_columns == 0:
num_columns = num_rows
diagonal_index = attrs.int("k", 0)
out_type = attrs.dtype("dtype", np.float32)
new_attrs = {'num_rows': num_rows, 'num_columns': num_columns, 'diagonal_index': diagonal_index, 'output_type': out_type}
MXEye.update_node_stat(node, new_attrs)
return cls.enabled
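# For intuition (an illustration, not OpenVINO code): the attributes extracted
# above mirror numpy.eye semantics, e.g.
#     np.eye(3, 4, k=1, dtype=np.float32)
# builds a 3x4 matrix with ones on the first superdiagonal.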
| 34.666667
| 129
| 0.700855
|
import numpy as np
from openvino.tools.mo.ops.eye import MXEye
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
class EyeExtractor(FrontExtractorOp):
op = '_eye'
enabled = True
@classmethod
def extract(cls, node):
attrs = get_mxnet_layer_attrs(node.symbol_dict)
num_rows = attrs.int("N")
num_columns = attrs.int("M", num_rows)
if num_columns is None or num_columns == 0:
num_columns = num_rows
diagonal_index = attrs.int("k", 0)
out_type = attrs.dtype("dtype", np.float32)
new_attrs = {'num_rows': num_rows, 'num_columns': num_columns, 'diagonal_index': diagonal_index, 'output_type': out_type}
MXEye.update_node_stat(node, new_attrs)
return cls.enabled
| true
| true
|
790419e9917bd972edf29cb9a5d0bac3ca44d44c
| 598
|
py
|
Python
|
mooda/input/read_pkl.py
|
rbardaji/mooda
|
00c0f9fae657d3d0f7dd3772a029f78a182a07b2
|
[
"MIT"
] | 15
|
2018-08-08T10:46:04.000Z
|
2021-09-24T14:38:37.000Z
|
mooda/input/read_pkl.py
|
rbardaji/mooda
|
00c0f9fae657d3d0f7dd3772a029f78a182a07b2
|
[
"MIT"
] | 3
|
2019-05-14T11:40:40.000Z
|
2020-04-30T07:17:15.000Z
|
mooda/input/read_pkl.py
|
rbardaji/mooda
|
00c0f9fae657d3d0f7dd3772a029f78a182a07b2
|
[
"MIT"
] | 4
|
2018-11-02T14:44:59.000Z
|
2021-05-10T21:57:25.000Z
|
""" Implementation of mooda.read_pkl(path) """
import pickle
from .. import WaterFrame
def read_pkl(path_pkl):
"""
Get a WaterFrame from a pickle file.
Parameters
----------
path_pkl: str
Location of the pickle file.
Returns
-------
wf_pkl: WaterFrame
"""
wf_pkl = WaterFrame()
    with open(path_pkl, "rb") as pickle_file:
        pickle_dataset = pickle.load(pickle_file)
wf_pkl.data = pickle_dataset.get('data')
wf_pkl.vocabulary = pickle_dataset.get('vocabulary')
wf_pkl.metadata = pickle_dataset.get('metadata')
return wf_pkl
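# A round-trip sketch (file name and payload invented, not from the source):
# read_pkl expects a pickled dict exposing 'data', 'vocabulary' and
# 'metadata' keys, so a compatible file can be produced like this.
def _read_pkl_demo():
    payload = {"data": None, "vocabulary": {}, "metadata": {"site": "demo"}}
    with open("demo_waterframe.pkl", "wb") as handle:
        pickle.dump(payload, handle)
    return read_pkl("demo_waterframe.pkl")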
| 22.148148
| 57
| 0.600334
|
import pickle
from .. import WaterFrame
def read_pkl(path_pkl):
wf_pkl = WaterFrame()
    with open(path_pkl, "rb") as pickle_file:
        pickle_dataset = pickle.load(pickle_file)
wf_pkl.data = pickle_dataset.get('data')
wf_pkl.vocabulary = pickle_dataset.get('vocabulary')
wf_pkl.metadata = pickle_dataset.get('metadata')
return wf_pkl
| true
| true
|
79041a5d66b00791b597a86ab9714b8271acbd89
| 1,655
|
py
|
Python
|
tests/spider/setting.py
|
sirliu/feapder
|
1a8464dfa047c13f7a99c47cbff8d27c63798651
|
[
"MIT"
] | null | null | null |
tests/spider/setting.py
|
sirliu/feapder
|
1a8464dfa047c13f7a99c47cbff8d27c63798651
|
[
"MIT"
] | null | null | null |
tests/spider/setting.py
|
sirliu/feapder
|
1a8464dfa047c13f7a99c47cbff8d27c63798651
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""爬虫配置文件"""
import os
# MYSQL
MYSQL_IP = "localhost"
MYSQL_PORT = 3306
MYSQL_DB = "feapder"
MYSQL_USER_NAME = "feapder"
MYSQL_USER_PASS = "feapder123"
# REDIS
# IP:PORT
REDISDB_IP_PORTS = "localhost:6379"
REDISDB_USER_PASS = ""
REDISDB_DB = 0
# # Spider settings
# # COLLECTOR
COLLECTOR_SLEEP_TIME = 1 # Interval between pulls of tasks from the task queue into the in-memory queue
COLLECTOR_TASK_COUNT = 100 # Number of tasks fetched per pull
#
# # SPIDER
SPIDER_THREAD_COUNT = 1 # Spider concurrency (thread count)
# SPIDER_SLEEP_TIME = 0 # Download interval (sleep after parsing each response)
# SPIDER_MAX_RETRY_TIMES = 100 # Maximum retry count per request
# # Retry failed requests; a request counts as failed once it exceeds the allowed maximum retry count
# RETRY_FAILED_REQUESTS = False
# # Request timeout in seconds; the request is redone past this time (this is not the network timeout)
# REQUEST_LOST_TIMEOUT = 600 # 10 minutes
# # Save failed requests
# SAVE_FAILED_REQUEST = True
#
# # Download cache backed by Redis; memory is limited, so for testing only
# RESPONSE_CACHED_ENABLE = False # Enable the download cache; True is recommended for expensive data or data whose requirements change often
# RESPONSE_CACHED_EXPIRE_TIME = 3600 # Cache lifetime in seconds
# RESPONSE_CACHED_USED = False # Use the cache; can be set to True when backfilling data
#
# WARNING_FAILED_COUNT = 1000 # Alert once the number of failed tasks exceeds WARNING_FAILED_COUNT
#
# # Spider initialization
# # Whether the spider stops automatically after finishing its requests or keeps waiting for tasks
# AUTO_STOP_WHEN_SPIDER_DONE = True
#
#
# # Proxy settings
# PROXY_EXTRACT_API = None # Proxy extraction API; returned proxies are separated by \r\n
# PROXY_ENABLE = True
#
# # Random headers
# RANDOM_HEADERS = True
# # Use a requests session
# USE_SESSION = False
#
# # Deduplication
# ITEM_FILTER_ENABLE = False # item deduplication
# REQUEST_FILTER_ENABLE = False # request deduplication
#
# # Alerting
# DINGDING_WARNING_URL = "" # DingTalk bot API
# DINGDING_WARNING_PHONE = "" # Person to alert
# LINGXI_TOKEN = "" # Lingxi alerting token
#
# LOG_NAME = os.path.basename(os.getcwd())
# LOG_PATH = "log/%s.log" % LOG_NAME # Log storage path
# LOG_LEVEL = "DEBUG"
# LOG_IS_WRITE_TO_FILE = False
# OTHERS_LOG_LEVAL = "ERROR" # Log level for third-party libraries
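# A hedged connectivity sketch (not part of feapder; assumes pymysql and
# redis-py are installed), kept commented so this settings module stays
# side-effect free:
#     import pymysql, redis
#     host, port = REDISDB_IP_PORTS.split(":")
#     redis.StrictRedis(host=host, port=int(port), db=REDISDB_DB).ping()
#     pymysql.connect(host=MYSQL_IP, port=MYSQL_PORT, user=MYSQL_USER_NAME,
#                     password=MYSQL_USER_PASS, database=MYSQL_DB)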
| 23.309859
| 70
| 0.735347
|
import os
MYSQL_IP = "localhost"
MYSQL_PORT = 3306
MYSQL_DB = "feapder"
MYSQL_USER_NAME = "feapder"
MYSQL_USER_PASS = "feapder123"
REDISDB_IP_PORTS = "localhost:6379"
REDISDB_USER_PASS = ""
REDISDB_DB = 0
COLLECTOR_SLEEP_TIME = 1
COLLECTOR_TASK_COUNT = 100
SPIDER_THREAD_COUNT = 1
| true
| true
|
79041a8ba61b4377cccc2fe773aa21c970c59b26
| 59,952
|
py
|
Python
|
invest_natcap/scenario_generator/scenario_generator.py
|
phargogh/invest-natcap.invest-3
|
ee96055a4fa034d9a95fa8ccc6259ab03264e6c1
|
[
"BSD-3-Clause"
] | null | null | null |
invest_natcap/scenario_generator/scenario_generator.py
|
phargogh/invest-natcap.invest-3
|
ee96055a4fa034d9a95fa8ccc6259ab03264e6c1
|
[
"BSD-3-Clause"
] | null | null | null |
invest_natcap/scenario_generator/scenario_generator.py
|
phargogh/invest-natcap.invest-3
|
ee96055a4fa034d9a95fa8ccc6259ab03264e6c1
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import os
import math
import shutil
import disk_sort
import struct
import operator
import logging
from decimal import Decimal
from fractions import Fraction
import numpy
from scipy.linalg import eig
import scipy.ndimage
import cProfile
import pstats
from osgeo import gdal, ogr
import pygeoprocessing.geoprocessing
logging.basicConfig(format='%(asctime)s %(name)-20s %(levelname)-8s \
%(message)s', level=logging.DEBUG, datefmt='%m/%d/%Y %H:%M:%S ')
LOGGER = logging.getLogger('invest_natcap.scenario_generator.scenario_generator')
def calculate_weights(arr, rounding=4):
PLACES = Decimal(10) ** -(rounding)
# get eigenvalues and vectors
evas, eves = eig(arr)
# get primary eigenvalue and vector
eva = max(evas)
eva_idx = evas.tolist().index(eva)
eve = eves.take((eva_idx,), axis=1)
# priority vector = normalized primary eigenvector
normalized = eve / sum(eve)
# turn into list of real part values
vector = [abs(e[0]) for e in normalized]
# return nice rounded Decimal values with labels
return [ Decimal( str(v) ).quantize(PLACES) for v in vector ]
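# A worked example (values invented; max() over the complex eigenvalues relies
# on Python 2 comparison rules, which this module uses): for a 2x2 pairwise
# matrix judging cover A three times as important as cover B, the AHP priority
# vector is [0.75, 0.25].
def _calculate_weights_demo():
    pairwise = numpy.array([[1.0, 3.0],
                            [1.0 / 3.0, 1.0]])
    return calculate_weights(pairwise)  # -> [Decimal('0.7500'), Decimal('0.2500')]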
def calculate_priority(table_uri):
table = [line.strip().split(",") for line in open(table_uri).readlines()]
id_index = table[0].index("Id")
cover_id_list = [row[id_index] for row in table]
cover_id_list.pop(0)
cover_id_index_list = [table[0].index(cover_id) for cover_id in cover_id_list]
matrix = numpy.zeros((len(cover_id_list),len(cover_id_list)))
for row in range(len(cover_id_list)):
for col in range(row+1):
matrix[row][col] = float(table[row+1][cover_id_index_list[col]])
matrix[col][row] = 1 / matrix[row][col]
cover_id_list = [int(cover_id) for cover_id in cover_id_list]
return dict(zip(cover_id_list, calculate_weights(matrix, 4)))
def calculate_distance_raster_uri(dataset_in_uri, dataset_out_uri):
# Compute pixel distance
pygeoprocessing.geoprocessing.distance_transform_edt(dataset_in_uri, dataset_out_uri)
# Convert to meters
def pixel_to_meters_op(x):
x[x != nodata] *= cell_size
return x
cell_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(dataset_in_uri)
nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(dataset_out_uri)
tmp = pygeoprocessing.geoprocessing.temporary_filename()
pygeoprocessing.geoprocessing.vectorize_datasets(
[dataset_out_uri], \
pixel_to_meters_op, \
tmp, \
gdal.GDT_Float64, \
nodata, \
cell_size, \
'union', \
vectorize_op = False)
def identity_op(x):
return x
pygeoprocessing.geoprocessing.vectorize_datasets(
[tmp], \
identity_op, \
dataset_out_uri, \
gdal.GDT_Float64, \
nodata, \
cell_size, \
'union', \
vectorize_op = False)
# Compute raster stats so the raster is viewable in QGIS and Arc
pygeoprocessing.geoprocessing.calculate_raster_stats_uri(dataset_out_uri)
##def calculate_distance_raster_uri(dataset_in_uri, dataset_out_uri, cell_size = None, max_distance = None):
## if cell_size == None:
## cell_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(dataset_in_uri)
##
## memory_array = pygeoprocessing.geoprocessing.load_memory_mapped_array(dataset_in_uri, pygeoprocessing.geoprocessing.temporary_filename())
##
## memory_array = scipy.ndimage.morphology.distance_transform_edt(memory_array) * cell_size
##
## nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(dataset_in_uri)
##
#### if max_distance != None:
#### memory_array[memory_array > max_distance] = nodata
##
## pygeoprocessing.geoprocessing.new_raster_from_base_uri(dataset_in_uri, dataset_out_uri, 'GTiff', nodata, gdal.GDT_Float32)
##
## dataset_out = gdal.Open(dataset_out_uri, 1)
## band = dataset_out.GetRasterBand(1)
## band.WriteArray(memory_array)
##
## band = None
## dataset_out = None
shapeTypes= {0: "Null Shape", 1: "Point", 3: "PolyLine", 5: "Polygon",
8: "MultiPoint", 11: "PointZ", 13: "PolyLineZ",
15: "PolygonZ", 18: "MultiPointZ", 21: "PointM",
23: "PolyLineM", 25: "PolygonM", 28: "MultiPointM",
31: "MultiPatch"}
def get_geometry_type_from_uri(datasource_uri):
datasource = open(datasource_uri, 'r')
datasource.seek(32)
shape_type ,= struct.unpack('<i',datasource.read(4))
datasource.close()
return shape_type
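# Usage sketch (the path is a placeholder): the returned code indexes the
# shapeTypes table defined above, e.g.
#     shapeTypes.get(get_geometry_type_from_uri("parcels.shp"))  # "Polygon" for code 5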
def get_transition_set_count_from_uri(dataset_uri_list):
cell_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(dataset_uri_list[0])
lulc_nodata = int(pygeoprocessing.geoprocessing.get_nodata_from_uri(dataset_uri_list[0]))
nodata = 0
#reclass rasters to compact bit space
lulc_codes = set()
unique_raster_values_count = {}
for dataset_uri in dataset_uri_list:
unique_raster_values_count[dataset_uri] = pygeoprocessing.geoprocessing.unique_raster_values_count(dataset_uri)
lulc_codes.update(unique_raster_values_count[dataset_uri].keys())
lulc_codes = list(lulc_codes)
lulc_codes.sort()
if len(lulc_codes) < 2 ** 8:
data_type = gdal.GDT_UInt16
shift = 8
elif len(lulc_codes) < 2 ** 16:
data_type = gdal.GDT_UInt32
shift = 16
else:
raise ValueError, "Too many LULC codes."
#renumber and reclass rasters
reclass_orig_dict = dict(zip(lulc_codes,range(1,len(lulc_codes)+1)))
reclass_dest_dict = {}
for key in reclass_orig_dict:
reclass_dest_dict[key] = reclass_orig_dict[key] << shift
def add_op(orig, dest):
return orig + dest
counts={}
for i in range(len(dataset_uri_list)-1):
orig_uri = pygeoprocessing.geoprocessing.temporary_filename()
dest_uri = pygeoprocessing.geoprocessing.temporary_filename()
multi_uri = pygeoprocessing.geoprocessing.temporary_filename()
#reclass orig values
pygeoprocessing.geoprocessing.reclassify_dataset_uri(dataset_uri_list[i],
reclass_orig_dict,
orig_uri,
data_type,
nodata,
exception_flag="values_required")
#reclass dest values
pygeoprocessing.geoprocessing.reclassify_dataset_uri(dataset_uri_list[i+1],
reclass_dest_dict,
dest_uri,
data_type,
nodata,
exception_flag="values_required")
#multiplex orig with dest
pygeoprocessing.geoprocessing.vectorize_datasets([orig_uri, dest_uri],
add_op,
multi_uri,
data_type,
nodata,
cell_size,
"union")
#get unique counts
counts[i]=pygeoprocessing.geoprocessing.unique_raster_values_count(multi_uri, False)
restore_classes = {}
for key in reclass_orig_dict:
restore_classes[reclass_orig_dict[key]] = key
restore_classes[nodata] = lulc_nodata
LOGGER.debug("Decoding transition table.")
transitions = {}
for key in counts:
transitions[key]={}
for k in counts[key]:
try:
orig = restore_classes[k % (2**shift)]
except KeyError:
orig = lulc_nodata
try:
dest = restore_classes[k >> shift]
except KeyError:
dest = lulc_nodata
try:
transitions[key][orig][dest] = counts[key][k]
except KeyError:
transitions[key][orig] = {dest : counts[key][k]}
return unique_raster_values_count, transitions
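# A standalone illustration (arrays invented) of the bit-packing trick used
# above: each pixel's pair of labels is encoded as orig + (dest << shift),
# and unique codes are decoded back with modulo and right-shift.
def _transition_codes_demo():
    #expected result: [(1, 1, 1), (2, 1, 1), (1, 2, 1), (2, 2, 2)]
    shift = 8
    orig = numpy.array([1, 1, 2, 2, 2])
    dest = numpy.array([1, 2, 2, 2, 1])
    codes = orig + (dest << shift)
    values, counts = numpy.unique(codes, return_counts=True)
    return [(int(c) % 2 ** shift, int(c) >> shift, int(n))
            for c, n in zip(values, counts)]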
def generate_chart_html(cover_dict, cover_names_dict, workspace_dir):
html = "\n<table BORDER=1>"
html += "\n<TR><td>Id</td><td>% Before</td><td>% After</td></TR>"
cover_id_list = cover_dict.keys()
cover_id_list.sort()
cover_id_list_chart = cover_names_dict.keys()
cover_id_list_chart.sort()
pixcount = 0
for cover_id in cover_id_list:
pixcount += cover_dict[cover_id][0]
pixcount = float(pixcount)
for cover_id in cover_id_list:
html += "\n<TR><td>%i</td><td>%i</td><td>%i</td></TR>" % (cover_id,
(cover_dict[cover_id][0] / pixcount) * 100,
(cover_dict[cover_id][1] / pixcount) * 100 )
html += "\n<table>"
#create three charts for original, final and change
thecharts = [
['Original',0],
['Final',1],
['Change',2]
]
hainitial = ""
hainitialnegative = ""
hainitiallist = []
hafinal = ""
hafinalnegative = ""
hafinallist = []
hachange = ""
hachangelist = []
haall = []
initialcover = []
finalcover = []
for cover_id in cover_id_list_chart:
try:
initialcover.append((cover_dict[cover_id][0] / pixcount) * 100)
except KeyError:
initialcover.append(0)
try:
finalcover.append((cover_dict[cover_id][1] / pixcount) * 100)
except KeyError:
finalcover.append(0)
#return html
html += "<style type='text/css'>"
html += "body {font-family: Arial, Helvetica, sans-serif; font-size: 0.9em;}"
html += "table#results {margin: 20px auto}"
html += "table#results th {text-align: left}"
html += "</style>"
html += "<script type='text/javascript'>\n"
html += "var chart,\n"
categories = []
html += "categories = ["
for cover_id in cover_id_list_chart:
#pass
categories.append("'"+cover_names_dict[cover_id]+"'")
html += ",".join(categories)
html += "]\n"
html +="$(document).ready(function() {\n"
for x in initialcover:
hainitial = hainitial +str(x)+","
hainitialnegative = hainitialnegative + "0,"
hainitiallist.append(float(x))
temp = []
temp.append(hainitial)
temp.append(hainitialnegative)
haall.append(temp)
thecharts[0].append(max(hainitiallist))
thecharts[0].append(min(hainitiallist))
for x in finalcover:
hafinal = hafinal +str(x)+","
hafinalnegative = hafinalnegative + "0,"
hafinallist.append(float(x))
temp = []
temp.append(hafinal)
temp.append(hafinalnegative)
haall.append(temp)
thecharts[1].append(max(hafinallist))
thecharts[1].append(min(hafinallist))
for x in range(len(initialcover)):
hachange = hachange + str(float(finalcover[x]) - float(initialcover[x]))+","
hachangelist.append(float(finalcover[x]) - float(initialcover[x]))
#split the change values
hachangelistnegative = ""
hachangelistpositive = ""
for item in hachangelist:
if item < 0:
hachangelistnegative=hachangelistnegative+str(item)+","
hachangelistpositive=hachangelistpositive+"0,"
else:
hachangelistpositive=hachangelistpositive+str(item)+","
hachangelistnegative=hachangelistnegative+"0,"
temp = []
temp.append(hachangelistpositive)
temp.append(hachangelistnegative)
haall.append(temp)
thecharts[2].append(max(hachangelist))
thecharts[2].append(min(hachangelist))
if thecharts[0][2] > thecharts[1][2]:
thecharts[1][2] = thecharts[0][2]
thecharts[2][2] = thecharts[0][2]
else:
thecharts[0][2] = thecharts[1][2]
thecharts[2][2] = thecharts[1][2]
for x in thecharts:
if x[0] == 'Change':
themin = x[3]
else:
themin = 0
html += "chart = new Highcharts.Chart({\n"
html += "chart: {renderTo: '"+x[0]+"container',defaultSeriesType: 'bar'},"
html += "title: {text: '"+x[0]+" Landcover'},"
html += "subtitle: {text: ''},"
html += "xAxis: [{categories: categories,reversed: false}, {opposite: true, reversed: false,categories: categories,linkedTo: 0}],"
html += "yAxis: {title: {text: null},labels: {formatter: function(){return Math.abs(this.value)}},min: "+str(themin)+",max: "+str(x[2])+"},"
html += "plotOptions: {series: { stacking: 'normal', showInLegend: false } },"
html += "tooltip: { formatter: function(){return '<b>'+ this.point.category +'</b><br/>'+'Area: '+ Highcharts.numberFormat(Math.abs(this.point.y), 0)+'%';}},"
html += "series: [{name: '',"
html += "data: ["+haall[x[1]][0]+"]}, {"
html += "name: '',"
html += "data: ["+haall[x[1]][1]+"]}]});\n"
html += "});\n"
html += "</script>\n"
for x in thecharts:
html += "<div id='"+x[0]+"container' style='width: 800px; height: 400px; margin: 20px 0'></div>\n"
return html
def filter_fragments(input_uri, size, output_uri):
#clump and sieve
LOGGER.debug("Filtering patches smaller than %i from %s.", size, input_uri)
src_ds = gdal.Open(input_uri)
src_band = src_ds.GetRasterBand(1)
src_array = src_band.ReadAsArray()
driver = gdal.GetDriverByName("GTiff")
driver.CreateCopy(output_uri, src_ds, 0 )
dst_ds = gdal.Open(output_uri, 1)
dst_band = dst_ds.GetRasterBand(1)
dst_array = numpy.copy(src_array)
suitability_values = numpy.unique(src_array)
if suitability_values[0] == 0:
suitability_values = suitability_values[1:]
#8 connectedness preferred, 4 connectedness allowed
eight_connectedness = numpy.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
four_connectedness = numpy.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
suitability_values_count = suitability_values.size
for v in range(1, suitability_values_count):
LOGGER.debug('Processing suitability value ' + \
str(suitability_values.size - v))
value = suitability_values[v]
# Pixels of interest set to 1, 0 otherwise
mask = src_array == value
# Number of pixels to process
ones_in_mask = numpy.sum(mask)
# Label and count disconnected components (fragments)
label_im, nb_labels = scipy.ndimage.label(mask, four_connectedness)
# Compute fragment sizes
fragment_sizes = \
scipy.ndimage.sum(mask, label_im, range(nb_labels + 1))
# List fragments
fragment_labels = numpy.array(range(nb_labels + 1))
# Discard large fragments
small_fragment_mask = numpy.where(fragment_sizes <= size)
# Gather small fragment information
small_fragment_sizes = fragment_sizes[small_fragment_mask]
small_fragment_labels = fragment_labels[small_fragment_mask]
combined_small_fragment_size = numpy.sum(small_fragment_sizes)
# Find each fragment
fragments_location = scipy.ndimage.find_objects(label_im, nb_labels)
removed_pixels = 0
small_fragment_labels_count = small_fragment_labels.size
for l in range(small_fragment_labels_count-1):
label = small_fragment_labels[l+1]
last_label = small_fragment_labels[l]
size = small_fragment_sizes[l+1]
source = label_im[fragments_location[last_label]]
target = dst_array[fragments_location[last_label]]
pixels_to_remove = numpy.where(source == label)
target[pixels_to_remove] = 0
dst_band.WriteArray(dst_array)
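# A minimal standalone sketch (toy array, not from the source) of the
# fragment-filtering idea: label connected components, measure their sizes,
# and zero out those at or below a threshold.
def _filter_fragments_demo():
    #the lone pixel disappears, the 2x2 block survives
    grid = numpy.array([[1, 1, 0, 0],
                        [1, 1, 0, 1],
                        [0, 0, 0, 0]])
    labels, n_labels = scipy.ndimage.label(grid)
    sizes = scipy.ndimage.sum(grid, labels, range(1, n_labels + 1))
    for label, size in zip(range(1, n_labels + 1), sizes):
        if size <= 1:
            grid[labels == label] = 0
    return grid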
def sum_uri(dataset_uri, datasource_uri):
"""Wrapper call to pygeoprocessing.geoprocessing.aggregate_raster_values_uri to extract total
:param dataset_uri: The uri for the input raster.
:type dataset_uri: str
:return: None
:rtype: None
"""
total = pygeoprocessing.geoprocessing.aggregate_raster_values_uri(dataset_uri, datasource_uri).total
return total.__getitem__(total.keys().pop())
def execute(args):
###
    #overriding, non-standard field names
###
# Preliminary tests
if ('transition' in args) and ('suitability' in args):
assert args['transition'] != args['suitability'], \
'Transition and suitability tables are the same: ' + \
args['transition'] + '. The model expects different tables.'
#transition table fields
args["transition_id"] = "Id"
args["percent_field"] = "Percent Change"
args["area_field"] = "Area Change"
args["priority_field"] = "Priority"
args["proximity_field"] = "Proximity"
args["proximity_weight"] = "0.3"
args["patch_field"] = "Patch ha"
#factors table fields
args["suitability_id"] = "Id"
args["suitability_layer"] = "Layer"
args["suitability_weight"] = "Wt"
args["suitability_field"] = "Suitfield"
args["distance_field"] = "Dist"
args["suitability_cover_id"] = "Cover ID"
#exercise fields
args["returns_cover_id"] = "Cover ID"
args["returns_layer"] = "/Users/olwero/Dropbox/Work/Ecosystem_Services/NatCap/Olympics/2014/Scenarios/Exercise/inputtest/returns.csv"
###
#get parameters, set outputs
###
workspace = args["workspace_dir"]
if not os.path.exists(workspace):
os.makedirs(workspace)
landcover_uri = args["landcover"]
if len(args["suffix"]) > 0:
suffix = "_" + args["suffix"].strip("_")
else:
suffix = ""
intermediate_dir = "intermediate"
if not os.path.exists(os.path.join(workspace, intermediate_dir)):
os.makedirs(os.path.join(workspace, intermediate_dir))
proximity_weight = float(args["proximity_weight"])
    #it might be better to just check if factors are being used
try:
physical_suitability_weight = float(args["weight"])
except KeyError:
physical_suitability_weight = 0.5
##output file names
#absolute paths
landcover_resample_uri = os.path.join(workspace, "resample" + suffix + ".tif")
landcover_transition_uri = os.path.join(workspace,"transitioned" + suffix + ".tif")
override_dataset_uri = os.path.join(workspace,"override" + suffix + ".tif")
landcover_htm_uri = os.path.join(workspace,"scenario-output-summary" + suffix + ".html")
pygeoprocessing.geoprocessing.create_directories([workspace])
#relative paths, or with patterned name
transition_name = os.path.join(intermediate_dir, "transition_%i" + suffix + ".tif")
suitability_name = os.path.join(intermediate_dir, "%s_%s" + suffix + ".tif")
normalized_name = os.path.join(intermediate_dir, "%s_%s_norm" + suffix + ".tif")
combined_name = os.path.join(intermediate_dir, "factors_%s" + suffix + ".tif")
constraints_name = os.path.join(intermediate_dir, "constraints" + suffix + ".tif")
filter_name = os.path.join(intermediate_dir, "filter_%i" + suffix + ".tif")
factors_name = os.path.join(intermediate_dir, "suitability_%s" + suffix + ".tif")
cover_name = os.path.join(intermediate_dir, "cover_%i" + suffix + ".tif")
proximity_name = os.path.join(intermediate_dir, "proximity_%s" + suffix + ".tif")
normalized_proximity_name = os.path.join(intermediate_dir, "proximity_norm_%s" + suffix + ".tif")
adjusted_suitability_name = os.path.join(intermediate_dir, "adjusted_suitability_%s" + suffix + ".tif")
scenario_name = "scenario" + suffix + ".tif"
###
#constants
###
raster_format = "GTiff"
transition_type = gdal.GDT_Int16
transition_nodata = -1
change_nodata = -9999
    #value to multiply transition matrix entries (i.e. convert a 10-point scale to a 100-point scale)
transition_scale = 10
distance_scale = 100
suitability_nodata = 0
suitability_type = gdal.GDT_Int16
def suitability_op(trans, suit):
if trans == 0:
return 0
return round(((1 - physical_suitability_weight) * trans)\
+ (physical_suitability_weight * suit))
ds_type = "GTiff"
driver = gdal.GetDriverByName(ds_type)
###
#validate data
###
#raise warning if nothing is going to happen
if not any([args["calculate_transition"],
args["calculate_factors"],
args["override_layer"]]):
msg = "You must select at least one of the following: specify transitions, use factors, or override layer."
LOGGER.error(msg)
raise ValueError, msg
##transition table validation
#raise error if transition table provided, but not used
if args["transition"] and not(args["calculate_transition"] or args["calculate_factors"]):
msg = "Transition table provided but not used."
LOGGER.warn(msg)
#raise ValueError, msg
transition_dict = {}
if args["calculate_transition"] or args["calculate_factors"]:
#load transition table
transition_dict = pygeoprocessing.geoprocessing.get_lookup_from_csv(args["transition"], args["transition_id"])
#raise error if LULC contains cover id's not in transition table
landcover_count_dict = pygeoprocessing.geoprocessing.unique_raster_values_count(landcover_uri)
missing_lulc = set(landcover_count_dict).difference(transition_dict.keys())
if len(missing_lulc) > 0 :
missing_lulc = list(missing_lulc)
missing_lulc.sort()
            missing_lulc = ", ".join([str(l) for l in missing_lulc])
msg = "Missing suitability information for cover(s) %s." % missing_lulc
LOGGER.error(msg)
raise ValueError, msg
for cover_id in transition_dict:
#raise error if percent change for new LULC
if (transition_dict[cover_id][args["percent_field"]] > 0) and not (cover_id in landcover_count_dict):
msg = "Cover %i does not exist in LULC and therefore cannot have a percent change." % cover_id
LOGGER.error(msg)
raise ValueError, msg
#raise error if change by percent and area both specified
if (transition_dict[cover_id][args["percent_field"]] > 0) and (transition_dict[cover_id][args["area_field"]] > 0):
msg = "Cover %i cannot have both an increase by percent and area." % cover_id
LOGGER.error(msg)
raise ValueError, msg
##factor parameters validation
if args["calculate_factors"]:
pass
#error if overall physical weight not in [0, 1] range
##factor table validation
#if polygon no distance field allowed
#if point or line, integer distance field only
#error if same factor twice for same coverage
###
#resample, align and rasterize data
###
if args["calculate_priorities"]:
LOGGER.info("Calculating priorities.")
priorities_dict = calculate_priority(args["priorities_csv_uri"])
#check geographic extents, projections
## #validate resampling size
## if args["resolution"] != "":
## if args["resolution"] < pygeoprocessing.geoprocessing.get_cell_size_from_uri(landcover_uri):
## msg = "The analysis resolution cannot be smaller than the input."
## LOGGER.error(msg)
## raise ValueError, msg
##
## else:
## LOGGER.info("Resampling land cover.")
## #gdal.GRA_Mode might be a better resample method, but requires GDAL >= 1.10.0
## bounding_box = pygeoprocessing.geoprocessing.get_bounding_box(landcover_uri)
## pygeoprocessing.geoprocessing.resize_and_resample_dataset_uri(landcover_uri,
## bounding_box,
## args["resolution"],
## landcover_resample_uri,
## "nearest")
## LOGGER.debug("Changing landcover uri to resampled uri.")
## landcover_uri = landcover_resample_uri
cell_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(landcover_uri)
suitability_transition_dict = {}
if args["calculate_transition"]:
for next_lulc in transition_dict:
this_uri = os.path.join(workspace, transition_name % next_lulc)
#construct reclass dictionary
reclass_dict = {}
all_zeros = True
for this_lulc in transition_dict:
value = int(transition_dict[this_lulc][str(next_lulc)])
reclass_dict[this_lulc] = value * transition_scale
all_zeros = all_zeros and (value == 0)
if not all_zeros:
#reclass lulc by reclass_dict
pygeoprocessing.geoprocessing.reclassify_dataset_uri(landcover_uri,
reclass_dict,
this_uri,
transition_type,
suitability_nodata,
exception_flag = "values_required")
#changing nodata value so 0's no longer nodata
dataset = gdal.Open(this_uri, 1)
band = dataset.GetRasterBand(1)
nodata = band.SetNoDataValue(transition_nodata)
dataset = None
suitability_transition_dict[next_lulc] = this_uri
suitability_factors_dict = {}
if args["calculate_factors"]:
factor_dict = pygeoprocessing.geoprocessing.get_lookup_from_csv(args["suitability"], args["suitability_id"])
factor_uri_dict = {}
factor_folder = args["suitability_folder"]
if not args["factor_inclusion"]:
option_list=["ALL_TOUCHED=TRUE"]
else:
option_list = ["ALL_TOUCHED=FALSE"]
for factor_id in factor_dict:
factor = factor_dict[factor_id][args["suitability_layer"]]
factor_stem, _ = os.path.splitext(factor)
suitability_field_name = factor_dict[factor_id][args["suitability_field"]]
distance = factor_dict[factor_id][args["distance_field"]]
cover_id = int(factor_dict[factor_id][args["suitability_cover_id"]])
weight = int(factor_dict[factor_id][args["suitability_weight"]])
LOGGER.debug("Found reference to factor (%s, %s, %s) for cover %i.", factor_stem, suitability_field_name, distance, cover_id)
if not (factor_stem, suitability_field_name, distance) in factor_uri_dict:
factor_uri = os.path.join(factor_folder, factor)
if not os.path.exists(factor_uri):
msg = "Missing file %s." % factor_uri
LOGGER.error(msg)
raise ValueError, msg
shape_type = get_geometry_type_from_uri(factor_uri)
LOGGER.debug("Processing %s.", shapeTypes[shape_type])
if shape_type in [5, 15, 25, 31]: #polygon
LOGGER.info("Rasterizing %s using sutibality field %s.", factor_stem, suitability_field_name)
ds_uri = os.path.join(workspace, suitability_name % (factor_stem, suitability_field_name))
burn_value = [1]
suitability_field = ["ATTRIBUTE=%s" % suitability_field_name]
gdal_format = gdal.GDT_Float64
pygeoprocessing.geoprocessing.new_raster_from_base_uri(landcover_uri, ds_uri, raster_format, transition_nodata, gdal_format, fill_value = 0)
pygeoprocessing.geoprocessing.rasterize_layer_uri(ds_uri, factor_uri, burn_value, option_list=option_list + suitability_field)
factor_uri_dict[(factor_stem, suitability_field_name, distance)] = ds_uri
elif shape_type in [1, 3, 8, 11, 13, 18, 21, 23, 28]: #point or line
# For features with no area, it's (almost) impossible to
# hit the center pixel, so we use ALL_TOUCHED=TRUE
option_list=["ALL_TOUCHED=TRUE"]
distance = int(distance)
ds_uri = os.path.join(workspace, suitability_name % (factor_stem, str(distance) + '_raw_raster'))
distance_uri = os.path.join(workspace, suitability_name % (factor_stem, str(distance) + '_raw_distance'))
fdistance_uri = os.path.join(workspace, suitability_name % (factor_stem, distance))
normalized_uri = os.path.join(workspace, normalized_name % (factor_stem, distance))
burn_value = [1]
LOGGER.info("Buffering rasterization of %s to distance of %i.", factor_stem, distance)
gdal_format = gdal.GDT_Byte
pygeoprocessing.geoprocessing.new_raster_from_base_uri(landcover_uri, ds_uri, raster_format, -1, gdal_format)
landcover_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(landcover_uri)
ds_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(ds_uri)
pygeoprocessing.geoprocessing.vectorize_datasets([landcover_uri], \
lambda x: 0 if x != landcover_nodata else -1, \
ds_uri, \
pygeoprocessing.geoprocessing.get_datatype_from_uri(ds_uri), \
ds_nodata, \
pygeoprocessing.geoprocessing.get_cell_size_from_uri(ds_uri), \
'intersection')
pygeoprocessing.geoprocessing.rasterize_layer_uri(ds_uri, factor_uri, burn_value, option_list)
calculate_distance_raster_uri(ds_uri, distance_uri)
def threshold(value):
result = numpy.where(value > distance, transition_nodata, value)
return numpy.where(value == transition_nodata, transition_nodata, result)
pygeoprocessing.geoprocessing.vectorize_datasets([distance_uri],
threshold,
fdistance_uri,
pygeoprocessing.geoprocessing.get_datatype_from_uri(distance_uri),
transition_nodata,
cell_size,
"union",
vectorize_op = False)
pygeoprocessing.geoprocessing.calculate_raster_stats_uri(fdistance_uri)
minimum, maximum, _, _ = pygeoprocessing.geoprocessing.get_statistics_from_uri(fdistance_uri)
def normalize_op(value):
diff = float(maximum - minimum)
return numpy.where(
value == transition_nodata,
suitability_nodata,
((distance_scale - 1) - (((value - minimum) / \
diff) * (distance_scale - 1))) + 1)
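# normalize_op maps a distance d in [minimum, maximum] onto an
# inverted 1..distance_scale range: d == minimum scores distance_scale,
# d == maximum scores 1 (e.g. with distance_scale = 100, the midpoint
# distance scores 50.5).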
pygeoprocessing.geoprocessing.vectorize_datasets([fdistance_uri],
normalize_op,
normalized_uri,
transition_type,
transition_nodata,
cell_size,
"union",
vectorize_op = False)
factor_uri_dict[(factor_stem, suitability_field_name, distance)] = normalized_uri
else:
raise ValueError, "Invalid geometry type %i." % shape_type
# Apply nodata to the factors raster
landcover_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(landcover_uri)
temp_uri = pygeoprocessing.geoprocessing.temporary_filename()
def apply_nodata_op(landcover, value):
return numpy.where(landcover == landcover_nodata, 0, value)
pygeoprocessing.geoprocessing.vectorize_datasets( \
[landcover_uri,
factor_uri_dict[(factor_stem, suitability_field_name, distance)]],
apply_nodata_op,
temp_uri,
transition_type,
transition_nodata,
cell_size,
"union",
vectorize_op = False)
def identity_op(x):
return x
pygeoprocessing.geoprocessing.vectorize_datasets( \
[temp_uri],
identity_op,
factor_uri_dict[(factor_stem, suitability_field_name, distance)],
transition_type,
transition_nodata,
cell_size,
"union",
vectorize_op = False)
else:
LOGGER.debug("Skipping already processed suitability layer.")
LOGGER.debug("Adding factor (%s, %s, %s) to cover %i suitability list.", factor_stem, suitability_field_name, distance, cover_id)
if cover_id in suitability_factors_dict:
suitability_factors_dict[cover_id].append((factor_uri_dict[(factor_stem, suitability_field_name, distance)], weight))
else:
suitability_factors_dict[cover_id] = [(factor_uri_dict[(factor_stem, suitability_field_name, distance)], weight)]
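# Combine each cover's factor rasters into a single suitability raster
# as a weighted average; the per-factor weights are normalized to sum
# to 1 before blending.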
for cover_id in suitability_factors_dict:
if len(suitability_factors_dict[cover_id]) > 1:
LOGGER.info("Combining factors for cover type %i.", cover_id)
ds_uri = os.path.join(workspace, combined_name % cover_id)
uri_list, weights_list = zip(*suitability_factors_dict[cover_id])
total = float(sum(weights_list))
weights_list = [weight / total for weight in weights_list]
def weighted_op(*values):
result = (values[0] * weights_list[0]).astype(float)
for v, w in zip(values[1:], weights_list[1:]):
result += v * w
return result
# print('------files:', uri_list, weights_list)
pygeoprocessing.geoprocessing.vectorize_datasets(list(uri_list),
weighted_op,
ds_uri,
suitability_type,
transition_nodata,
cell_size,
"union",
vectorize_op = False)
suitability_factors_dict[cover_id] = ds_uri
else:
suitability_factors_dict[cover_id] = suitability_factors_dict[cover_id][0][0]
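# Merge the two suitability sources: where a cover has both a
# transition raster and a factor raster, suitability_op blends them
# (weighted by the physical suitability weight); otherwise the single
# available raster is used as-is.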
suitability_dict = {}
if args["calculate_transition"]:
suitability_dict = suitability_transition_dict
if args["calculate_factors"]:
for cover_id in suitability_factors_dict:
if cover_id in suitability_dict:
LOGGER.info("Combining suitability for cover %i.", cover_id)
ds_uri = os.path.join(workspace, factors_name % cover_id)
LOGGER.debug("cover_ids: %s", suitability_dict.keys())
pygeoprocessing.geoprocessing.vectorize_datasets([suitability_transition_dict[cover_id],
suitability_factors_dict[cover_id]],
suitability_op,
ds_uri,
transition_type,
transition_nodata,
cell_size,
"union")
suitability_dict[cover_id] = ds_uri
else:
suitability_dict[cover_id] = suitability_factors_dict[cover_id]
elif args["calculate_factors"]:
suitability_dict = suitability_factors_dict
#clump and sieve
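# The minimum patch size is given in hectares (1 ha = 10,000 m^2);
# e.g. with a 30 m cell, 1 ha is ceil(10000 / 900) = 12 pixels.
# Suitable clumps no larger than this threshold are removed.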
for cover_id in transition_dict:
if (transition_dict[cover_id][args["patch_field"]] > 0) and (cover_id in suitability_dict):
LOGGER.info("Filtering patches from %i.", cover_id)
size = int(math.ceil( \
(transition_dict[cover_id][args["patch_field"]] * 10000) / \
(cell_size ** 2)))
output_uri = os.path.join(workspace, filter_name % cover_id)
filter_fragments(suitability_dict[cover_id], size, output_uri)
suitability_dict[cover_id] = output_uri
###
#compute intermediate data if needed
###
#constraints raster (reclass using permeability values, filters on clump size)
if args["calculate_constraints"]:
LOGGER.info("Rasterizing constraints.")
constraints_uri = args["constraints"]
constraints_field_name = args["constraints_field"]
constraints_ds_uri = os.path.join(workspace, constraints_name)
option_list = ["ALL_TOUCHED=FALSE"]
burn_value = [0]
constraints_field = ["ATTRIBUTE=%s" % constraints_field_name]
gdal_format = gdal.GDT_Float64
pygeoprocessing.geoprocessing.new_raster_from_base_uri(landcover_uri, constraints_ds_uri, raster_format, transition_nodata, gdal_format, fill_value = 1)
pygeoprocessing.geoprocessing.rasterize_layer_uri(constraints_ds_uri, constraints_uri, burn_value, option_list=option_list + constraints_field)
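# The constraints raster is initialized to 1 (fully convertible) and
# overwritten with each feature's field value, so 0 marks fully
# protected cells; values must lie in [0, 1] because they later
# multiply the suitability scores.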
# Check that the values make sense
raster = gdal.Open(constraints_ds_uri)
band = raster.GetRasterBand(1)
array = band.ReadAsArray()
unique = numpy.unique(array)
assert (unique[0] >= 0.0) and (unique[-1] <= 1.0), \
'Invalid raster value in field ' + constraints_field_name + ' in ' \
+ constraints_uri
else:
LOGGER.info("Constraints not included.")
proximity_dict = {}
if args["calculate_proximity"]:
LOGGER.info("Calculating proximity.")
cover_types = transition_dict.keys()
for cover_id in transition_dict:
if transition_dict[cover_id][args["proximity_field"]] > 0 and cover_id in suitability_dict:
distance = int(transition_dict[cover_id][args["proximity_field"]])
LOGGER.info("Calculating proximity for %i.", cover_id)
reclass_dict = dict(zip(cover_types, [1] * len(cover_types)))
reclass_dict[cover_id] = 0
ds_uri = os.path.join(workspace, cover_name % cover_id)
distance_uri = pygeoprocessing.geoprocessing.temporary_filename()
fdistance_uri = os.path.join(workspace, proximity_name % cover_id)
normalized_uri = os.path.join(workspace, normalized_proximity_name % cover_id)
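# Proximity pipeline: reclassify the land cover into a binary mask
# that singles out the target cover, distance-transform the mask,
# drop cells beyond the proximity distance, and invert the result
# onto 1..distance_scale so cells nearer the cover score higher.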
pygeoprocessing.geoprocessing.reclassify_dataset_uri(landcover_uri,
reclass_dict,
ds_uri,
transition_type,
transition_nodata,
exception_flag = "values_required")
calculate_distance_raster_uri(ds_uri, distance_uri)
def threshold(value):
if value > distance:
return transition_nodata
return value
pygeoprocessing.geoprocessing.vectorize_datasets([distance_uri],
threshold,
fdistance_uri,
pygeoprocessing.geoprocessing.get_datatype_from_uri(distance_uri),
transition_nodata,
cell_size,
"union")
minimum, maximum, _, _ = pygeoprocessing.geoprocessing.get_statistics_from_uri(fdistance_uri)
assert minimum < maximum, "Wrong distance (min, max) = (" + \
str(minimum) + ", " + str(maximum) + ") in " + fdistance_uri
def normalize_op(value):
if value == transition_nodata:
return suitability_nodata
else:
return ((distance_scale - 1) \
- (((value - minimum) \
/ float(maximum - minimum)) \
* (distance_scale - 1))) \
+ 1
pygeoprocessing.geoprocessing.vectorize_datasets([fdistance_uri],
normalize_op,
normalized_uri,
transition_type,
transition_nodata,
cell_size,
"union")
proximity_dict[cover_id] = normalized_uri
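# Per-pixel combination operators: constraints scale suitability
# multiplicatively, proximity adds a weighted bonus (at most
# proximity_weight * distance_scale), and combined scores are capped
# at 100.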
def es_change_op(final_es, initial_es):
return final_es - initial_es
def constraint_op(suit, cons):
return suit * cons
def proximity_op(suit, prox):
v = suit + (prox * proximity_weight)
if v > 100:
return 100
else:
return v
def constraint_proximity_op(suit, cons, prox):
v = (cons * suit) + (prox * proximity_weight)
if v > 100:
return 100
else:
return v
for cover_id in suitability_dict:
suitability_uri = os.path.join(workspace, adjusted_suitability_name % cover_id)
if args["calculate_constraints"]:
if cover_id in proximity_dict:
LOGGER.info("Combining suitability, proximity, and constraints for %i.", cover_id)
uri_list = [suitability_dict[cover_id],
constraints_ds_uri,
proximity_dict[cover_id]]
LOGGER.info("Vectorizing: %s", ", ".join(uri_list))
pygeoprocessing.geoprocessing.vectorize_datasets(uri_list,
constraint_proximity_op,
suitability_uri,
transition_type,
transition_nodata,
cell_size,
"union")
suitability_dict[cover_id] = suitability_uri
else:
LOGGER.info("Combining suitability and constraints for %i.", cover_id)
uri_list = [suitability_dict[cover_id],
constraints_ds_uri]
# print('------suitability and constraint files:', uri_list)
LOGGER.info("Vectorizing: %s", ", ".join(uri_list))
pygeoprocessing.geoprocessing.vectorize_datasets(uri_list,
constraint_op,
suitability_uri,
transition_type,
transition_nodata,
cell_size,
"union")
suitability_dict[cover_id] = suitability_uri
elif cover_id in proximity_dict:
LOGGER.info("Combining suitability and proximity for %i.", cover_id)
uri_list = [suitability_dict[cover_id],
proximity_dict[cover_id]]
LOGGER.info("Vectorizing: %s", ", ".join(uri_list))
pygeoprocessing.geoprocessing.vectorize_datasets(uri_list,
proximity_op,
suitability_uri,
transition_type,
transition_nodata,
cell_size,
"union")
suitability_dict[cover_id] = suitability_uri
#normalize probabilities to be on a 10 point scale
#probability raster (reclass using probability matrix)
#proximity raster (gaussian for each landcover type, using max distance)
#InVEST 2 uses 4-connectedness?
#combine rasters for weighting into suitability raster, multiply proximity by 0.3
#[suitability * (1-factor weight)] + (factors * factor weight) or only single raster
###
#reallocate pixels (disk heap sort, randomly reassign equal value pixels, applied in order)
###
#copy initial LULC
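# The scenario raster starts as a pixel-for-pixel copy of the baseline
# land cover and is modified in place below.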
scenario_uri = os.path.join(workspace, scenario_name)
src_ds = gdal.Open(landcover_uri)
n_cols = src_ds.RasterXSize
n_rows = src_ds.RasterYSize
dst_ds = driver.CreateCopy(scenario_uri, src_ds, 0)
dst_ds = None
src_ds = None
#identify LULC types undergoing change
change_list = []
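# Each change_list entry is (priority, cover_id, pixel_count): percent
# change is taken relative to the cover's current pixel count, while
# area change is given in hectares and converted to a pixel count.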
if args["calculate_priorities"]:
for cover_id in transition_dict:
percent_change = transition_dict[cover_id][args["percent_field"]]
area_change = transition_dict[cover_id][args["area_field"]]
if percent_change > 0:
change_list.append((priorities_dict[cover_id],
cover_id,
int((percent_change / 100.0) \
* landcover_count_dict[cover_id])))
elif area_change > 0:
change_list.append((priorities_dict[cover_id],
cover_id,
int(math.ceil((area_change * 10000) \
/ (cell_size**2)))))
else:
LOGGER.warn("Cover %i suitability specified, but no change indicated.", cover_id)
else:
for cover_id in transition_dict:
percent_change = transition_dict[cover_id][args["percent_field"]]
area_change = transition_dict[cover_id][args["area_field"]]
if percent_change > 0:
change_list.append((transition_dict[cover_id][args["priority_field"]],
cover_id,
int((percent_change / 100.0) \
* landcover_count_dict[cover_id])))
elif area_change > 0:
change_list.append((transition_dict[cover_id][args["priority_field"]],
cover_id,
int(math.ceil((area_change * 10000) \
/ (cell_size**2)))))
else:
LOGGER.warn("Cover %i suitability specified, but no change indicated.", cover_id)
change_list.sort(reverse=True)
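# Descending sort puts the highest-priority conversions first; as each
# cover claims pixels, the lower-priority covers' suitability rasters
# are zeroed at those locations to prevent double conversion.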
#change pixels
scenario_ds = gdal.Open(scenario_uri, 1)
scenario_band = scenario_ds.GetRasterBand(1)
scenario_array = scenario_band.ReadAsArray()
unconverted_pixels = {}
for index, (priority, cover_id, count) in enumerate(change_list):
LOGGER.debug("Increasing cover %i by %i pixels.", cover_id, count)
#open all lower priority suitability rasters and assign changed pixels value of 0
update_ds = {}
update_bands = {}
update_arrays = {}
for _, update_id, _ in change_list[index+1:]:
update_ds[update_id] = gdal.Open(suitability_dict[update_id], 1)
update_bands[update_id] = update_ds[update_id].GetRasterBand(1)
update_arrays[update_id] = update_bands[update_id].ReadAsArray()
##select pixels
#open suitability raster
src_ds = gdal.Open(suitability_dict[cover_id], 1)
src_band = src_ds.GetRasterBand(1)
src_array = src_band.ReadAsArray()
pixels_changed = 0
suitability_values = list(numpy.unique(src_array))
suitability_values.sort(reverse=True)
if suitability_values[-1]==0:
suitability_values.pop(-1)
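# Visit suitability scores best-first; a trailing 0 ("not suitable")
# is excluded from conversion.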
for suitability_score in suitability_values:
# Check if suitability is between 0 and 100 inclusive
if abs(suitability_score - 50) > 50:
print('suitability_values:', suitability_dict[cover_id])
for v in suitability_values:
print v, ' ',
assert abs(suitability_score - 50) <= 50, \
'Invalid suitability score ' + str(suitability_score)
if pixels_changed == count:
LOGGER.debug("All necessay pixels converted.")
break
LOGGER.debug("Checking pixels with suitability of %i.", suitability_score)
#mask out everything except the current suitability score
mask = src_array == suitability_score
#label patches
label_im, nb_labels = scipy.ndimage.label(mask)
#get patch sizes
patch_sizes = scipy.ndimage.sum(mask, label_im, range(1, nb_labels + 1))
patch_labels = numpy.array(range(1, nb_labels + 1))
patch_locations = scipy.ndimage.find_objects(label_im, nb_labels)
#randomize patch order
numpy.random.shuffle(patch_labels)
#check patches for conversion
patch_label_count = patch_labels.size
for l in range(patch_label_count):
label = patch_labels[l]
source = label_im[patch_locations[label-1]]
target = scenario_array[patch_locations[label-1]]
pixels_to_change = numpy.where(source == label)
assert pixels_to_change[0].size == patch_sizes[label-1]
if patch_sizes[label-1] + pixels_changed > count:
#mask out everything except the current patch
#patch = numpy.where(label_im == label)
#patch_mask = numpy.zeros_like(scenario_array)
patch_mask = numpy.zeros_like(target)
#patch_mask[patch] = 1
patch_mask[pixels_to_change] = 1
#calculate the distance to exit the patch
#tmp_array = scipy.ndimage.morphology.distance_transform_edt(patch_mask)
tmp_array = scipy.ndimage.morphology.distance_transform_edt(patch_mask)
#tmp_array = tmp_array[patch]
tmp_array = tmp_array[pixels_to_change]
#select the number of pixels that need to be converted
tmp_index = numpy.argsort(tmp_array)
tmp_index = tmp_index[:count - pixels_changed]
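# Partial conversion of the final patch: distance_transform_edt gives
# each patch pixel's distance to the patch edge, and the ascending
# argsort picks edge-most pixels first, keeping the converted subset
# on the patch periphery.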
#convert the selected pixels into coordinates
#pixels_to_change = numpy.array(zip(patch[0], patch[1]))
pixels_to_change = numpy.array(zip(pixels_to_change[0], pixels_to_change[1]))
pixels_to_change = pixels_to_change[tmp_index]
pixels_to_change = zip(*pixels_to_change)
#change the pixels in the scenario
#scenario_array[pixels_to_change] = cover_id
target[pixels_to_change] = cover_id
pixels_changed = count
#alter other suitability rasters to prevent double conversion
for _, update_id, _ in change_list[index+1:]:
#update_arrays[update_id][pixels_to_change] = 0
target = update_arrays[update_id][patch_locations[label-1]]
target[pixels_to_change] = 0
break
else:
#convert patch, increase count of changes
target[pixels_to_change] = cover_id
pixels_changed += patch_sizes[label-1]
#alter other suitability rasters to prevent double conversion
for _, update_id, _ in change_list[index+1:]:
target = update_arrays[update_id][patch_locations[label-1]]
target[pixels_to_change] = 0
#report and record unchanged pixels
if pixels_changed < count:
LOGGER.warn("Not all pixels converted.")
unconverted_pixels[cover_id] = count - pixels_changed
#write new suitability arrays
for _, update_id, _ in change_list[index+1:]:
update_bands[update_id].WriteArray(update_arrays[update_id])
update_arrays[update_id] = None
update_bands[update_id] = None
update_ds[update_id] = None
scenario_band.WriteArray(scenario_array)
scenario_array = None
scenario_band = None
scenario_ds = None
#apply override
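# Override features are burned directly into band 1 of the finished
# scenario raster using values from the override field.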
if args["override_layer"]:
LOGGER.info("Overriding pixels using values from field %s.", args["override_field"])
datasource = ogr.Open(args["override"])
layer = datasource.GetLayer()
dataset = gdal.Open(scenario_uri, 1)
if dataset == None:
msg = "Could not open landcover transition raster."
LOGGER.error(msg)
raise IOError, msg
if datasource == None:
msg = "Could not open override vector."
LOGGER.error(msg)
raise IOError, msg
if not bool(args["override_inclusion"]):
LOGGER.debug("Overriding all touched pixels.")
options = ["ALL_TOUCHED=TRUE", "ATTRIBUTE=%s" % args["override_field"]]
else:
LOGGER.debug("Overriding only pixels with covered center points.")
options = ["ATTRIBUTE=%s" % args["override_field"]]
gdal.RasterizeLayer(dataset, [1], layer, options=options)
dataset.FlushCache()
datasource = None
dataset = None
###
#tabulate coverages
###
unique_raster_values_count, transitions = get_transition_set_count_from_uri([landcover_uri, scenario_uri])
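# Write the HTML summary report: cover counts before and after, a
# change table with charts, the transition matrix, any unconverted
# pixels, and copies of the input tables.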
htm = open(landcover_htm_uri,'w')
htm.write("<html><head><title>Scenario Generator Report</title>")
htm.write("<style type='text/css'>")
htm.write("table {border-collapse: collapse; font-size: 1em;}")
htm.write("td {padding: 10px;}")
htm.write('body {font-family: Arial, Helvetica, sans-serif; font-size: 1em;}')
htm.write('h2 {background: #DDDDDD; padding: 10px;}')
htm.write("</style>")
jquery_uri = os.path.join(os.path.dirname(os.path.abspath(__file__)), "jquery-1.6.2.min.js")
htm.write("<script>\n" + open(jquery_uri).read() + "\n</script>")
highcharts_uri = os.path.join(os.path.dirname(os.path.abspath(__file__)), "highcharts.js")
htm.write("<script>\n" + open(highcharts_uri).read() + "\n</script>")
htm.write("</head><body>")
htm.write("<div style=''>")
htm.write("<h1>Scenario Output Summary</h1>")
htm.write("<h2>Initial Landscape</h2>")
htm.write("\n<table BORDER=1>")
initial_cover_id_list = unique_raster_values_count[landcover_uri].keys()
initial_cover_id_list.sort()
htm.write("\n<tr><td>ID</td><td>")
htm.write("</td><td>".join([str(cover_id) for cover_id in initial_cover_id_list]))
htm.write("\n</td></tr>")
htm.write("\n<tr><td>Count</td><td>")
htm.write("</td><td>".join([str(unique_raster_values_count[landcover_uri][cover_id]) for cover_id in initial_cover_id_list]))
htm.write("\n</td></tr>")
htm.write("\n</table>")
htm.write("<h2>Scenario Landscape</h2>")
htm.write("\n<table BORDER=1>")
scenario_cover_id_list = unique_raster_values_count[scenario_uri].keys()
scenario_cover_id_list.sort()
htm.write("\n<tr><td>ID</td><td>")
htm.write("</td><td>".join([str(cover_id) for cover_id in scenario_cover_id_list]))
htm.write("\n</td></tr>")
htm.write("\n<tr><td>Count</td><td>")
htm.write("</td><td>".join([str(unique_raster_values_count[scenario_uri][cover_id]) for cover_id in scenario_cover_id_list]))
htm.write("\n</td></tr>")
htm.write("\n</table>")
cover_dict = {}
for cover_id in set(unique_raster_values_count[landcover_uri].keys()).union(set(unique_raster_values_count[scenario_uri].keys())):
try:
before = unique_raster_values_count[landcover_uri][cover_id]
except KeyError:
before = 0
try:
after = unique_raster_values_count[scenario_uri][cover_id]
except KeyError:
after = 0
cover_dict[cover_id] = (before, after)
htm.write("<h2>Change Table</h2>")
transition_dict = pygeoprocessing.geoprocessing.get_lookup_from_csv(args["transition"], args["transition_id"])
cover_names_dict = {}
for cover in transition_dict:
cover_names_dict[cover] = transition_dict[cover]["Name"]
htm.write(generate_chart_html(cover_dict, cover_names_dict, workspace))
htm.write("<h2>Transition Matrix</h2>")
htm.write("\n<table BORDER=1>")
htm.write("\n<tr><td>ID</td><td>")
htm.write("</td><td>".join([str(cover_id) for cover_id in scenario_cover_id_list]))
htm.write("\n</td></tr>")
for initial_cover_id in initial_cover_id_list:
htm.write("\n<tr><td>%i</td>" % initial_cover_id)
for scenario_cover_id in scenario_cover_id_list:
try:
htm.write("<td>%i</td>" % transitions[0][initial_cover_id][scenario_cover_id])
except KeyError:
htm.write("<td><FONT COLOR=lightgray>%i</FONT></td>" % 0)
htm.write("\n</tr>")
htm.write("\n</table>")
unconverted_cover_id_list = unconverted_pixels.keys()
unconverted_cover_id_list.sort()
if len(unconverted_cover_id_list) > 0:
htm.write("<h2>Unconverted Pixels</h2>")
htm.write("\n<table BORDER=1>")
htm.write("<tr><td>ID</td><td>Count</td></tr>")
for cover_id in unconverted_cover_id_list:
htm.write("<tr><td>%i</td><td>%i</td></tr>" % (cover_id, unconverted_pixels[cover_id]))
htm.write("\n</table>")
else:
htm.write("<p><i>All target pixels converted.</i></p>")
htm.write("\n</html>")
#input CSVs
input_csv_list = []
if args["calculate_priorities"]:
input_csv_list.append((args["priorities_csv_uri"], "Priorities Table"))
if args["calculate_transition"] or args["calculate_factors"]:
input_csv_list.append((args["transition"], "Transition Table"))
if args["calculate_factors"]:
input_csv_list.append((args["suitability"], "Factors Table"))
htm.write("<h1>Input Tables</h1>")
for csv_uri, name in input_csv_list:
table = "\n<table BORDER=1><tr><td>" + open(csv_uri).read().strip().replace(",","</td><td>").replace("\n","</td></tr><tr><td>") + "</td></tr></table>"
htm.write("<h2>%s</h2>" % name)
htm.write(table)
htm.write("\n</div>\n</body>\n</html>")
htm.close()
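# --- Usage sketch (illustrative only: paths and values below are
# hypothetical, and execute() reads more keys than shown -- see the
# body above for the full set) ---
# if __name__ == '__main__':
#     example_args = {
#         "workspace_dir": "scenario_workspace",
#         "landcover": "baseline_lulc.tif",
#         "suffix": "",
#         "transition": "transition.csv",
#         "suitability": "factors.csv",
#         "suitability_folder": "factors",
#         "calculate_priorities": False,
#         "calculate_transition": True,
#         "calculate_factors": False,
#         "calculate_constraints": False,
#         "calculate_proximity": False,
#         "override_layer": False,
#         "factor_inclusion": False,
#         "weight": 0.5,
#     }
#     execute(example_args)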
import sys
import os
import math
import shutil
import disk_sort
import struct
import operator
import logging
from decimal import Decimal
from fractions import Fraction
import numpy
from scipy.linalg import eig
import scipy.ndimage
import cProfile
import pstats
from osgeo import gdal, ogr
import pygeoprocessing.geoprocessing
import shutil
logging.basicConfig(format='%(asctime)s %(name)-20s %(levelname)-8s \
%(message)s', level=logging.DEBUG, datefmt='%m/%d/%Y %H:%M:%S ')
LOGGER = logging.getLogger('invest_natcap.scenario_generator.scenario_generator')
def calculate_weights(arr, rounding=4):
PLACES = Decimal(10) ** -(rounding)
evas, eves = eig(arr)
eva = max(evas)
eva_idx = evas.tolist().index(eva)
eve = eves.take((eva_idx,), axis=1)
normalized = eve / sum(eve)
vector = [abs(e[0]) for e in normalized]
return [ Decimal( str(v) ).quantize(PLACES) for v in vector ]
def calculate_priority(table_uri):
table = [line.strip().split(",") for line in open(table_uri).readlines()]
id_index = table[0].index("Id")
cover_id_list = [row[id_index] for row in table]
cover_id_list.pop(0)
cover_id_index_list = [table[0].index(cover_id) for cover_id in cover_id_list]
matrix = numpy.zeros((len(cover_id_list),len(cover_id_list)))
for row in range(len(cover_id_list)):
for col in range(row+1):
matrix[row][col] = float(table[row+1][cover_id_index_list[col]])
matrix[col][row] = 1 / matrix[row][col]
cover_id_list = [int(cover_id) for cover_id in cover_id_list]
return dict(zip(cover_id_list, calculate_weights(matrix, 4)))
def calculate_distance_raster_uri(dataset_in_uri, dataset_out_uri):
pygeoprocessing.geoprocessing.distance_transform_edt(dataset_in_uri, dataset_out_uri)
def pixel_to_meters_op(x):
x[x != nodata] *= cell_size
return x
cell_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(dataset_in_uri)
nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(dataset_out_uri)
tmp = pygeoprocessing.geoprocessing.temporary_filename()
pygeoprocessing.geoprocessing.vectorize_datasets(
[dataset_out_uri], \
pixel_to_meters_op, \
tmp, \
gdal.GDT_Float64, \
nodata, \
cell_size, \
'union', \
vectorize_op = False)
def identity_op(x):
return x
pygeoprocessing.geoprocessing.vectorize_datasets(
[tmp], \
identity_op, \
dataset_out_uri, \
gdal.GDT_Float64, \
nodata, \
cell_size, \
'union', \
vectorize_op = False)
pygeoprocessing.geoprocessing.calculate_raster_stats_uri(dataset_out_uri)
odes = list(lulc_codes)
lulc_codes.sort()
if len(lulc_codes) < 2 ** 8:
data_type = gdal.GDT_UInt16
shift = 8
elif len(lulc_codes) < 2 ** 16:
data_type = gdal.GDT_UInt32
shift = 16
else:
raise ValueError, "Too many LULC codes."
reclass_orig_dict = dict(zip(lulc_codes,range(1,len(lulc_codes)+1)))
reclass_dest_dict = {}
for key in reclass_orig_dict:
reclass_dest_dict[key] = reclass_orig_dict[key] << shift
def add_op(orig, dest):
return orig + dest
counts={}
for i in range(len(dataset_uri_list)-1):
orig_uri = pygeoprocessing.geoprocessing.temporary_filename()
dest_uri = pygeoprocessing.geoprocessing.temporary_filename()
multi_uri = pygeoprocessing.geoprocessing.temporary_filename()
pygeoprocessing.geoprocessing.reclassify_dataset_uri(dataset_uri_list[i],
reclass_orig_dict,
orig_uri,
data_type,
nodata,
exception_flag="values_required")
pygeoprocessing.geoprocessing.reclassify_dataset_uri(dataset_uri_list[i+1],
reclass_dest_dict,
dest_uri,
data_type,
nodata,
exception_flag="values_required")
pygeoprocessing.geoprocessing.vectorize_datasets([orig_uri, dest_uri],
add_op,
multi_uri,
data_type,
nodata,
cell_size,
"union")
counts[i]=pygeoprocessing.geoprocessing.unique_raster_values_count(multi_uri, False)
restore_classes = {}
for key in reclass_orig_dict:
restore_classes[reclass_orig_dict[key]] = key
restore_classes[nodata] = lulc_nodata
LOGGER.debug("Decoding transition table.")
transitions = {}
for key in counts:
transitions[key]={}
for k in counts[key]:
try:
orig = restore_classes[k % (2**shift)]
except KeyError:
orig = lulc_nodata
try:
dest = restore_classes[k >> shift]
except KeyError:
dest = lulc_nodata
try:
transitions[key][orig][dest] = counts[key][k]
except KeyError:
transitions[key][orig] = {dest : counts[key][k]}
return unique_raster_values_count, transitions
def generate_chart_html(cover_dict, cover_names_dict, workspace_dir):
html = "\n<table BORDER=1>"
html += "\n<TR><td>Id</td><td>% Before</td><td>% After</td></TR>"
cover_id_list = cover_dict.keys()
cover_id_list.sort()
cover_id_list_chart = cover_names_dict.keys()
cover_id_list_chart.sort()
pixcount = 0
for cover_id in cover_id_list:
pixcount += cover_dict[cover_id][0]
pixcount = float(pixcount)
for cover_id in cover_id_list:
html += "\n<TR><td>%i</td><td>%i</td><td>%i</td></TR>" % (cover_id,
(cover_dict[cover_id][0] / pixcount) * 100,
(cover_dict[cover_id][1] / pixcount) * 100 )
html += "\n<table>"
thecharts = [
['Original',0],
['Final',1],
['Change',2]
]
hainitial = ""
hainitialnegative = ""
hainitiallist = []
hafinal = ""
hafinalnegative = ""
hafinallist = []
hachange = ""
hachangelist = []
haall = []
initialcover = []
finalcover = []
for cover_id in cover_id_list_chart:
try:
initialcover.append((cover_dict[cover_id][0] / pixcount) * 100)
except KeyError:
initialcover.append(0)
try:
finalcover.append((cover_dict[cover_id][1] / pixcount) * 100)
except KeyError:
finalcover.append(0)
html += "<style type='text/css'>"
html += "body {font-family: Arial, Helvetica, sans-serif; font-size: 0.9em;}"
html += "table#results {margin: 20px auto}"
html += "table#results th {text-align: left}"
html += "</style>"
html += "<script type='text/javascript'>\n"
html += "var chart,\n"
categories = []
html += "categories = ["
for cover_id in cover_id_list_chart:
categories.append("'"+cover_names_dict[cover_id]+"'")
html += ",".join(categories)
html += "]\n"
html +="$(document).ready(function() {\n"
for x in initialcover:
hainitial = hainitial +str(x)+","
hainitialnegative = hainitialnegative + "0,"
hainitiallist.append(float(x))
temp = []
temp.append(hainitial)
temp.append(hainitialnegative)
haall.append(temp)
thecharts[0].append(max(hainitiallist))
thecharts[0].append(min(hainitiallist))
for x in finalcover:
hafinal = hafinal +str(x)+","
hafinalnegative = hafinalnegative + "0,"
hafinallist.append(float(x))
temp = []
temp.append(hafinal)
temp.append(hafinalnegative)
haall.append(temp)
thecharts[1].append(max(hafinallist))
thecharts[1].append(min(hafinallist))
for x in range(len(initialcover)):
hachange = hachange + str(float(finalcover[x]) - float(initialcover[x]))+","
hachangelist.append(float(finalcover[x]) - float(initialcover[x]))
hachangelistnegative = ""
hachangelistpositive = ""
for item in hachangelist:
if item < 0:
hachangelistnegative=hachangelistnegative+str(item)+","
hachangelistpositive=hachangelistpositive+"0,"
else:
hachangelistpositive=hachangelistpositive+str(item)+","
hachangelistnegative=hachangelistnegative+"0,"
temp = []
temp.append(hachangelistpositive)
temp.append(hachangelistnegative)
haall.append(temp)
thecharts[2].append(max(hachangelist))
thecharts[2].append(min(hachangelist))
if thecharts[0][2] > thecharts[1][2]:
thecharts[1][2] = thecharts[0][2]
thecharts[2][2] = thecharts[0][2]
else:
thecharts[0][2] = thecharts[1][2]
thecharts[2][2] = thecharts[1][2]
for x in thecharts:
if x[0] == 'Change':
themin = x[3]
else:
themin = 0
html += "chart = new Highcharts.Chart({\n"
html += "chart: {renderTo: '"+x[0]+"container',defaultSeriesType: 'bar'},"
html += "title: {text: '"+x[0]+" Landcover'},"
html += "subtitle: {text: ''},"
html += "xAxis: [{categories: categories,reversed: false}, {opposite: true, reversed: false,categories: categories,linkedTo: 0}],"
html += "yAxis: {title: {text: null},labels: {formatter: function(){return Math.abs(this.value)}},min: "+str(themin)+",max: "+str(x[2])+"},"
html += "plotOptions: {series: { stacking: 'normal', showInLegend: false } },"
html += "tooltip: { formatter: function(){return '<b>'+ this.point.category +'</b><br/>'+'Area: '+ Highcharts.numberFormat(Math.abs(this.point.y), 0)+'%';}},"
html += "series: [{name: '',"
html += "data: ["+haall[x[1]][0]+"]}, {"
html += "name: '',"
html += "data: ["+haall[x[1]][1]+"]}]});\n"
html += "});\n"
html += "</script>\n"
for x in thecharts:
html += "<div id='"+x[0]+"container' style='width: 800px; height: 400px; margin: 20px 0'></div>\n"
return html
def filter_fragments(input_uri, size, output_uri):
LOGGER.debug("Filtering patches smaller than %i from %s.", size, input_uri)
src_ds = gdal.Open(input_uri)
src_band = src_ds.GetRasterBand(1)
src_array = src_band.ReadAsArray()
driver = gdal.GetDriverByName("GTiff")
driver.CreateCopy(output_uri, src_ds, 0 )
dst_ds = gdal.Open(output_uri, 1)
dst_band = dst_ds.GetRasterBand(1)
dst_array = numpy.copy(src_array)
suitability_values = numpy.unique(src_array)
if suitability_values[0] == 0:
suitability_values = suitability_values[1:]
eight_connectedness = numpy.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
four_connectedness = numpy.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
suitability_values_count = suitability_values.size
for v in range(1, suitability_values_count):
LOGGER.debug('Processing suitability value ' + \
str(suitability_values.size - v))
value = suitability_values[v]
mask = src_array == value
ones_in_mask = numpy.sum(mask)
label_im, nb_labels = scipy.ndimage.label(mask, four_connectedness)
fragment_sizes = \
scipy.ndimage.sum(mask, label_im, range(nb_labels + 1))
fragment_labels = numpy.array(range(nb_labels + 1))
small_fragment_mask = numpy.where(fragment_sizes <= size)
small_fragment_sizes = fragment_sizes[small_fragment_mask]
small_fragment_labels = fragment_labels[small_fragment_mask]
combined_small_fragment_size = numpy.sum(small_fragment_sizes)
fragments_location = scipy.ndimage.find_objects(label_im, nb_labels)
removed_pixels = 0
small_fragment_labels_count = small_fragment_labels.size
for l in range(small_fragment_labels_count-1):
label = small_fragment_labels[l+1]
last_label = small_fragment_labels[l]
size = small_fragment_sizes[l+1]
source = label_im[fragments_location[last_label]]
target = dst_array[fragments_location[last_label]]
pixels_to_remove = numpy.where(source == label)
target[pixels_to_remove] = 0
dst_band.WriteArray(dst_array)
def sum_uri(dataset_uri, datasource_uri):
"""Wrapper call to pygeoprocessing.geoprocessing.aggregate_raster_values_uri to extract total
:param dataset_uri: The uri for the input raster.
:type dataset_uri: str
:return: None
:rtype: None
"""
total = pygeoprocessing.geoprocessing.aggregate_raster_values_uri(dataset_uri, datasource_uri).total
return total.__getitem__(total.keys().pop())
def execute(args):
if ('transition' in args) and ('suitability' in args):
assert args['transition'] != args['suitability'], \
'Transition and suitability tables are the same: ' + \
args['transition'] + '. The model expects different tables.'
args["transition_id"] = "Id"
args["percent_field"] = "Percent Change"
args["area_field"] = "Area Change"
args["priority_field"] = "Priority"
args["proximity_field"] = "Proximity"
args["proximity_weight"] = "0.3"
args["patch_field"] = "Patch ha"
args["suitability_id"] = "Id"
args["suitability_layer"] = "Layer"
args["suitability_weight"] = "Wt"
args["suitability_field"] = "Suitfield"
args["distance_field"] = "Dist"
args["suitability_cover_id"] = "Cover ID"
args["returns_cover_id"] = "Cover ID"
args["returns_layer"] = "/Users/olwero/Dropbox/Work/Ecosystem_Services/NatCap/Olympics/2014/Scenarios/Exercise/inputtest/returns.csv"
workspace = args["workspace_dir"]
if not os.path.exists(workspace):
os.makedirs(workspace)
landcover_uri = args["landcover"]
if len(args["suffix"]) > 0:
suffix = "_" + args["suffix"].strip("_")
else:
suffix = ""
intermediate_dir = "intermediate"
if not os.path.exists(os.path.join(workspace, intermediate_dir)):
os.makedirs(os.path.join(workspace, intermediate_dir))
proximity_weight = float(args["proximity_weight"])
try:
physical_suitability_weight = float(args["weight"])
except KeyError:
physical_suitability_weight = 0.5
r_resample_uri = os.path.join(workspace, "resample" + suffix + ".tif")
landcover_transition_uri = os.path.join(workspace,"transitioned" + suffix + ".tif")
override_dataset_uri = os.path.join(workspace,"override" + suffix + ".tif")
landcover_htm_uri = os.path.join(workspace,"scenario-output-summary" + suffix + ".html")
pygeoprocessing.geoprocessing.create_directories([workspace])
transition_name = os.path.join(intermediate_dir, "transition_%i" + suffix + ".tif")
suitability_name = os.path.join(intermediate_dir, "%s_%s" + suffix + ".tif")
normalized_name = os.path.join(intermediate_dir, "%s_%s_norm" + suffix + ".tif")
combined_name = os.path.join(intermediate_dir, "factors_%s" + suffix + ".tif")
constraints_name = os.path.join(intermediate_dir, "constraints" + suffix + ".tif")
filter_name = os.path.join(intermediate_dir, "filter_%i" + suffix + ".tif")
factors_name = os.path.join(intermediate_dir, "suitability_%s" + suffix + ".tif")
cover_name = os.path.join(intermediate_dir, "cover_%i" + suffix + ".tif")
proximity_name = os.path.join(intermediate_dir, "proximity_%s" + suffix + ".tif")
normalized_proximity_name = os.path.join(intermediate_dir, "proximity_norm_%s" + suffix + ".tif")
adjusted_suitability_name = os.path.join(intermediate_dir, "adjusted_suitability_%s" + suffix + ".tif")
scenario_name = "scenario" + suffix + ".tif"
raster_format = "GTiff"
transition_type = gdal.GDT_Int16
transition_nodata = -1
change_nodata = -9999
transition_scale = 10
distance_scale = 100
suitability_nodata = 0
suitability_type = gdal.GDT_Int16
def suitability_op(trans, suit):
if trans == 0:
return 0
return round(((1 - physical_suitability_weight) * trans)\
+ (physical_suitability_weight * suit))
ds_type = "GTiff"
driver = gdal.GetDriverByName(ds_type)
if not any([args["calculate_transition"],
args["calculate_factors"],
args["override_layer"]]):
msg = "You must select at least one of the following: specify transitions, use factors, or override layer."
LOGGER.error(msg)
raise ValueError, msg
n"] and not(args["calculate_transition"] or args["calculate_factors"]):
msg = "Transition table provided but not used."
LOGGER.warn(msg)
transition_dict = {}
if args["calculate_transition"] or args["calculate_factors"]:
transition_dict = pygeoprocessing.geoprocessing.get_lookup_from_csv(args["transition"], args["transition_id"])
landcover_count_dict = pygeoprocessing.geoprocessing.unique_raster_values_count(landcover_uri)
missing_lulc = set(landcover_count_dict).difference(transition_dict.keys())
if len(missing_lulc) > 0 :
missing_lulc = list(missing_lulc)
missing_lulc.sort()
mising_lulc = ", ".join([str(l) for l in missing_lulc])
msg = "Missing suitability information for cover(s) %s." % missing_lulc
LOGGER.error(msg)
raise ValueError, msg
for cover_id in transition_dict:
#raise error if percent change for new LULC
if (transition_dict[cover_id][args["percent_field"]] > 0) and not (cover_id in landcover_count_dict):
msg = "Cover %i does not exist in LULC and therefore cannot have a percent change." % cover_id
LOGGER.error(msg)
raise ValueError, msg
#raise error if change by percent and area both specified
if (transition_dict[cover_id][args["percent_field"]] > 0) and (transition_dict[cover_id][args["area_field"]] > 0):
msg = "Cover %i cannot have both an increase by percent and area." % cover_id
LOGGER.error(msg)
raise ValueError, msg
##factor parameters validation
if args["calculate_factors"]:
pass
#error if overall physical weight not in [0, 1] range
##factor table validation
#if polygon no distance field allowed
#if point or line, integer distance field only
#error if same factor twice for same coverage
###
#resample, align and rasterize data
###
if args["calculate_priorities"]:
LOGGER.info("Calculating priorities.")
priorities_dict = calculate_priority(args["priorities_csv_uri"])
#check geographic extents, projections
## #validate resampling size
## if args["resolution"] != "":
## if args["resolution"] < pygeoprocessing.geoprocessing.get_cell_size_from_uri(landcover_uri):
## msg = "The analysis resolution cannot be smaller than the input."
## LOGGER.error(msg)
## raise ValueError, msg
##
## else:
## LOGGER.info("Resampling land cover.")
## #gdal.GRA_Mode might be a better resample method, but requires GDAL >= 1.10.0
## bounding_box = pygeoprocessing.geoprocessing.get_bounding_box(landcover_uri)
## pygeoprocessing.geoprocessing.resize_and_resample_dataset_uri(landcover_uri,
## bounding_box,
## args["resolution"],
## landcover_resample_uri,
## "nearest")
## LOGGER.debug("Changing landcover uri to resampled uri.")
## landcover_uri = landcover_resample_uri
cell_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(landcover_uri)
suitability_transition_dict = {}
if args["calculate_transition"]:
for next_lulc in transition_dict:
this_uri = os.path.join(workspace, transition_name % next_lulc)
#construct reclass dictionary
reclass_dict = {}
all_zeros = True
for this_lulc in transition_dict:
value = int(transition_dict[this_lulc][str(next_lulc)])
reclass_dict[this_lulc] = value * transition_scale
all_zeros = all_zeros and (value == 0)
if not all_zeros:
#reclass lulc by reclass_dict
pygeoprocessing.geoprocessing.reclassify_dataset_uri(landcover_uri,
reclass_dict,
this_uri,
transition_type,
suitability_nodata,
exception_flag = "values_required")
#changing nodata value so 0's no longer nodata
dataset = gdal.Open(this_uri, 1)
band = dataset.GetRasterBand(1)
nodata = band.SetNoDataValue(transition_nodata)
dataset = None
suitability_transition_dict[next_lulc] = this_uri
suitability_factors_dict = {}
if args["calculate_factors"]:
factor_dict = pygeoprocessing.geoprocessing.get_lookup_from_csv(args["suitability"], args["suitability_id"])
factor_uri_dict = {}
factor_folder = args["suitability_folder"]
if not args["factor_inclusion"]:
option_list=["ALL_TOUCHED=TRUE"]
else:
option_list = ["ALL_TOUCHED=FALSE"]
for factor_id in factor_dict:
factor = factor_dict[factor_id][args["suitability_layer"]]
factor_stem, _ = os.path.splitext(factor)
suitability_field_name = factor_dict[factor_id][args["suitability_field"]]
distance = factor_dict[factor_id][args["distance_field"]]
cover_id = int(factor_dict[factor_id][args["suitability_cover_id"]])
weight = int(factor_dict[factor_id][args["suitability_weight"]])
LOGGER.debug("Found reference to factor (%s, %s, %s) for cover %i.", factor_stem, suitability_field_name, distance, cover_id)
if not (factor_stem, suitability_field_name, distance) in factor_uri_dict:
factor_uri = os.path.join(factor_folder, factor)
if not os.path.exists(factor_uri):
msg = "Missing file %s." % factor_uri
LOGGER.error(msg)
raise ValueError, msg
shape_type = get_geometry_type_from_uri(factor_uri)
LOGGER.debug("Processing %s.", shapeTypes[shape_type])
if shape_type in [5, 15, 25, 31]:
LOGGER.info("Rasterizing %s using sutibality field %s.", factor_stem, suitability_field_name)
ds_uri = os.path.join(workspace, suitability_name % (factor_stem, suitability_field_name))
burn_value = [1]
suitability_field = ["ATTRIBUTE=%s" % suitability_field_name]
gdal_format = gdal.GDT_Float64
pygeoprocessing.geoprocessing.new_raster_from_base_uri(landcover_uri, ds_uri, raster_format, transition_nodata, gdal_format, fill_value = 0)
pygeoprocessing.geoprocessing.rasterize_layer_uri(ds_uri, factor_uri, burn_value, option_list=option_list + suitability_field)
factor_uri_dict[(factor_stem, suitability_field_name, distance)] = ds_uri
elif shape_type in [1, 3, 8, 11, 13, 18, 21, 23, 28]:
# hit the center pixel, so we use ALL_TOUCHED=TRUE
option_list=["ALL_TOUCHED=TRUE"]
distance = int(distance)
ds_uri = os.path.join(workspace, suitability_name % (factor_stem, str(distance) + '_raw_raster'))
distance_uri = os.path.join(workspace, suitability_name % (factor_stem, str(distance) + '_raw_distance'))
fdistance_uri = os.path.join(workspace, suitability_name % (factor_stem, distance))
normalized_uri = os.path.join(workspace, normalized_name % (factor_stem, distance))
burn_value = [1]
LOGGER.info("Buffering rasterization of %s to distance of %i.", factor_stem, distance)
gdal_format = gdal.GDT_Byte
pygeoprocessing.geoprocessing.new_raster_from_base_uri(landcover_uri, ds_uri, raster_format, -1, gdal_format)
landcover_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(landcover_uri)
ds_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(ds_uri)
pygeoprocessing.geoprocessing.vectorize_datasets([landcover_uri], \
lambda x: 0 if x != landcover_nodata else -1, \
ds_uri, \
pygeoprocessing.geoprocessing.get_datatype_from_uri(ds_uri), \
ds_nodata, \
pygeoprocessing.geoprocessing.get_cell_size_from_uri(ds_uri), \
'intersection')
pygeoprocessing.geoprocessing.rasterize_layer_uri(ds_uri, factor_uri, burn_value, option_list)
calculate_distance_raster_uri(ds_uri, distance_uri)
def threshold(value):
result = numpy.where(value > distance, transition_nodata, value)
return numpy.where(value == transition_nodata, transition_nodata, result)
pygeoprocessing.geoprocessing.vectorize_datasets([distance_uri],
threshold,
fdistance_uri,
pygeoprocessing.geoprocessing.get_datatype_from_uri(distance_uri),
transition_nodata,
cell_size,
"union",
vectorize_op = False)
pygeoprocessing.geoprocessing.calculate_raster_stats_uri(fdistance_uri)
minimum, maximum, _, _ = pygeoprocessing.geoprocessing.get_statistics_from_uri(fdistance_uri)
def normalize_op(value):
diff = float(maximum - minimum)
return numpy.where(
value == transition_nodata,
suitability_nodata,
((distance_scale - 1) - (((value - minimum) / \
diff) * (distance_scale - 1))) + 1)
pygeoprocessing.geoprocessing.vectorize_datasets([fdistance_uri],
normalize_op,
normalized_uri,
transition_type,
transition_nodata,
cell_size,
"union",
vectorize_op = False)
factor_uri_dict[(factor_stem, suitability_field_name, distance)] = normalized_uri
else:
raise ValueError, "Invalid geometry type %i." % shape_type
# Apply nodata to the factors raster
landcover_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(landcover_uri)
temp_uri = pygeoprocessing.geoprocessing.temporary_filename()
def apply_nodata_op(landcover, value):
return numpy.where(landcover == landcover_uri, 0, value)
pygeoprocessing.geoprocessing.vectorize_datasets( \
[landcover_uri,
factor_uri_dict[(factor_stem, suitability_field_name, distance)]],
apply_nodata_op,
temp_uri,
transition_type,
transition_nodata,
cell_size,
"union",
vectorize_op = False)
def identity_op(x):
return x
pygeoprocessing.geoprocessing.vectorize_datasets( \
[temp_uri],
identity_op,
factor_uri_dict[(factor_stem, suitability_field_name, distance)],
transition_type,
transition_nodata,
cell_size,
"union",
vectorize_op = False)
else:
LOGGER.debug("Skipping already processed suitability layer.")
LOGGER.debug("Adding factor (%s, %s, %s) to cover %i suitability list.", factor_stem, suitability_field_name, distance, cover_id)
if cover_id in suitability_factors_dict:
suitability_factors_dict[cover_id].append((factor_uri_dict[(factor_stem, suitability_field_name, distance)], weight))
else:
suitability_factors_dict[cover_id] = [(factor_uri_dict[(factor_stem, suitability_field_name, distance)], weight)]
for cover_id in suitability_factors_dict:
if len(suitability_factors_dict[cover_id]) > 1:
LOGGER.info("Combining factors for cover type %i.", cover_id)
ds_uri = os.path.join(workspace, combined_name % cover_id)
uri_list, weights_list = apply(zip, suitability_factors_dict[cover_id])
total = float(sum(weights_list))
weights_list = [weight / total for weight in weights_list]
def weighted_op(*values):
result = (values[0] * weights_list[0]).astype(float)
for v, w in zip(values[1:], weights_list[1:]):
result += v * w
return result
# print('------files:', uri_list, weights_list)
pygeoprocessing.geoprocessing.vectorize_datasets(list(uri_list),
weighted_op,
ds_uri,
suitability_type,
transition_nodata,
cell_size,
"union",
vectorize_op = False)
suitability_factors_dict[cover_id] = ds_uri
else:
suitability_factors_dict[cover_id] = suitability_factors_dict[cover_id][0][0]
suitability_dict = {}
if args["calculate_transition"]:
suitability_dict = suitability_transition_dict
if args["calculate_factors"]:
for cover_id in suitability_factors_dict:
if cover_id in suitability_dict:
LOGGER.info("Combining suitability for cover %i.", cover_id)
ds_uri = os.path.join(workspace, factors_name % cover_id)
print('cover_ids', suitability_dict.keys())
pygeoprocessing.geoprocessing.vectorize_datasets([suitability_transition_dict[cover_id],
suitability_factors_dict[cover_id]],
suitability_op,
ds_uri,
transition_type,
transition_nodata,
cell_size,
"union")
suitability_dict[cover_id] = ds_uri
else:
suitability_dict[cover_id] = suitability_factors_dict[cover_id]
elif args["calculate_factors"]:
suitability_dict = suitability_factors_dict
#clump and sieve
for cover_id in transition_dict:
if (transition_dict[cover_id][args["patch_field"]] > 0) and (cover_id in suitability_dict):
LOGGER.info("Filtering patches from %i.", cover_id)
size = 10000 * int(math.ceil( \
transition_dict[cover_id][args["patch_field"]] / \
(cell_size ** 2)))
output_uri = os.path.join(workspace, filter_name % cover_id)
filter_fragments(suitability_dict[cover_id], size, output_uri)
suitability_dict[cover_id] = output_uri
###
#compute intermediate data if needed
###
#contraints raster (reclass using permability values, filters on clump size)
if args["calculate_constraints"]:
LOGGER.info("Rasterizing constraints.")
constraints_uri = args["constraints"]
constraints_field_name = args["constraints_field"]
constraints_ds_uri = os.path.join(workspace, constraints_name)
option_list = ["ALL_TOUCHED=FALSE"]
burn_value = [0]
constraints_field = ["ATTRIBUTE=%s" % constraints_field_name]
gdal_format = gdal.GDT_Float64
pygeoprocessing.geoprocessing.new_raster_from_base_uri(landcover_uri, constraints_ds_uri, raster_format, transition_nodata, gdal_format, fill_value = 1)
pygeoprocessing.geoprocessing.rasterize_layer_uri(constraints_ds_uri, constraints_uri, burn_value, option_list=option_list + constraints_field)
# Check that the values make sense
raster = gdal.Open(constraints_ds_uri)
band = raster.GetRasterBand(1)
array = band.ReadAsArray()
unique = numpy.unique(array)
assert (unique[0] >= 0.0) and (unique[-1] <= 1.0), \
'Invalid raster value in field ' + constraints_field_name + ' in ' \
+ constraints_uri
else:
LOGGER.info("Constraints not included.")
proximity_dict = {}
if args["calculate_proximity"]:
LOGGER.info("Calculating proximity.")
cover_types = transition_dict.keys()
for cover_id in transition_dict:
if transition_dict[cover_id][args["proximity_field"]] > 0 and cover_id in suitability_dict:
distance = int(transition_dict[cover_id][args["proximity_field"]])
LOGGER.info("Calculating proximity for %i.", cover_id)
reclass_dict = dict(zip(cover_types, [1] * len(cover_types)))
reclass_dict[cover_id] = 0
ds_uri = os.path.join(workspace, cover_name % cover_id)
distance_uri = pygeoprocessing.geoprocessing.temporary_filename()
fdistance_uri = os.path.join(workspace, proximity_name % cover_id)
normalized_uri = os.path.join(workspace, normalized_proximity_name % cover_id)
pygeoprocessing.geoprocessing.reclassify_dataset_uri(landcover_uri,
reclass_dict,
ds_uri,
transition_type,
transition_nodata,
exception_flag = "values_required")
calculate_distance_raster_uri(ds_uri, distance_uri)
def threshold(value):
if value > distance:
return transition_nodata
return value
pygeoprocessing.geoprocessing.vectorize_datasets([distance_uri],
threshold,
fdistance_uri,
pygeoprocessing.geoprocessing.get_datatype_from_uri(distance_uri),
transition_nodata,
cell_size,
"union")
minimum, maximum, _, _ = pygeoprocessing.geoprocessing.get_statistics_from_uri(fdistance_uri)
assert minimum < maximum, "Wrong distance (min, max) = (" + \
str(minimum) + ", " + str(maximum) + ") in " + fdistance_uri
def normalize_op(value):
if value == transition_nodata:
return suitability_nodata
else:
return ((distance_scale - 1) \
- (((value - minimum) \
/ float(maximum - minimum)) \
* (distance_scale - 1))) \
+ 1
pygeoprocessing.geoprocessing.vectorize_datasets([fdistance_uri],
normalize_op,
normalized_uri,
transition_type,
transition_nodata,
cell_size,
"union")
proximity_dict[cover_id] = normalized_uri
def es_change_op(final_es ,initial_es):
return final_es - initial_es
def constraint_op(suit, cons):
return suit * cons
def proximity_op(suit, prox):
v = suit + (prox * proximity_weight)
if v > 100:
return 100
else:
return v
def constraint_proximity_op(suit, cons, prox):
v = (cons * suit) + (prox * proximity_weight)
if v > 100:
return 100
else:
return v
for cover_id in suitability_dict:
suitability_uri = os.path.join(workspace, adjusted_suitability_name % cover_id)
if args["calculate_constraints"]:
if cover_id in proximity_dict:
LOGGER.info("Combining suitability, proximity, and constraints for %i.", cover_id)
uri_list = [suitability_dict[cover_id],
constraints_ds_uri,
proximity_dict[cover_id]]
LOGGER.info("Vectorizing: %s", ", ".join(uri_list))
pygeoprocessing.geoprocessing.vectorize_datasets(uri_list,
constraint_proximity_op,
suitability_uri,
transition_type,
transition_nodata,
cell_size,
"union")
suitability_dict[cover_id] = suitability_uri
else:
LOGGER.info("Combining suitability and constraints for %i.", cover_id)
uri_list = [suitability_dict[cover_id],
constraints_ds_uri]
# print('------suitability and constraint files:', uri_list)
LOGGER.info("Vectorizing: %s", ", ".join(uri_list))
pygeoprocessing.geoprocessing.vectorize_datasets(uri_list,
constraint_op,
suitability_uri,
transition_type,
transition_nodata,
cell_size,
"union")
suitability_dict[cover_id] = suitability_uri
elif cover_id in proximity_dict:
LOGGER.info("Combining suitability and proximity for %i.", cover_id)
uri_list = [suitability_dict[cover_id],
proximity_dict[cover_id]]
LOGGER.info("Vectorizing: %s", ", ".join(uri_list))
pygeoprocessing.geoprocessing.vectorize_datasets(uri_list,
proximity_op,
suitability_uri,
transition_type,
transition_nodata,
cell_size,
"union")
suitability_dict[cover_id] = suitability_uri
#normalize probabilities to be on a 10 point scale
#probability raster (reclass using probability matrix)
#proximity raster (gaussian for each landcover type, using max distance)
#InVEST 2 uses 4-connectedness?
#combine rasters for weighting into sutibility raster, multiply proximity by 0.3
#[suitability * (1-factor weight)] + (factors * factor weight) or only single raster
###
#reallocate pixels (disk heap sort, randomly reassign equal value pixels, applied in order)
###
#copy initial LULC
scenario_uri = os.path.join(workspace, scenario_name)
src_ds = gdal.Open(landcover_uri)
n_cols = src_ds.RasterXSize
n_rows = src_ds.RasterYSize
dst_ds = driver.CreateCopy(scenario_uri, src_ds, 0)
dst_ds = None
src_ds = None
#identify LULC types undergoing change
change_list = []
if args["calculate_priorities"]:
for cover_id in transition_dict:
percent_change = transition_dict[cover_id][args["percent_field"]]
area_change = transition_dict[cover_id][args["area_field"]]
if percent_change > 0:
change_list.append((priorities_dict[cover_id],
cover_id,
int((percent_change / 100.0) \
* landcover_count_dict[cover_id])))
elif area_change > 0:
change_list.append((priorities_dict[cover_id],
cover_id,
10000 * int(math.ceil(area_change \
/ (cell_size**2)))))
else:
LOGGER.warn("Cover %i suitability specified, but no change indicated.", cover_id)
else:
for cover_id in transition_dict:
percent_change = transition_dict[cover_id][args["percent_field"]]
area_change = transition_dict[cover_id][args["area_field"]]
if percent_change > 0:
change_list.append((transition_dict[cover_id][args["priority_field"]],
cover_id,
int((percent_change / 100.0) \
* landcover_count_dict[cover_id])))
elif area_change > 0:
change_list.append((transition_dict[cover_id][args["priority_field"]],
cover_id,
10000 * int(math.ceil(area_change \
/ (cell_size**2)))))
else:
LOGGER.warn("Cover %i suitability specified, but no change indicated.", cover_id)
change_list.sort(reverse=True)
#change pixels
scenario_ds = gdal.Open(scenario_uri, 1)
scenario_band = scenario_ds.GetRasterBand(1)
scenario_array = scenario_band.ReadAsArray()
unconverted_pixels = {}
for index, (priority, cover_id, count) in enumerate(change_list):
LOGGER.debug("Increasing cover %i by %i pixels.", cover_id, count)
#open all lower priority suitability rasters and assign changed pixels value of 0
update_ds = {}
update_bands = {}
update_arrays = {}
for _, update_id, _ in change_list[index+1:]:
update_ds[update_id] = gdal.Open(suitability_dict[update_id], 1)
update_bands[update_id] = update_ds[update_id].GetRasterBand(1)
update_arrays[update_id] = update_bands[update_id].ReadAsArray()
##select pixels
#open suitability raster
src_ds = gdal.Open(suitability_dict[cover_id], 1)
src_band = src_ds.GetRasterBand(1)
src_array = src_band.ReadAsArray()
pixels_changed = 0
suitability_values = list(numpy.unique(src_array))
suitability_values.sort(reverse=True)
if suitability_values[-1]==0:
suitability_values.pop(-1)
for suitability_score in suitability_values:
# Check if suitsbility is between 0 and 100 inclusive
if abs(suitability_score - 50) > 50:
print('suitability_values:', suitability_dict[cover_id])
for v in suitability_values:
print v, ' ',
assert abs(suitability_score - 50) <= 50, \
'Invalid suitability score ' + str(suitability_score)
if pixels_changed == count:
LOGGER.debug("All necessay pixels converted.")
break
LOGGER.debug("Checking pixels with suitability of %i.", suitability_score)
#mask out everything except the current suitability score
mask = src_array == suitability_score
#label patches
label_im, nb_labels = scipy.ndimage.label(mask)
#get patch sizes
patch_sizes = scipy.ndimage.sum(mask, label_im, range(1, nb_labels + 1))
patch_labels = numpy.array(range(1, nb_labels + 1))
patch_locations = scipy.ndimage.find_objects(label_im, nb_labels)
#randomize patch order
numpy.random.shuffle(patch_labels)
#check patches for conversion
patch_label_count = patch_labels.size
for l in range(patch_label_count):
label = patch_labels[l]
source = label_im[patch_locations[label-1]]
target = scenario_array[patch_locations[label-1]]
pixels_to_change = numpy.where(source == label)
assert pixels_to_change[0].size == patch_sizes[label-1]
if patch_sizes[label-1] + pixels_changed > count:
#mask out everything except the current patch
#patch = numpy.where(label_im == label)
#patch_mask = numpy.zeros_like(scenario_array)
patch_mask = numpy.zeros_like(target)
#patch_mask[patch] = 1
patch_mask[pixels_to_change] = 1
#calculate the distance to exit the patch
#tmp_array = scipy.ndimage.morphology.distance_transform_edt(patch_mask)
tmp_array = scipy.ndimage.morphology.distance_transform_edt(patch_mask)
#tmp_array = tmp_array[patch]
tmp_array = tmp_array[pixels_to_change]
#select the number of pixels that need to be converted
tmp_index = numpy.argsort(tmp_array)
tmp_index = tmp_index[:count - pixels_changed]
#convert the selected pixels into coordinates
#pixels_to_change = numpy.array(zip(patch[0], patch[1]))
pixels_to_change = numpy.array(zip(pixels_to_change[0], pixels_to_change[1]))
pixels_to_change = pixels_to_change[tmp_index]
pixels_to_change = apply(zip, pixels_to_change)
#change the pixels in the scenario
#scenario_array[pixels_to_change] = cover_id
target[pixels_to_change] = cover_id
pixels_changed = count
#alter other suitability rasters to prevent double conversion
for _, update_id, _ in change_list[index+1:]:
#update_arrays[update_id][pixels_to_change] = 0
target = update_arrays[update_id][patch_locations[label-1]]
target[pixels_to_change] = 0
break
else:
#convert patch, increase count of changes
target[pixels_to_change] = cover_id
pixels_changed += patch_sizes[label-1]
#alter other suitability rasters to prevent double conversion
for _, update_id, _ in change_list[index+1:]:
target = update_arrays[update_id][patch_locations[label-1]]
target[pixels_to_change] = 0
#report and record unchanged pixels
if pixels_changed < count:
LOGGER.warn("Not all pixels converted.")
unconverted_pixels[cover_id] = count - pixels_changed
#write new suitability arrays
for _, update_id, _ in change_list[index+1:]:
update_bands[update_id].WriteArray(update_arrays[update_id])
update_arrays[update_id] = None
update_bands[update_id] = None
update_ds[update_id] = None
scenario_band.WriteArray(scenario_array)
scenario_array = None
scenario_band = None
scenario_ds = None
#apply override
if args["override_layer"]:
LOGGER.info("Overriding pixels using values from field %s.", args["override_field"])
        datasource = ogr.Open(args["override"])
        if datasource is None:
            msg = "Could not open override vector."
            LOGGER.error(msg)
            raise IOError(msg)
        layer = datasource.GetLayer()
        dataset = gdal.Open(scenario_uri, 1)
        if dataset is None:
            msg = "Could not open landcover transition raster."
            LOGGER.error(msg)
            raise IOError(msg)
if not bool(args["override_inclusion"]):
LOGGER.debug("Overriding all touched pixels.")
options = ["ALL_TOUCHED=TRUE", "ATTRIBUTE=%s" % args["override_field"]]
else:
LOGGER.debug("Overriding only pixels with covered center points.")
options = ["ATTRIBUTE=%s" % args["override_field"]]
gdal.RasterizeLayer(dataset, [1], layer, options=options)
dataset.FlushCache()
datasource = None
dataset = None
###
#tabulate coverages
###
unique_raster_values_count, transitions = get_transition_set_count_from_uri([landcover_uri, scenario_uri])
htm = open(landcover_htm_uri,'w')
htm.write("<html><head><title>Scenario Generator Report</title>")
htm.write("<style type='text/css'>")
htm.write("table {border-collapse: collapse; font-size: 1em;}")
htm.write("td {padding: 10px;}")
htm.write('body {font-family: Arial, Helvetica, sans-serif; font-size: 1em;}')
    htm.write('h2 {background: lightgray;}')  # color value truncated in the source dump; lightgray is a placeholder
htm.write("</style>")
jquery_uri = os.path.join(os.path.dirname(os.path.abspath(__file__)), "jquery-1.6.2.min.js")
htm.write("<script>\n" + open(jquery_uri).read() + "\n</script>")
highcharts_uri = os.path.join(os.path.dirname(os.path.abspath(__file__)), "highcharts.js")
htm.write("<script>\n" + open(highcharts_uri).read() + "\n</script>")
htm.write("</head><body>")
htm.write("<div style=''>")
htm.write("<h1>Scenario Output Summary</h1>")
htm.write("<h2>Initial Landscape</h2>")
htm.write("\n<table BORDER=1>")
    initial_cover_id_list = sorted(unique_raster_values_count[landcover_uri].keys())
htm.write("\n<tr><td>ID</td><td>")
htm.write("</td><td>".join([str(cover_id) for cover_id in initial_cover_id_list]))
htm.write("\n</td></tr>")
htm.write("\n<tr><td>Count</td><td>")
htm.write("</td><td>".join([str(unique_raster_values_count[landcover_uri][cover_id]) for cover_id in initial_cover_id_list]))
htm.write("\n</td></tr>")
htm.write("\n</table>")
htm.write("<h2>Scenario Landscape</h2>")
htm.write("\n<table BORDER=1>")
    scenario_cover_id_list = sorted(unique_raster_values_count[scenario_uri].keys())
htm.write("\n<tr><td>ID</td><td>")
htm.write("</td><td>".join([str(cover_id) for cover_id in scenario_cover_id_list]))
htm.write("\n</td></tr>")
htm.write("\n<tr><td>Count</td><td>")
htm.write("</td><td>".join([str(unique_raster_values_count[scenario_uri][cover_id]) for cover_id in scenario_cover_id_list]))
htm.write("\n</td></tr>")
htm.write("\n</table>")
cover_dict = {}
for cover_id in set(unique_raster_values_count[landcover_uri].keys()).union(set(unique_raster_values_count[scenario_uri].keys())):
try:
before = unique_raster_values_count[landcover_uri][cover_id]
except KeyError:
before = 0
try:
            after = unique_raster_values_count[scenario_uri][cover_id]
except KeyError:
after = 0
cover_dict[cover_id] = (before, after)
htm.write("<h2>Change Table</h2>")
    transition_dict = pygeoprocessing.geoprocessing.get_lookup_from_csv(args["transition"], args["transition_id"])
    cover_names_dict = {}
for cover in transition_dict:
cover_names_dict[cover] = transition_dict[cover]["Name"]
htm.write(generate_chart_html(cover_dict, cover_names_dict, workspace))
htm.write("<h2>Transition Matrix</h2>")
htm.write("\n<table BORDER=1>")
htm.write("\n<tr><td>ID</td><td>")
htm.write("</td><td>".join([str(cover_id) for cover_id in scenario_cover_id_list]))
htm.write("\n</td></tr>")
for initial_cover_id in initial_cover_id_list:
htm.write("\n<tr><td>%i</td>" % initial_cover_id)
for scenario_cover_id in scenario_cover_id_list:
try:
htm.write("<td>%i</td>" % transitions[0][initial_cover_id][scenario_cover_id])
except KeyError:
htm.write("<td><FONT COLOR=lightgray>%i</FONT></td>" % 0)
htm.write("\n</tr>")
htm.write("\n</table>")
    unconverted_cover_id_list = sorted(unconverted_pixels.keys())
if len(unconverted_cover_id_list) > 0:
htm.write("<h2>Unconverted Pixels</h2>")
htm.write("\n<table BORDER=1>")
htm.write("<tr><td>ID</td><td>Count</td></tr>")
for cover_id in unconverted_cover_id_list:
htm.write("<tr><td>%i</td><td>%i</td></tr>" % (cover_id, unconverted_pixels[cover_id]))
htm.write("\n</table>")
else:
htm.write("<p><i>All target pixels converted.</i></p>")
#input CSVs
input_csv_list = []
if args["calculate_priorities"]:
input_csv_list.append((args["priorities_csv_uri"], "Priorities Table"))
if args["calculate_transition"] or args["calculate_factors"]:
input_csv_list.append((args["transition"], "Transition Table"))
if args["calculate_factors"]:
input_csv_list.append((args["suitability"], "Factors Table"))
htm.write("<h1>Input Tables</h1>")
for csv_uri, name in input_csv_list:
table = "\n<table BORDER=1><tr><td>" + open(csv_uri).read().strip().replace(",","</td><td>").replace("\n","</td></tr><tr><td>") + "</td></tr></table>"
htm.write("<h2>%s</h2>" % name)
htm.write(table)
htm.write("\n</div>\n</body>\n</html>")
htm.close()
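    #standalone illustration (not from the original code) of the distance-transform
    #trick used above: for a solid patch, distance_transform_edt measures how deep
    #each pixel sits inside it, so sorting those depths ascending converts border
    #pixels first when only part of a patch must change, e.g.:
    #    patch = numpy.zeros((7, 7)); patch[1:6, 1:6] = 1
    #    depth = scipy.ndimage.morphology.distance_transform_edt(patch)
    #    depth[1, 1] == 1.0; depth[3, 3] == 3.0  # edge pixel vs. patch center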
| false
| true
|
79041ac936abcd6666ac87e8ba76e72dcda1e615
| 1,157
|
py
|
Python
|
CTCI/Data Structures/Trees/tries.py
|
Wmeng98/Leetcode
|
5c4d0ab211e637801f3ae11154850a476de2ca41
|
[
"MIT"
] | 4
|
2020-09-13T23:41:32.000Z
|
2022-01-08T21:27:58.000Z
|
CTCI/Data Structures/Trees/tries.py
|
Wmeng98/Leetcode
|
5c4d0ab211e637801f3ae11154850a476de2ca41
|
[
"MIT"
] | null | null | null |
CTCI/Data Structures/Trees/tries.py
|
Wmeng98/Leetcode
|
5c4d0ab211e637801f3ae11154850a476de2ca41
|
[
"MIT"
] | null | null | null |
'''
TRIES
A trie supports search, insert, and delete in O(L) time, where L is the length of the key.
Why a trie?
* With a trie, we can insert and find strings in O(L) time, where L is the length of a single word. This is faster than a BST.
  It is also faster than hashing in practice, because of the way a trie is implemented: we do not need to compute any hash function, and no collision
  handling is required (as in open addressing and separate chaining).
* Another advantage of a trie is that we can easily print all words in alphabetical order, which is not easy with hashing.
* We can efficiently do prefix search (or auto-complete) with a trie.
Issues with tries
Fast, but requires huge memory for storing the strings. (A minimal sketch of insert, search, and prefix search follows the TrieNode class below.)
NOTE: Trie node class
struct TrieNode
{
struct TrieNode *children[ALPHABET_SIZE];
// isEndOfWord is true if the node
// represents end of a word
bool isEndOfWord;
};
'''
class TrieNode:
# Trie node class
def __init__(self):
self.children = [None]*26
# isEndOfWord is True if node represent the end of the word
self.isEndOfWord = False
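# A minimal sketch of the operations described in the docstring above --
# insert, search, and prefix search -- assuming keys are lowercase ASCII
# words. Illustrative addition, not part of the original file.
class Trie:
    def __init__(self):
        self.root = TrieNode()
    def insert(self, key):
        # Walk down, creating missing nodes; O(L) for a key of length L.
        node = self.root
        for ch in key:
            i = ord(ch) - ord('a')
            if node.children[i] is None:
                node.children[i] = TrieNode()
            node = node.children[i]
        node.isEndOfWord = True
    def search(self, key):
        # Follow existing links only; O(L).
        node = self.root
        for ch in key:
            i = ord(ch) - ord('a')
            if node.children[i] is None:
                return False
            node = node.children[i]
        return node.isEndOfWord
    def starts_with(self, prefix):
        # Prefix search (the auto-complete test); O(L) in the prefix length.
        node = self.root
        for ch in prefix:
            i = ord(ch) - ord('a')
            if node.children[i] is None:
                return False
            node = node.children[i]
        return True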
| 28.925
| 147
| 0.703544
|
class TrieNode:
def __init__(self):
self.children = [None]*26
self.isEndOfWord = False
| true
| true
|
79041b525ccea400d5b22fa9f35211813cc266c9
| 5,755
|
py
|
Python
|
Backend/judgestatus/views.py
|
RAyymask/LPOJ
|
2f7ce194f1d510d8d006c2a35fdaa272f20ef1f3
|
[
"MIT"
] | 1
|
2022-03-16T09:49:55.000Z
|
2022-03-16T09:49:55.000Z
|
Backend/judgestatus/views.py
|
RAyymask/LPOJ
|
2f7ce194f1d510d8d006c2a35fdaa272f20ef1f3
|
[
"MIT"
] | null | null | null |
Backend/judgestatus/views.py
|
RAyymask/LPOJ
|
2f7ce194f1d510d8d006c2a35fdaa272f20ef1f3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.throttling import ScopedRateThrottle
from rest_framework.views import APIView
from rest_framework.status import HTTP_200_OK, HTTP_400_BAD_REQUEST
from rest_framework.response import Response
from rest_framework import viewsets, mixins
from rest_framework.pagination import LimitOffsetPagination
from .models import JudgeStatus, CaseStatus
from .serializers import JudgeStatusSerializer, CaseStatusSerializer, JudgeStatusCodeSerializer
from .permission import ManagerOnly, UserRatingOnly, NoContestOnly
from contest.models import ContestInfo
from contest.serializers import ContestInfoSerializer
import datetime
class JudgeStatusView(viewsets.ModelViewSet):
queryset = JudgeStatus.objects.all().order_by('-id')
serializer_class = JudgeStatusSerializer
pagination_class = LimitOffsetPagination
filter_backends = (DjangoFilterBackend,)
filter_fields = ('user', 'result', "contest", "problem", "language", "problemtitle")
permission_classes = (ManagerOnly,)
throttle_scope = "post"
throttle_classes = [ScopedRateThrottle, ]
def list(self, request, *args, **kwargs):
self.check_permissions(request)
self.check_throttles(request)
userid = request._request.session.get("user_id")
usertype = request._request.session.get("type")
cid = request._request.GET.get("contest",0)
if cid == "":
cid = 0
contestid = int(cid)
if contestid == 0:
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
        else:  # special handling when the scoreboard is frozen
contest = ContestInfo.objects.get(id=contestid)
queryset = self.filter_queryset(self.get_queryset())
newpage = []
for data in queryset:
if usertype != 3 and userid != data.user and contest.lockboard == 1 and contest.lasttime - (data.submittime - contest.begintime).total_seconds() <= contest.locktime * 60:
data.result = -1
newpage.append(data)
page = self.paginate_queryset(newpage)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(newpage, many=True)
return Response(serializer.data)
class JudgeStatusPutView(viewsets.GenericViewSet, mixins.CreateModelMixin):
queryset = JudgeStatus.objects.all()
serializer_class = JudgeStatusCodeSerializer
permission_classes = (UserRatingOnly,)
throttle_scope = "judge"
throttle_classes = [ScopedRateThrottle, ]
class JudgeStatusCodeView(viewsets.GenericViewSet, mixins.RetrieveModelMixin):
queryset = JudgeStatus.objects.all()
serializer_class = JudgeStatusCodeSerializer
pagination_class = LimitOffsetPagination
filter_backends = (DjangoFilterBackend,)
filter_fields = ('user', 'result', "contest", "problem", "problemtitle")
permission_classes = (NoContestOnly,)
throttle_scope = "post"
throttle_classes = [ScopedRateThrottle, ]
class CaseStatusView(viewsets.ModelViewSet):
queryset = CaseStatus.objects.all()
serializer_class = CaseStatusSerializer
pagination_class = LimitOffsetPagination
filter_backends = (DjangoFilterBackend,)
filter_fields = ('username', 'problem', "statusid")
permission_classes = (ManagerOnly,)
throttle_scope = "post"
throttle_classes = [ScopedRateThrottle, ]
class ACRankView(viewsets.ModelViewSet):
    queryset = JudgeStatus.objects.filter(submittime__gte=datetime.datetime.now()-datetime.timedelta(days=30),result=0)  # NOTE: this is only a temporary hack; with heavy OJ use it becomes a performance problem. Bug: the filter should not live in the class-level queryset, because the time window is computed once up front, making results inaccurate.
serializer_class = JudgeStatusSerializer
pagination_class = LimitOffsetPagination
filter_backends = (DjangoFilterBackend,)
filter_fields = ('user', 'result', "contest", "problem", "language")
permission_classes = (ManagerOnly,)
throttle_scope = "post"
throttle_classes = [ScopedRateThrottle, ]
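# Hypothetical sketch (not in the original file) of the fix suggested in the
# comment above: evaluate the 30-day window per request in get_queryset(),
# instead of once at import time in the class-level queryset.
class ACRankViewFixed(viewsets.ModelViewSet):
    serializer_class = JudgeStatusSerializer
    pagination_class = LimitOffsetPagination
    filter_backends = (DjangoFilterBackend,)
    filter_fields = ('user', 'result', "contest", "problem", "language")
    permission_classes = (ManagerOnly,)
    throttle_scope = "post"
    throttle_classes = [ScopedRateThrottle, ]
    def get_queryset(self):
        cutoff = datetime.datetime.now() - datetime.timedelta(days=30)
        return JudgeStatus.objects.filter(submittime__gte=cutoff, result=0)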
class RejudgeAPIView(APIView):
permission_classes = (ManagerOnly,)
def post(self, request, format=None):
data = request.data
contestid = data.get('contestid', "")
problem = data.get('problem', "")
statusid = data.get('statusid', "")
statustype = data.get('statustype', "")
print(contestid, problem, statusid, statustype)
if contestid == 0 or problem == -1:
return Response("bad", status=HTTP_400_BAD_REQUEST)
if contestid != "" and problem != "":
JudgeStatus.objects.filter(contest=contestid).filter(
contestproblem=problem).update(result=-1)
return Response("ok", status=HTTP_200_OK)
if problem != "" and contestid == "":
JudgeStatus.objects.filter(problem=problem).update(result=-1)
return Response("ok", status=HTTP_200_OK)
if statusid != "":
JudgeStatus.objects.filter(id=statusid).update(result=-1)
return Response("ok", status=HTTP_200_OK)
if statustype != "":
JudgeStatus.objects.filter(result=statustype).update(result=-1)
return Response("ok", status=HTTP_200_OK)
return Response("bad", status=HTTP_400_BAD_REQUEST)
| 40.244755
| 197
| 0.692441
|
from django.shortcuts import render
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.throttling import ScopedRateThrottle
from rest_framework.views import APIView
from rest_framework.status import HTTP_200_OK, HTTP_400_BAD_REQUEST
from rest_framework.response import Response
from rest_framework import viewsets, mixins
from rest_framework.pagination import LimitOffsetPagination
from .models import JudgeStatus, CaseStatus
from .serializers import JudgeStatusSerializer, CaseStatusSerializer, JudgeStatusCodeSerializer
from .permission import ManagerOnly, UserRatingOnly, NoContestOnly
from contest.models import ContestInfo
from contest.serializers import ContestInfoSerializer
import datetime
class JudgeStatusView(viewsets.ModelViewSet):
queryset = JudgeStatus.objects.all().order_by('-id')
serializer_class = JudgeStatusSerializer
pagination_class = LimitOffsetPagination
filter_backends = (DjangoFilterBackend,)
filter_fields = ('user', 'result', "contest", "problem", "language", "problemtitle")
permission_classes = (ManagerOnly,)
throttle_scope = "post"
throttle_classes = [ScopedRateThrottle, ]
def list(self, request, *args, **kwargs):
self.check_permissions(request)
self.check_throttles(request)
userid = request._request.session.get("user_id")
usertype = request._request.session.get("type")
cid = request._request.GET.get("contest",0)
if cid == "":
cid = 0
contestid = int(cid)
if contestid == 0:
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
else:
contest = ContestInfo.objects.get(id=contestid)
queryset = self.filter_queryset(self.get_queryset())
newpage = []
for data in queryset:
if usertype != 3 and userid != data.user and contest.lockboard == 1 and contest.lasttime - (data.submittime - contest.begintime).total_seconds() <= contest.locktime * 60:
data.result = -1
newpage.append(data)
page = self.paginate_queryset(newpage)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(newpage, many=True)
return Response(serializer.data)
class JudgeStatusPutView(viewsets.GenericViewSet, mixins.CreateModelMixin):
queryset = JudgeStatus.objects.all()
serializer_class = JudgeStatusCodeSerializer
permission_classes = (UserRatingOnly,)
throttle_scope = "judge"
throttle_classes = [ScopedRateThrottle, ]
class JudgeStatusCodeView(viewsets.GenericViewSet, mixins.RetrieveModelMixin):
queryset = JudgeStatus.objects.all()
serializer_class = JudgeStatusCodeSerializer
pagination_class = LimitOffsetPagination
filter_backends = (DjangoFilterBackend,)
filter_fields = ('user', 'result', "contest", "problem", "problemtitle")
permission_classes = (NoContestOnly,)
throttle_scope = "post"
throttle_classes = [ScopedRateThrottle, ]
class CaseStatusView(viewsets.ModelViewSet):
queryset = CaseStatus.objects.all()
serializer_class = CaseStatusSerializer
pagination_class = LimitOffsetPagination
filter_backends = (DjangoFilterBackend,)
filter_fields = ('username', 'problem', "statusid")
permission_classes = (ManagerOnly,)
throttle_scope = "post"
throttle_classes = [ScopedRateThrottle, ]
class ACRankView(viewsets.ModelViewSet):
    queryset = JudgeStatus.objects.filter(submittime__gte=datetime.datetime.now()-datetime.timedelta(days=30),result=0)
    serializer_class = JudgeStatusSerializer
pagination_class = LimitOffsetPagination
filter_backends = (DjangoFilterBackend,)
filter_fields = ('user', 'result', "contest", "problem", "language")
permission_classes = (ManagerOnly,)
throttle_scope = "post"
throttle_classes = [ScopedRateThrottle, ]
class RejudgeAPIView(APIView):
permission_classes = (ManagerOnly,)
def post(self, request, format=None):
data = request.data
contestid = data.get('contestid', "")
problem = data.get('problem', "")
statusid = data.get('statusid', "")
statustype = data.get('statustype', "")
print(contestid, problem, statusid, statustype)
if contestid == 0 or problem == -1:
return Response("bad", status=HTTP_400_BAD_REQUEST)
if contestid != "" and problem != "":
JudgeStatus.objects.filter(contest=contestid).filter(
contestproblem=problem).update(result=-1)
return Response("ok", status=HTTP_200_OK)
if problem != "" and contestid == "":
JudgeStatus.objects.filter(problem=problem).update(result=-1)
return Response("ok", status=HTTP_200_OK)
if statusid != "":
JudgeStatus.objects.filter(id=statusid).update(result=-1)
return Response("ok", status=HTTP_200_OK)
if statustype != "":
JudgeStatus.objects.filter(result=statustype).update(result=-1)
return Response("ok", status=HTTP_200_OK)
return Response("bad", status=HTTP_400_BAD_REQUEST)
| true
| true
|
79041ba585f4b98a982cafda9e8addb7801df625
| 356
|
py
|
Python
|
webapp/models.py
|
devsunny/flask-secure-upload
|
a69ffe6c53b5c6ba719516bb3e4379e39cacac75
|
[
"MIT"
] | null | null | null |
webapp/models.py
|
devsunny/flask-secure-upload
|
a69ffe6c53b5c6ba719516bb3e4379e39cacac75
|
[
"MIT"
] | null | null | null |
webapp/models.py
|
devsunny/flask-secure-upload
|
a69ffe6c53b5c6ba719516bb3e4379e39cacac75
|
[
"MIT"
] | null | null | null |
# models.py
from flask_login import UserMixin
from . import db
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True) # primary keys are required by SQLAlchemy
email = db.Column(db.String(100), unique=True)
password = db.Column(db.String(100))
name = db.Column(db.String(1000))
home = db.Column(db.String(1000))
| 32.363636
| 90
| 0.707865
|
from flask_login import UserMixin
from . import db
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(100), unique=True)
password = db.Column(db.String(100))
name = db.Column(db.String(1000))
home = db.Column(db.String(1000))
| true
| true
|
79041c07ac974a91b0edeef507b9c29c334b71e3
| 667
|
py
|
Python
|
simplecoremidi/examples/play_a_scale.py
|
cclauss/simplecoremidi
|
cf951eefe4e85fc709c40248aa55682b7c7f3663
|
[
"MIT"
] | 23
|
2015-09-10T18:32:21.000Z
|
2022-03-25T14:47:57.000Z
|
simplecoremidi/examples/play_a_scale.py
|
Miselu/simplecoremidi
|
77a2c2dc69c3cca66eae5365eb6bd24c4998b7e2
|
[
"MIT"
] | null | null | null |
simplecoremidi/examples/play_a_scale.py
|
Miselu/simplecoremidi
|
77a2c2dc69c3cca66eae5365eb6bd24c4998b7e2
|
[
"MIT"
] | 9
|
2016-10-23T02:16:26.000Z
|
2022-03-25T14:47:59.000Z
|
from simplecoremidi import send_midi
from time import sleep
def play_a_scale():
root_note = 60 # This is middle C
    channel = 1      # Channel nibble (0-based); 0x90 | 1 addresses MIDI channel 2 in 1-based numbering
note_on_action = 0x90
major_steps = [2, 2, 1, 2, 2, 2, 1, 0]
velocity = 127
note = root_note
for step in major_steps:
send_midi((note_on_action | channel,
note,
velocity))
sleep(0.1)
send_midi((note_on_action | channel,
note,
0)) # A note-off is just a note-on with velocity 0
note += step
sleep(0.2)
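# Hedged extra example (not in the original file): the comment above notes that
# a note-off is just a note-on with velocity 0; this helper uses the explicit
# note-off status byte 0x80 instead, which is equivalent on most synths.
def send_note(note, velocity=127, channel=1, duration=0.1):
    note_on_action = 0x90
    note_off_action = 0x80
    send_midi((note_on_action | channel, note, velocity))
    sleep(duration)
    send_midi((note_off_action | channel, note, 0))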
if __name__=='__main__':
while True:
play_a_scale()
| 24.703704
| 70
| 0.554723
|
from simplecoremidi import send_midi
from time import sleep
def play_a_scale():
root_note = 60
channel = 1
note_on_action = 0x90
major_steps = [2, 2, 1, 2, 2, 2, 1, 0]
velocity = 127
note = root_note
for step in major_steps:
send_midi((note_on_action | channel,
note,
velocity))
sleep(0.1)
send_midi((note_on_action | channel,
note,
0))
note += step
sleep(0.2)
if __name__=='__main__':
while True:
play_a_scale()
| true
| true
|
79041cccc4a095c51dc8b00eb5fe1c02ad5a666c
| 1,013
|
py
|
Python
|
test/test_docx_set_header_request.py
|
Cloudmersive/Cloudmersive.APIClient.Python.Convert
|
dba2fe7257229ebdacd266531b3724552c651009
|
[
"Apache-2.0"
] | 3
|
2018-07-25T23:04:34.000Z
|
2021-08-10T16:43:10.000Z
|
test/test_docx_set_header_request.py
|
Cloudmersive/Cloudmersive.APIClient.Python.Convert
|
dba2fe7257229ebdacd266531b3724552c651009
|
[
"Apache-2.0"
] | 3
|
2020-11-23T10:46:48.000Z
|
2021-12-30T14:09:34.000Z
|
test/test_docx_set_header_request.py
|
Cloudmersive/Cloudmersive.APIClient.Python.Convert
|
dba2fe7257229ebdacd266531b3724552c651009
|
[
"Apache-2.0"
] | 2
|
2020-01-07T09:48:01.000Z
|
2020-11-23T10:47:00.000Z
|
# coding: utf-8
"""
convertapi
Convert API lets you effortlessly convert file formats and types. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import cloudmersive_convert_api_client
from cloudmersive_convert_api_client.models.docx_set_header_request import DocxSetHeaderRequest # noqa: E501
from cloudmersive_convert_api_client.rest import ApiException
class TestDocxSetHeaderRequest(unittest.TestCase):
"""DocxSetHeaderRequest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDocxSetHeaderRequest(self):
"""Test DocxSetHeaderRequest"""
# FIXME: construct object with mandatory attributes with example values
# model = cloudmersive_convert_api_client.models.docx_set_header_request.DocxSetHeaderRequest() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 24.707317
| 117
| 0.741362
|
from __future__ import absolute_import
import unittest
import cloudmersive_convert_api_client
from cloudmersive_convert_api_client.models.docx_set_header_request import DocxSetHeaderRequest
from cloudmersive_convert_api_client.rest import ApiException
class TestDocxSetHeaderRequest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testDocxSetHeaderRequest(self):
        pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
79041da34a10699dab1d4113d71953c80adccf16
| 7,552
|
py
|
Python
|
adafruit_imageload/gif/__init__.py
|
sajattack/Adafruit_CircuitPython_ImageLoad
|
d60a714b56a74c40867fd09330fb1f1be38b9e22
|
[
"MIT"
] | null | null | null |
adafruit_imageload/gif/__init__.py
|
sajattack/Adafruit_CircuitPython_ImageLoad
|
d60a714b56a74c40867fd09330fb1f1be38b9e22
|
[
"MIT"
] | null | null | null |
adafruit_imageload/gif/__init__.py
|
sajattack/Adafruit_CircuitPython_ImageLoad
|
d60a714b56a74c40867fd09330fb1f1be38b9e22
|
[
"MIT"
] | null | null | null |
# The MIT License (MIT)
#
# Copyright (c) 2019 Paul Sajna for Adafruit Industries LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_imageload.gif`
====================================================
Load pixel values (indices or colors) into one or more bitmaps and colors into a palette from a GIF file.
* Author(s): Paul Sajna
"""
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_ImageLoad.git"
bitmaps = []
def load(f):
bitmaps = []
palette = []
table = []
f.seek(3)
version = f.read(3)
if (version != b'89a') and (version != b'87a'):
raise RuntimeError("Invalid GIF version")
width = int.from_bytes(f.read(2), 'little')
height = int.from_bytes(f.read(2), 'little')
gct_header = int.from_bytes(f.read(1), 'little')
if (gct_header & 0b10000000) != 0b10000000:
raise NotImplementedError("Only gifs with a global color table are supported")
#if (gct_header & 0b0111000 >> 3) + 1 != 8:
#raise NotImplementedError("Only 8-bit color is supported")
gct_size = 2 ** ((gct_header & 0b00000111) + 1)
bg_color_index = int.from_bytes(f.read(1), 'little')
f.seek(1, 1) # seek one byte relative to the current position (skip a byte)
for i in range(gct_size):
color = f.read(3)
palette.append(color)
while True:
separator = f.read(1)
if separator:
separator = int.from_bytes(separator, 'little')
if separator == 0x21:
# Extension
label = int.from_bytes(f.read(1), 'little')
if label == 0xf9:
# Graphic Control Extension
print("Graphic Control Extension")
f.seek(1,1)
packed = int.from_bytes(f.read(1), 'little')
# delay in seconds between frames
delay = int.from_bytes(f.read(2), 'little') / 100
# We only care about the transparency flag for now
if packed & 1 == 1:
transparency_index = int.from_bytes(f.read(1), 'little')
else:
f.seek(1,1)
f.seek(1,1)
elif label == 0xff:
# Application Extension
print("Application Extension")
f.seek(1,1)
application = f.read(8)
if application == b'NETSCAPE':
f.seek(5,1)
loop_count = int.from_bytes(f.read(2), 'little')
f.seek(1,1)
else:
raise NotImplementedError("Unimplemented application extension: "
+ ''.join([chr(b) for b in application]))
elif label == 0xfe:
# Comment Extension
comment = b''
while not comment.endswith(b'\0'):
byte = f.read(1)
comment += byte
comment = ''.join([chr(b) for b in comment])
print(comment)
else:
raise NotImplementedError("Unimplemented extension: " + hex(label))
elif separator == 0x2c:
# Image Descriptor
print("Image Descriptor")
image_start_x = int.from_bytes(f.read(2), 'little')
image_start_y = int.from_bytes(f.read(2), 'little')
image_width = int.from_bytes(f.read(2), 'little')
image_height = int.from_bytes(f.read(2), 'little')
# Ignore the packed fields for now
f.seek(1,1)
# Image Data
print("Image Data")
lzw_code_size = int.from_bytes(f.read(1), 'little')
compressed = bytearray()
while True:
block_size = int.from_bytes(f.read(1), 'little')
if block_size == 0:
break
compressed += f.read(block_size)
bitmap = decompress(compressed, lzw_code_size)
bitmaps.append(bitmap)
elif separator == 0x3b:
# Trailer
break
            else:
                raise RuntimeError("Got an unexpected separator: " + hex(separator))
        else:
            # EOF reached without a GIF trailer byte; stop instead of looping forever.
            break
    # Assumed completion: the original ended without returning; hand back what was decoded.
    return bitmaps, palette
def decompress(block, min_code_size):
clear_code = 1 << min_code_size
eoi_code = clear_code + 1
cur_code_size = min_code_size + 1
bit_offset = 0
code_stream = []
index_stream = []
table = []
prev_code = None
nextcode = clear_code + 2
while bit_offset < 8*(len(block)-1):
if nextcode == (1 << cur_code_size):
cur_code_size += 1
code = fetch_bits(block, cur_code_size, bit_offset)
#print(code, prev_code)
bit_offset += cur_code_size
if code == clear_code:
# print(table)
# print(len(table))
table = [[i] for i in range(1 << min_code_size)]
table.append([clear_code])
table.append([eoi_code])
# print(table)
nextcode = clear_code + 2
prev_code = None
print("table reset")
continue
elif code == eoi_code:
print("stop")
break
elif code < len(table):
index_stream.append(table[code])
k = [table[code][0]]
if prev_code is not None:
table.append(table[prev_code] + k)
nextcode +=1
elif prev_code is None:
raise ValueError("First code after a reset must be in the table")
else:
k = [table[prev_code][0]]
index_stream.append(table[prev_code] + k)
table.append(table[prev_code] + k)
nextcode +=1
prev_code = code
#nextcode = len(table)
index_stream = flatten(index_stream)
#print(index_stream)
return index_stream
def fetch_bits(bytearr, nbits, bit_offset):
byte_offset = bit_offset//8
rem = bit_offset % 8
bits = 0
for i in range(nbits):
bit = (bytearr[byte_offset] | (bytearr[byte_offset+1] << 8)) & (1 << (rem + i))
bits |= bit >> (rem)
return bits
def flatten(items, seqtypes=(list, tuple)):
for i, x in enumerate(items):
while i < len(items) and isinstance(items[i], seqtypes):
items[i:i+1] = items[i]
return items
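# Minimal usage sketch (hypothetical file name; relies on the return value
# added to load() above):
if __name__ == "__main__":
    with open("example.gif", "rb") as gif_file:
        frames, colors = load(gif_file)
        print(len(frames), "frame(s),", len(colors), "palette entries")
    # fetch_bits reads codes LSB-first: 3 bits at offset 2 of 0b10110100
    # are 1, 0, 1 -> value 0b101 == 5.
    assert fetch_bits(bytes([0b10110100, 0b00000001]), 3, 2) == 0b101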
| 39.747368
| 105
| 0.552039
|
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_ImageLoad.git"
bitmaps = []
def load(f):
bitmaps = []
palette = []
table = []
f.seek(3)
version = f.read(3)
if (version != b'89a') and (version != b'87a'):
raise RuntimeError("Invalid GIF version")
width = int.from_bytes(f.read(2), 'little')
height = int.from_bytes(f.read(2), 'little')
gct_header = int.from_bytes(f.read(1), 'little')
if (gct_header & 0b10000000) != 0b10000000:
raise NotImplementedError("Only gifs with a global color table are supported")
gct_size = 2 ** ((gct_header & 0b00000111) + 1)
bg_color_index = int.from_bytes(f.read(1), 'little')
f.seek(1, 1)
for i in range(gct_size):
color = f.read(3)
palette.append(color)
while True:
separator = f.read(1)
if separator:
separator = int.from_bytes(separator, 'little')
if separator == 0x21:
label = int.from_bytes(f.read(1), 'little')
if label == 0xf9:
print("Graphic Control Extension")
f.seek(1,1)
packed = int.from_bytes(f.read(1), 'little')
delay = int.from_bytes(f.read(2), 'little') / 100
if packed & 1 == 1:
transparency_index = int.from_bytes(f.read(1), 'little')
else:
f.seek(1,1)
f.seek(1,1)
elif label == 0xff:
print("Application Extension")
f.seek(1,1)
application = f.read(8)
if application == b'NETSCAPE':
f.seek(5,1)
loop_count = int.from_bytes(f.read(2), 'little')
f.seek(1,1)
else:
raise NotImplementedError("Unimplemented application extension: "
+ ''.join([chr(b) for b in application]))
elif label == 0xfe:
comment = b''
while not comment.endswith(b'\0'):
byte = f.read(1)
comment += byte
comment = ''.join([chr(b) for b in comment])
print(comment)
else:
raise NotImplementedError("Unimplemented extension: " + hex(label))
elif separator == 0x2c:
print("Image Descriptor")
image_start_x = int.from_bytes(f.read(2), 'little')
image_start_y = int.from_bytes(f.read(2), 'little')
image_width = int.from_bytes(f.read(2), 'little')
image_height = int.from_bytes(f.read(2), 'little')
f.seek(1,1)
print("Image Data")
lzw_code_size = int.from_bytes(f.read(1), 'little')
compressed = bytearray()
while True:
block_size = int.from_bytes(f.read(1), 'little')
if block_size == 0:
break
compressed += f.read(block_size)
bitmap = decompress(compressed, lzw_code_size)
bitmaps.append(bitmap)
elif separator == 0x3b:
break
            else:
                raise RuntimeError("Got an unexpected separator: " + hex(separator))
        else:
            break
    return bitmaps, palette
def decompress(block, min_code_size):
clear_code = 1 << min_code_size
eoi_code = clear_code + 1
cur_code_size = min_code_size + 1
bit_offset = 0
code_stream = []
index_stream = []
table = []
prev_code = None
nextcode = clear_code + 2
while bit_offset < 8*(len(block)-1):
if nextcode == (1 << cur_code_size):
cur_code_size += 1
code = fetch_bits(block, cur_code_size, bit_offset)
bit_offset += cur_code_size
if code == clear_code:
table = [[i] for i in range(1 << min_code_size)]
table.append([clear_code])
table.append([eoi_code])
nextcode = clear_code + 2
prev_code = None
print("table reset")
continue
elif code == eoi_code:
print("stop")
break
elif code < len(table):
index_stream.append(table[code])
k = [table[code][0]]
if prev_code is not None:
table.append(table[prev_code] + k)
nextcode +=1
elif prev_code is None:
raise ValueError("First code after a reset must be in the table")
else:
k = [table[prev_code][0]]
index_stream.append(table[prev_code] + k)
table.append(table[prev_code] + k)
nextcode +=1
prev_code = code
index_stream = flatten(index_stream)
return index_stream
def fetch_bits(bytearr, nbits, bit_offset):
byte_offset = bit_offset//8
rem = bit_offset % 8
bits = 0
for i in range(nbits):
bit = (bytearr[byte_offset] | (bytearr[byte_offset+1] << 8)) & (1 << (rem + i))
bits |= bit >> (rem)
return bits
def flatten(items, seqtypes=(list, tuple)):
for i, x in enumerate(items):
while i < len(items) and isinstance(items[i], seqtypes):
items[i:i+1] = items[i]
return items
| true
| true
|
79041f3be1f5a467637167f0ce3a36bc282498b5
| 2,195
|
py
|
Python
|
ping/ping.py
|
yzs981130/Kurose-and-Ross-socket-programming-exercises-1
|
fd7a27b48e7de98a216fa3a8905758856e70d5d7
|
[
"CC0-1.0"
] | 63
|
2015-04-09T13:52:33.000Z
|
2022-01-25T10:46:29.000Z
|
ping/ping.py
|
shenaishiren/Kurose-and-Ross-socket-programming-exercises
|
fd7a27b48e7de98a216fa3a8905758856e70d5d7
|
[
"CC0-1.0"
] | 2
|
2016-02-23T06:19:06.000Z
|
2018-09-09T18:53:35.000Z
|
ping/ping.py
|
shenaishiren/Kurose-and-Ross-socket-programming-exercises
|
fd7a27b48e7de98a216fa3a8905758856e70d5d7
|
[
"CC0-1.0"
] | 74
|
2015-04-13T08:03:11.000Z
|
2022-01-25T10:46:49.000Z
|
#!/usr/bin/env python3
import sys
import argparse
import time
import socket
from socket import socket as Socket
def main():
# Command line arguments. Use a server_port > 1024 by default so that we can run
# server without sudo.
parser = argparse.ArgumentParser()
parser.add_argument('--server-port', '-p', default=2081, type=int,
help='Server_Port to use')
parser.add_argument('--run-server', '-s', action='store_true',
help='Run a ping server')
parser.add_argument('server_address', default='localhost',
help='Server to ping, no effect if running as a server.')
args = parser.parse_args()
if args.run_server:
return run_server(args.server_port)
else:
return run_client(args.server_address, args.server_port,)
def run_server(server_port):
"""Run the UDP pinger server
"""
# Create the server socket (to handle UDP requests using ipv4), make sure
# it is always closed by using with statement.
with Socket(socket.AF_INET, socket.SOCK_DGRAM) as server_socket:
# The socket stays connected even after this script ends. So in order
# to allow the immediate reuse of the socket (so that we can kill and
# re-run the server while debugging) we set the following option. This
# is potentially dangerous in real code: in rare cases you may get junk
# data arriving at the socket.
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Set the server port
server_socket.bind(('', server_port))
# Start accepting ping requests
print("Ping server ready on port", server_port)
while True:
# Receive message and send one back
_, client_address = server_socket.recvfrom(1024)
server_socket.sendto("".encode(), client_address)
return 0
def run_client(server_address, server_port):
"""Ping a UDP pinger server running at the given address
"""
# Fill in the client side code here.
raise NotImplementedError
return 0
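def example_run_client(server_address, server_port, count=10):
    """A sketch of one possible client -- not the exercise's official
    solution. Sends an empty datagram, waits up to one second for the
    echo, and reports the round-trip time.
    """
    with Socket(socket.AF_INET, socket.SOCK_DGRAM) as client_socket:
        client_socket.settimeout(1.0)
        for seq in range(count):
            start = time.time()
            client_socket.sendto("".encode(), (server_address, server_port))
            try:
                client_socket.recvfrom(1024)
            except socket.timeout:
                print("Request", seq, "timed out")
            else:
                rtt_ms = (time.time() - start) * 1000
                print("Reply from %s seq %d rtt %.1f ms" % (server_address, seq, rtt_ms))
    return 0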
if __name__ == "__main__":
sys.exit(main())
| 28.881579
| 84
| 0.653759
|
import sys
import argparse
import time
import socket
from socket import socket as Socket
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--server-port', '-p', default=2081, type=int,
help='Server_Port to use')
parser.add_argument('--run-server', '-s', action='store_true',
help='Run a ping server')
parser.add_argument('server_address', default='localhost',
help='Server to ping, no effect if running as a server.')
args = parser.parse_args()
if args.run_server:
return run_server(args.server_port)
else:
return run_client(args.server_address, args.server_port,)
def run_server(server_port):
with Socket(socket.AF_INET, socket.SOCK_DGRAM) as server_socket:
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(('', server_port))
print("Ping server ready on port", server_port)
while True:
_, client_address = server_socket.recvfrom(1024)
server_socket.sendto("".encode(), client_address)
return 0
def run_client(server_address, server_port):
raise NotImplementedError
return 0
if __name__ == "__main__":
sys.exit(main())
| true
| true
|
79041f456f081182daf44f3daac2ef9b888bf30f
| 6,912
|
py
|
Python
|
svg/path/parser.py
|
judithfan/sketch-rnn
|
4c269499439c08619e88b5cf6b421661f1648082
|
[
"MIT"
] | 1
|
2020-04-10T16:50:26.000Z
|
2020-04-10T16:50:26.000Z
|
svg/path/parser.py
|
judithfan/sketch-rnn
|
4c269499439c08619e88b5cf6b421661f1648082
|
[
"MIT"
] | null | null | null |
svg/path/parser.py
|
judithfan/sketch-rnn
|
4c269499439c08619e88b5cf6b421661f1648082
|
[
"MIT"
] | 2
|
2019-10-08T07:06:43.000Z
|
2020-02-20T20:22:30.000Z
|
# SVG Path specification parser
import re
from . import path
COMMANDS = set('MmZzLlHhVvCcSsQqTtAa')
UPPERCASE = set('MZLHVCSQTA')
COMMAND_RE = re.compile("([MmZzLlHhVvCcSsQqTtAa])")
FLOAT_RE = re.compile(r"[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?")
def _tokenize_path(pathdef):
for x in COMMAND_RE.split(pathdef):
if x in COMMANDS:
yield x
for token in FLOAT_RE.findall(x):
yield token
def parse_path(pathdef, current_pos=0j):
# In the SVG specs, initial movetos are absolute, even if
# specified as 'm'. This is the default behavior here as well.
# But if you pass in a current_pos variable, the initial moveto
# will be relative to that current_pos. This is useful.
elements = list(_tokenize_path(pathdef))
# Reverse for easy use of .pop()
elements.reverse()
segments = path.Path()
start_pos = None
command = None
while elements:
if elements[-1] in COMMANDS:
# New command.
last_command = command # Used by S and T
command = elements.pop()
absolute = command in UPPERCASE
command = command.upper()
else:
# If this element starts with numbers, it is an implicit command
# and we don't change the command. Check that it's allowed:
if command is None:
raise ValueError("Unallowed implicit command in %s, position %s" % (
pathdef, len(pathdef.split()) - len(elements)))
if command == 'M':
# Moveto command.
x = elements.pop()
y = elements.pop()
pos = float(x) + float(y) * 1j
if absolute:
current_pos = pos
else:
current_pos += pos
# when M is called, reset start_pos
# This behavior of Z is defined in svg spec:
# http://www.w3.org/TR/SVG/paths.html#PathDataClosePathCommand
start_pos = current_pos
# Implicit moveto commands are treated as lineto commands.
# So we set command to lineto here, in case there are
# further implicit commands after this moveto.
command = 'L'
elif command == 'Z':
# Close path
segments.append(path.Line(current_pos, start_pos))
segments.closed = True
current_pos = start_pos
start_pos = None
command = None # You can't have implicit commands after closing.
elif command == 'L':
x = elements.pop()
y = elements.pop()
pos = float(x) + float(y) * 1j
if not absolute:
pos += current_pos
segments.append(path.Line(current_pos, pos))
current_pos = pos
elif command == 'H':
x = elements.pop()
pos = float(x) + current_pos.imag * 1j
if not absolute:
pos += current_pos.real
segments.append(path.Line(current_pos, pos))
current_pos = pos
elif command == 'V':
y = elements.pop()
pos = current_pos.real + float(y) * 1j
if not absolute:
pos += current_pos.imag * 1j
segments.append(path.Line(current_pos, pos))
current_pos = pos
elif command == 'C':
try:
control1 = float(elements.pop()) + float(elements.pop()) * 1j
control2 = float(elements.pop()) + float(elements.pop()) * 1j
end = float(elements.pop()) + float(elements.pop()) * 1j
            except ValueError:
                print(elements)
                raise
if not absolute:
control1 += current_pos
control2 += current_pos
end += current_pos
segments.append(path.CubicBezier(current_pos, control1, control2, end))
current_pos = end
elif command == 'S':
# Smooth curve. First control point is the "reflection" of
# the second control point in the previous path.
if last_command not in 'CS':
# If there is no previous command or if the previous command
# was not an C, c, S or s, assume the first control point is
# coincident with the current point.
control1 = current_pos
else:
# The first control point is assumed to be the reflection of
# the second control point on the previous command relative
# to the current point.
control1 = current_pos + current_pos - segments[-1].control2
control2 = float(elements.pop()) + float(elements.pop()) * 1j
end = float(elements.pop()) + float(elements.pop()) * 1j
if not absolute:
control2 += current_pos
end += current_pos
segments.append(path.CubicBezier(current_pos, control1, control2, end))
current_pos = end
elif command == 'Q':
control = float(elements.pop()) + float(elements.pop()) * 1j
end = float(elements.pop()) + float(elements.pop()) * 1j
if not absolute:
control += current_pos
end += current_pos
segments.append(path.QuadraticBezier(current_pos, control, end))
current_pos = end
elif command == 'T':
# Smooth curve. Control point is the "reflection" of
# the second control point in the previous path.
if last_command not in 'QT':
# If there is no previous command or if the previous command
# was not an Q, q, T or t, assume the first control point is
# coincident with the current point.
control = current_pos
else:
# The control point is assumed to be the reflection of
# the control point on the previous command relative
# to the current point.
control = current_pos + current_pos - segments[-1].control
end = float(elements.pop()) + float(elements.pop()) * 1j
if not absolute:
end += current_pos
segments.append(path.QuadraticBezier(current_pos, control, end))
current_pos = end
elif command == 'A':
radius = float(elements.pop()) + float(elements.pop()) * 1j
rotation = float(elements.pop())
arc = float(elements.pop())
sweep = float(elements.pop())
end = float(elements.pop()) + float(elements.pop()) * 1j
if not absolute:
end += current_pos
segments.append(path.Arc(current_pos, radius, rotation, arc, sweep, end))
current_pos = end
return segments
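# Minimal usage sketch (hypothetical path string); assumes path.Path behaves
# like a sequence, as in the svg.path package this module comes from.
if __name__ == "__main__":
    p = parse_path("M 0 0 L 10 0 L 10 10 Z")
    print(len(p), "segments, closed:", p.closed)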
| 36.188482
| 85
| 0.54702
|
import re
from . import path
COMMANDS = set('MmZzLlHhVvCcSsQqTtAa')
UPPERCASE = set('MZLHVCSQTA')
COMMAND_RE = re.compile("([MmZzLlHhVvCcSsQqTtAa])")
FLOAT_RE = re.compile(r"[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?")
def _tokenize_path(pathdef):
for x in COMMAND_RE.split(pathdef):
if x in COMMANDS:
yield x
for token in FLOAT_RE.findall(x):
yield token
def parse_path(pathdef, current_pos=0j):
elements = list(_tokenize_path(pathdef))
elements.reverse()
segments = path.Path()
start_pos = None
command = None
while elements:
if elements[-1] in COMMANDS:
last_command = command
command = elements.pop()
absolute = command in UPPERCASE
command = command.upper()
else:
if command is None:
raise ValueError("Unallowed implicit command in %s, position %s" % (
pathdef, len(pathdef.split()) - len(elements)))
if command == 'M':
x = elements.pop()
y = elements.pop()
pos = float(x) + float(y) * 1j
if absolute:
current_pos = pos
else:
current_pos += pos
            start_pos = current_pos
command = 'L'
elif command == 'Z':
segments.append(path.Line(current_pos, start_pos))
segments.closed = True
current_pos = start_pos
start_pos = None
command = None
elif command == 'L':
x = elements.pop()
y = elements.pop()
pos = float(x) + float(y) * 1j
if not absolute:
pos += current_pos
segments.append(path.Line(current_pos, pos))
current_pos = pos
elif command == 'H':
x = elements.pop()
pos = float(x) + current_pos.imag * 1j
if not absolute:
pos += current_pos.real
segments.append(path.Line(current_pos, pos))
current_pos = pos
elif command == 'V':
y = elements.pop()
pos = current_pos.real + float(y) * 1j
if not absolute:
pos += current_pos.imag * 1j
segments.append(path.Line(current_pos, pos))
current_pos = pos
elif command == 'C':
try:
control1 = float(elements.pop()) + float(elements.pop()) * 1j
control2 = float(elements.pop()) + float(elements.pop()) * 1j
end = float(elements.pop()) + float(elements.pop()) * 1j
            except ValueError:
                print(elements)
                raise
if not absolute:
control1 += current_pos
control2 += current_pos
end += current_pos
segments.append(path.CubicBezier(current_pos, control1, control2, end))
current_pos = end
elif command == 'S':
# Smooth curve. First control point is the "reflection" of
# the second control point in the previous path.
if last_command not in 'CS':
# If there is no previous command or if the previous command
# was not an C, c, S or s, assume the first control point is
# coincident with the current point.
control1 = current_pos
else:
# The first control point is assumed to be the reflection of
# the second control point on the previous command relative
# to the current point.
control1 = current_pos + current_pos - segments[-1].control2
control2 = float(elements.pop()) + float(elements.pop()) * 1j
end = float(elements.pop()) + float(elements.pop()) * 1j
if not absolute:
control2 += current_pos
end += current_pos
segments.append(path.CubicBezier(current_pos, control1, control2, end))
current_pos = end
elif command == 'Q':
control = float(elements.pop()) + float(elements.pop()) * 1j
end = float(elements.pop()) + float(elements.pop()) * 1j
if not absolute:
control += current_pos
end += current_pos
segments.append(path.QuadraticBezier(current_pos, control, end))
current_pos = end
elif command == 'T':
# Smooth curve. Control point is the "reflection" of
# the second control point in the previous path.
if last_command not in 'QT':
# If there is no previous command or if the previous command
# was not an Q, q, T or t, assume the first control point is
# coincident with the current point.
control = current_pos
else:
# The control point is assumed to be the reflection of
# the control point on the previous command relative
# to the current point.
control = current_pos + current_pos - segments[-1].control
end = float(elements.pop()) + float(elements.pop()) * 1j
if not absolute:
end += current_pos
segments.append(path.QuadraticBezier(current_pos, control, end))
current_pos = end
elif command == 'A':
radius = float(elements.pop()) + float(elements.pop()) * 1j
rotation = float(elements.pop())
arc = float(elements.pop())
sweep = float(elements.pop())
end = float(elements.pop()) + float(elements.pop()) * 1j
if not absolute:
end += current_pos
segments.append(path.Arc(current_pos, radius, rotation, arc, sweep, end))
current_pos = end
return segments
| false
| true
|
79041fba7b86fb54852e5aa1884db644c8730c8e
| 1,622
|
py
|
Python
|
ocp_resources/cdi_config.py
|
ibesso-rh/openshift-python-wrapper
|
01aa3234fc6333868222736d8f42e27943edb47a
|
[
"Apache-2.0"
] | null | null | null |
ocp_resources/cdi_config.py
|
ibesso-rh/openshift-python-wrapper
|
01aa3234fc6333868222736d8f42e27943edb47a
|
[
"Apache-2.0"
] | null | null | null |
ocp_resources/cdi_config.py
|
ibesso-rh/openshift-python-wrapper
|
01aa3234fc6333868222736d8f42e27943edb47a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import logging
from ocp_resources.constants import PROTOCOL_ERROR_EXCEPTION_DICT
from ocp_resources.resource import TIMEOUT, Resource
from ocp_resources.utils import TimeoutSampler
LOGGER = logging.getLogger(__name__)
class CDIConfig(Resource):
"""
CDIConfig object.
"""
api_group = Resource.ApiGroup.CDI_KUBEVIRT_IO
@property
def scratch_space_storage_class_from_spec(self):
return self.instance.spec.scratchSpaceStorageClass
@property
def scratch_space_storage_class_from_status(self):
return self.instance.status.scratchSpaceStorageClass
@property
def upload_proxy_url(self):
return self.instance.status.uploadProxyURL
def wait_until_upload_url_changed(self, uploadproxy_url, timeout=TIMEOUT):
"""
        Wait until the upload proxy URL equals the given value.
        Args:
            uploadproxy_url (str): Expected upload proxy URL.
            timeout (int): Time to wait for the CDI config to update.
        Returns:
            None. Returns once status.uploadProxyURL equals uploadproxy_url.
"""
LOGGER.info(
f"Wait for {self.kind} {self.name} to ensure current URL == uploadProxyURL"
)
samples = TimeoutSampler(
wait_timeout=timeout,
sleep=1,
exceptions_dict=PROTOCOL_ERROR_EXCEPTION_DICT,
func=self.api.get,
field_selector=f"metadata.name=={self.name}",
)
for sample in samples:
if sample.items:
status = sample.items[0].status
current_url = status.uploadProxyURL
if current_url == uploadproxy_url:
return
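# Hypothetical usage sketch (resource name and URL are assumptions):
#   cdi_config = CDIConfig(name="config")
#   cdi_config.wait_until_upload_url_changed(
#       uploadproxy_url="https://cdi-uploadproxy.example.com", timeout=120
#   )
# Returns once status.uploadProxyURL matches the given URL; otherwise the
# TimeoutSampler stops it after `timeout` seconds.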
| 27.965517
| 87
| 0.646732
|
import logging
from ocp_resources.constants import PROTOCOL_ERROR_EXCEPTION_DICT
from ocp_resources.resource import TIMEOUT, Resource
from ocp_resources.utils import TimeoutSampler
LOGGER = logging.getLogger(__name__)
class CDIConfig(Resource):
api_group = Resource.ApiGroup.CDI_KUBEVIRT_IO
@property
def scratch_space_storage_class_from_spec(self):
return self.instance.spec.scratchSpaceStorageClass
@property
def scratch_space_storage_class_from_status(self):
return self.instance.status.scratchSpaceStorageClass
@property
def upload_proxy_url(self):
return self.instance.status.uploadProxyURL
def wait_until_upload_url_changed(self, uploadproxy_url, timeout=TIMEOUT):
LOGGER.info(
f"Wait for {self.kind} {self.name} to ensure current URL == uploadProxyURL"
)
samples = TimeoutSampler(
wait_timeout=timeout,
sleep=1,
exceptions_dict=PROTOCOL_ERROR_EXCEPTION_DICT,
func=self.api.get,
field_selector=f"metadata.name=={self.name}",
)
for sample in samples:
if sample.items:
status = sample.items[0].status
current_url = status.uploadProxyURL
if current_url == uploadproxy_url:
return
| true
| true
|
790420a930db4cd3395ec455b590c58fd0be09a6
| 3,671
|
py
|
Python
|
zerver/webhooks/groove/view.py
|
pranayshahxyz/zulip
|
3da483487af79fde9dce2d21124dfa39b94936a5
|
[
"Apache-2.0"
] | 1
|
2020-04-09T18:34:44.000Z
|
2020-04-09T18:34:44.000Z
|
zerver/webhooks/groove/view.py
|
pranayshahxyz/zulip
|
3da483487af79fde9dce2d21124dfa39b94936a5
|
[
"Apache-2.0"
] | null | null | null |
zerver/webhooks/groove/view.py
|
pranayshahxyz/zulip
|
3da483487af79fde9dce2d21124dfa39b94936a5
|
[
"Apache-2.0"
] | null | null | null |
# Webhooks for external integrations.
from functools import partial
from typing import Any, Callable, Dict, Optional
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import UnexpectedWebhookEventType, \
check_send_webhook_message, get_http_headers_from_filename, \
validate_extract_webhook_http_header
from zerver.models import UserProfile
TICKET_STARTED_TEMPLATE = """
{customer_name} submitted new ticket [#{number}: {title}]({app_url}):
``` quote
{summary}
```
""".strip()
TICKET_ASSIGNED_TEMPLATE = "[#{number}: {title}]({app_url}) ({state}) assigned to {assignee_info}."
AGENT_REPLIED_TEMPLATE = """
{actor} {action} [ticket #{number}]({app_ticket_url}):
``` quote
{plain_text_body}
```
""".strip()
def ticket_started_body(payload: Dict[str, Any]) -> str:
return TICKET_STARTED_TEMPLATE.format(**payload)
def ticket_assigned_body(payload: Dict[str, Any]) -> Optional[str]:
state = payload['state']
kwargs = {
'state': 'open' if state == 'opened' else state,
'number': payload['number'],
'title': payload['title'],
'app_url': payload['app_url']
}
assignee = payload['assignee']
assigned_group = payload['assigned_group']
if assignee or assigned_group:
if assignee and assigned_group:
kwargs['assignee_info'] = '{assignee} from {assigned_group}'.format(**payload)
elif assignee:
kwargs['assignee_info'] = '{assignee}'.format(**payload)
elif assigned_group:
kwargs['assignee_info'] = '{assigned_group}'.format(**payload)
return TICKET_ASSIGNED_TEMPLATE.format(**kwargs)
else:
return None
def replied_body(payload: Dict[str, Any], actor: str, action: str) -> str:
actor_url = "http://api.groovehq.com/v1/{}/".format(actor + 's')
actor = payload['links']['author']['href'].split(actor_url)[1]
number = payload['links']['ticket']['href'].split("http://api.groovehq.com/v1/tickets/")[1]
body = AGENT_REPLIED_TEMPLATE.format(
actor=actor,
action=action,
number=number,
app_ticket_url=payload['app_ticket_url'],
plain_text_body=payload['plain_text_body']
)
return body
def get_event_handler(event: str) -> Callable[..., str]:
    # This function exists mainly to keep mypy happy
handler = EVENTS_FUNCTION_MAPPER.get(event) # type: Any
if handler is None:
raise UnexpectedWebhookEventType("Groove", event)
return handler
@api_key_only_webhook_view('Groove')
@has_request_variables
def api_groove_webhook(request: HttpRequest, user_profile: UserProfile,
payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:
event = validate_extract_webhook_http_header(request, 'X_GROOVE_EVENT', 'Groove')
assert event is not None
handler = get_event_handler(event)
body = handler(payload)
topic = 'notifications'
if body is not None:
check_send_webhook_message(request, user_profile, topic, body)
return json_success()
EVENTS_FUNCTION_MAPPER = {
'ticket_started': ticket_started_body,
'ticket_assigned': ticket_assigned_body,
'agent_replied': partial(replied_body, actor='agent', action='replied to'),
'customer_replied': partial(replied_body, actor='customer', action='replied to'),
'note_added': partial(replied_body, actor='agent', action='left a note on')
}
fixture_to_headers = get_http_headers_from_filename("HTTP_X_GROOVE_EVENT")
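# Hypothetical payload sketch showing the fields TICKET_STARTED_TEMPLATE above
# consumes (all values invented for illustration):
_SAMPLE_TICKET_STARTED = {
    "customer_name": "Ada",
    "number": 7,
    "title": "Login fails",
    "app_url": "https://example.groovehq.com/tickets/7",
    "summary": "Cannot log in since the upgrade.",
}
# ticket_started_body(_SAMPLE_TICKET_STARTED) renders the template with these values.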
| 34.308411
| 99
| 0.701989
|
from functools import partial
from typing import Any, Callable, Dict, Optional
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import UnexpectedWebhookEventType, \
check_send_webhook_message, get_http_headers_from_filename, \
validate_extract_webhook_http_header
from zerver.models import UserProfile
TICKET_STARTED_TEMPLATE = """
{customer_name} submitted new ticket [#{number}: {title}]({app_url}):
``` quote
{summary}
```
""".strip()
TICKET_ASSIGNED_TEMPLATE = "[#{number}: {title}]({app_url}) ({state}) assigned to {assignee_info}."
AGENT_REPLIED_TEMPLATE = """
{actor} {action} [ticket #{number}]({app_ticket_url}):
``` quote
{plain_text_body}
```
""".strip()
def ticket_started_body(payload: Dict[str, Any]) -> str:
return TICKET_STARTED_TEMPLATE.format(**payload)
def ticket_assigned_body(payload: Dict[str, Any]) -> Optional[str]:
state = payload['state']
kwargs = {
'state': 'open' if state == 'opened' else state,
'number': payload['number'],
'title': payload['title'],
'app_url': payload['app_url']
}
assignee = payload['assignee']
assigned_group = payload['assigned_group']
if assignee or assigned_group:
if assignee and assigned_group:
kwargs['assignee_info'] = '{assignee} from {assigned_group}'.format(**payload)
elif assignee:
kwargs['assignee_info'] = '{assignee}'.format(**payload)
elif assigned_group:
kwargs['assignee_info'] = '{assigned_group}'.format(**payload)
return TICKET_ASSIGNED_TEMPLATE.format(**kwargs)
else:
return None
def replied_body(payload: Dict[str, Any], actor: str, action: str) -> str:
actor_url = "http://api.groovehq.com/v1/{}/".format(actor + 's')
actor = payload['links']['author']['href'].split(actor_url)[1]
number = payload['links']['ticket']['href'].split("http://api.groovehq.com/v1/tickets/")[1]
body = AGENT_REPLIED_TEMPLATE.format(
actor=actor,
action=action,
number=number,
app_ticket_url=payload['app_ticket_url'],
plain_text_body=payload['plain_text_body']
)
return body
def get_event_handler(event: str) -> Callable[..., str]:
handler = EVENTS_FUNCTION_MAPPER.get(event)
if handler is None:
raise UnexpectedWebhookEventType("Groove", event)
return handler
@api_key_only_webhook_view('Groove')
@has_request_variables
def api_groove_webhook(request: HttpRequest, user_profile: UserProfile,
payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:
event = validate_extract_webhook_http_header(request, 'X_GROOVE_EVENT', 'Groove')
assert event is not None
handler = get_event_handler(event)
body = handler(payload)
topic = 'notifications'
if body is not None:
check_send_webhook_message(request, user_profile, topic, body)
return json_success()
EVENTS_FUNCTION_MAPPER = {
'ticket_started': ticket_started_body,
'ticket_assigned': ticket_assigned_body,
'agent_replied': partial(replied_body, actor='agent', action='replied to'),
'customer_replied': partial(replied_body, actor='customer', action='replied to'),
'note_added': partial(replied_body, actor='agent', action='left a note on')
}
fixture_to_headers = get_http_headers_from_filename("HTTP_X_GROOVE_EVENT")
| true
| true
|
7904212f78e6a7b14fd1665ba8a9cc49c99b21b7
| 637
|
py
|
Python
|
scripts/release_helper/go.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | 1
|
2021-09-07T18:39:05.000Z
|
2021-09-07T18:39:05.000Z
|
scripts/release_helper/go.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | null | null | null |
scripts/release_helper/go.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | null | null | null |
from common import IssueProcess, Common
from typing import Any, List
import os
# assignee dict which will be assigned to handle issues
_GO_OWNER = {'ArcturusZhang'}
# 'github assignee': 'token'
_ASSIGNEE_TOKEN_GO = {'ArcturusZhang': os.getenv('AZURESDK_BOT_TOKEN')}
class IssueProcessGo(IssueProcess):
pass
class Go(Common):
def __init__(self, issues, assignee_token, language_owner):
super(Go, self).__init__(issues, assignee_token, language_owner)
self.file_out_name = 'release_go_status.md'
def go_process(issues: List[Any]):
instance = Go(issues, _ASSIGNEE_TOKEN_GO, _GO_OWNER)
instance.run()
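# Illustrative usage, not part of the original module: go_process expects the
# list of GitHub issue objects gathered by the release-helper entry point,
# e.g. (hypothetical collection step)
#     issues = collect_release_issues(language='Go')
#     go_process(issues)
# Common.run() (from common.py) presumably triages each issue and writes the
# summary to release_go_status.md via self.file_out_name.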
| 25.48 | 72 | 0.744113 |
from common import IssueProcess, Common
from typing import Any, List
import os
_GO_OWNER = {'ArcturusZhang'}
_ASSIGNEE_TOKEN_GO = {'ArcturusZhang': os.getenv('AZURESDK_BOT_TOKEN')}
class IssueProcessGo(IssueProcess):
pass
class Go(Common):
def __init__(self, issues, assignee_token, language_owner):
super(Go, self).__init__(issues, assignee_token, language_owner)
self.file_out_name = 'release_go_status.md'
def go_process(issues: List[Any]):
instance = Go(issues, _ASSIGNEE_TOKEN_GO, _GO_OWNER)
instance.run()
| true | true |
790422285f2b34d6e4f52b2a3c5f165cfc5cd144 | 2,083 | py | Python | deep learning/test/test.py | ANI717/Self_Driving_CV_Repository | 27faa8ca86966838998056a42973de292bc380cb | ["MIT"] | 1 | 2021-12-20T03:53:59.000Z | 2021-12-20T03:53:59.000Z | deep learning/test/test.py | ANI717/Self_Driving_CV_Repository | 27faa8ca86966838998056a42973de292bc380cb | ["MIT"] | null | null | null | deep learning/test/test.py | ANI717/Self_Driving_CV_Repository | 27faa8ca86966838998056a42973de292bc380cb | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to Test Deep Learning Model.
Contains a pipeline to test a deep learning model.
Revision History:
2021-11-20 (ANI717 - Animesh Bala Ani): Baseline Software.
Example:
$ python3 test.py
"""
#___Import Modules:
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
import config
from model import NvidiaNet
from dataset import ANI717Dataset
#___Main Method:
def main():
# Load Data
dataset = ANI717Dataset(config.TEST_CSV, config.IMG_SOURCE, transforms=config.TEST_TRANSFORMS)
loader = DataLoader(dataset, batch_size=1, shuffle=False)
# Initialize Model with Weights
model = NvidiaNet(in_channels=config.IMG_SHAPE[0]).to(config.DEVICE)
model.load_state_dict(torch.load(config.MODEL_FILE, map_location=config.DEVICE)["state_dict"])
model.eval()
# Initialize total correct number and counter
num_correct = 0.0
count = 0
# Loop through dataset
with torch.no_grad():
loop = tqdm(loader, position=0, leave=True)
for batch_idx, (inputs, z, x) in enumerate(loop):
# Enable GPU support is available
inputs = inputs.to(config.DEVICE)
if config.TRAIN_TYPE == 'z':
targets = z.unsqueeze(1).to(torch.float32).to(config.DEVICE)
else:
targets = x.unsqueeze(1).to(torch.float32).to(config.DEVICE)
# Calculate prediction
predictions = model(inputs)
# Update total correct number and counter
num_correct += sum(abs(torch.round(targets/config.ERROR_TOLERENCE) - torch.round(predictions/config.ERROR_TOLERENCE)) <= 1).item()
count += predictions.shape[0]
# Calculate accuracy
loop.set_postfix(accuracy=100*num_correct/count)
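            # Worked example of the tolerance check above (illustrative values):
            # with ERROR_TOLERENCE = 0.1, target = 0.23 and prediction = 0.31,
            # round(0.23/0.1) = 2 and round(0.31/0.1) = 3, so |2 - 3| = 1 <= 1
            # counts as correct; a prediction of 0.45 gives |2 - 4| = 2 > 1
            # and counts as incorrect.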
#___Driver Program:
if __name__ == "__main__":
main()
#
# end of file
"""ANI717"""
| 28.148649 | 142 | 0.614498 |
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
import config
from model import NvidiaNet
from dataset import ANI717Dataset
def main():
dataset = ANI717Dataset(config.TEST_CSV, config.IMG_SOURCE, transforms=config.TEST_TRANSFORMS)
loader = DataLoader(dataset, batch_size=1, shuffle=False)
model = NvidiaNet(in_channels=config.IMG_SHAPE[0]).to(config.DEVICE)
model.load_state_dict(torch.load(config.MODEL_FILE, map_location=config.DEVICE)["state_dict"])
model.eval()
num_correct = 0.0
count = 0
with torch.no_grad():
loop = tqdm(loader, position=0, leave=True)
for batch_idx, (inputs, z, x) in enumerate(loop):
inputs = inputs.to(config.DEVICE)
if config.TRAIN_TYPE == 'z':
targets = z.unsqueeze(1).to(torch.float32).to(config.DEVICE)
else:
targets = x.unsqueeze(1).to(torch.float32).to(config.DEVICE)
predictions = model(inputs)
num_correct += sum(abs(torch.round(targets/config.ERROR_TOLERENCE) - torch.round(predictions/config.ERROR_TOLERENCE)) <= 1).item()
count += predictions.shape[0]
loop.set_postfix(accuracy=100*num_correct/count)
if __name__ == "__main__":
main()
| true | true |
79042291a3a9fe72be66da8fcd766e1455ffd1e2 | 11,681 | py | Python | perf/benchmark/runner/runner.py | jwendell/tools | 992be29e272131f3b8129a03dac5347eb8f62a03 | ["Apache-2.0"] | null | null | null | perf/benchmark/runner/runner.py | jwendell/tools | 992be29e272131f3b8129a03dac5347eb8f62a03 | ["Apache-2.0"] | null | null | null | perf/benchmark/runner/runner.py | jwendell/tools | 992be29e272131f3b8129a03dac5347eb8f62a03 | ["Apache-2.0"] | null | null | null |
# Copyright Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import collections
import os
import json
import argparse
import subprocess
import shlex
import uuid
from fortio import METRICS_START_SKIP_DURATION, METRICS_END_SKIP_DURATION
import sys
if sys.version_info.major == 2:
from commands import getoutput
else:
from subprocess import getoutput
POD = collections.namedtuple('Pod', ['name', 'namespace', 'ip', 'labels'])
def pod_info(filterstr="", namespace="twopods", multi_ok=True):
cmd = "kubectl -n {namespace} get pod {filterstr} -o json".format(
namespace=namespace, filterstr=filterstr)
op = getoutput(cmd)
o = json.loads(op)
items = o['items']
if not multi_ok and len(items) > 1:
raise Exception("more than one found " + op)
if not items:
raise Exception("no pods found with command [" + cmd + "]")
i = items[0]
return POD(i['metadata']['name'], i['metadata']['namespace'],
i['status']['podIP'], i['metadata']['labels'])
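# Illustrative only: with namespace='twopods' and filterstr='-lapp=fortioclient',
# pod_info shells out to
#     kubectl -n twopods get pod -lapp=fortioclient -o json
# and reduces the JSON to something like (hypothetical values)
#     POD(name='fortioclient-abc123', namespace='twopods',
#         ip='10.0.0.12', labels={'app': 'fortioclient'})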
def run_command(command):
process = subprocess.Popen(shlex.split(command))
process.wait()
def run_command_sync(command):
op = getoutput(command)
return op.strip()
class Fortio:
ports = {
"http": {"direct_port": 8077, "port": 8080, "ingress": 80},
"grpc": {"direct_port": 8076, "port": 8079, "ingress": 80},
"direct_envoy": {"direct_port": 8076, "port": 8079},
}
def __init__(
self,
conn=None,
qps=None,
duration=None,
size=None,
mode="http",
mixer_mode="mixer",
mixer_cache=True,
perf_record=False,
server="fortioserver",
client="fortioclient",
additional_args=None,
filter_fn=None,
labels=None,
baseline=False,
serversidecar=False,
clientsidecar=True,
ingress=None,
mesh="istio"):
self.run_id = str(uuid.uuid4()).partition('-')[0]
self.conn = conn
self.qps = qps
self.size = size
self.duration = duration
self.mode = mode
self.ns = os.environ.get("NAMESPACE", "twopods")
# bucket resolution in seconds
self.r = "0.00005"
self.mixer_mode = mixer_mode
self.mixer_cache = mixer_cache
self.perf_record = perf_record
self.server = pod_info("-lapp=" + server, namespace=self.ns)
self.client = pod_info("-lapp=" + client, namespace=self.ns)
self.additional_args = additional_args
self.filter_fn = filter_fn
self.labels = labels
self.run_baseline = baseline
self.run_serversidecar = serversidecar
self.run_clientsidecar = clientsidecar
self.run_ingress = ingress
if mesh == "linkerd":
self.mesh = "linkerd"
elif mesh == "istio":
self.mesh = "istio"
else:
sys.exit("invalid mesh %s, must be istio or linkerd" % mesh)
def nosidecar(self, fortio_cmd):
basestr = "http://{svc}:{port}/echo?size={size}"
if self.mode == "grpc":
basestr = "-payload-size {size} {svc}:{port}"
return fortio_cmd + "_base " + basestr.format(
svc=self.server.ip, port=self.ports[self.mode]["direct_port"], size=self.size)
def serversidecar(self, fortio_cmd):
basestr = "http://{svc}:{port}/echo?size={size}"
if self.mode == "grpc":
basestr = "-payload-size {size} {svc}:{port}"
return fortio_cmd + "_serveronly " + basestr.format(
svc=self.server.ip, port=self.ports[self.mode]["port"], size=self.size)
def bothsidecar(self, fortio_cmd):
basestr = "http://{svc}:{port}/echo?size={size}"
if self.mode == "grpc":
basestr = "-payload-size {size} {svc}:{port}"
return fortio_cmd + "_both " + basestr.format(
svc=self.server.labels["app"], port=self.ports[self.mode]["port"], size=self.size)
def ingress(self, fortio_cmd):
svc = self.run_ingress
if ':' not in svc:
svc += ":{port}".format(port=self.ports[self.mode]["ingress"])
return fortio_cmd + "_ingress http://{svc}/echo?size={size}".format(
svc=svc, size=self.size)
def run(self, conn, qps, size, duration):
size = size or self.size
if duration is None:
duration = self.duration
labels = self.run_id
labels += "_qps_" + str(qps)
labels += "_c_" + str(conn)
labels += "_" + str(size)
# Mixer label
labels += "_"
labels += self.mixer_mode
if self.labels is not None:
labels += "_" + self.labels
grpc = ""
if self.mode == "grpc":
grpc = "-grpc -ping"
fortio_cmd = (
"fortio load -c {conn} -qps {qps} -t {duration}s -a -r {r} {grpc} -httpbufferkb=128 " +
"-labels {labels}").format(
conn=conn,
qps=qps,
duration=duration,
r=self.r,
grpc=grpc,
labels=labels)
if self.run_ingress:
kubectl_exec(self.client.name, self.ingress(fortio_cmd))
if self.perf_record:
run_perf(
self.mesh,
self.server.name,
labels + "_srv_ingress",
duration=40)
if self.run_serversidecar:
kubectl_exec(self.client.name, self.serversidecar(fortio_cmd))
if self.perf_record:
run_perf(
self.mesh,
self.server.name,
labels + "_srv_serveronly",
duration=40)
if self.run_clientsidecar:
kubectl_exec(self.client.name, self.bothsidecar(fortio_cmd))
if self.perf_record:
run_perf(
self.mesh,
self.server.name,
labels + "_srv_bothsidecars",
duration=40)
if self.run_baseline:
kubectl_exec(self.client.name, self.nosidecar(fortio_cmd))
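        # Illustrative only: with run_id='3f2a1b0c', qps=1000, conn=16,
        # size=1024 and mixer_mode='mixer', the labels built above are
        # '3f2a1b0c_qps_1000_c_16_1024_mixer'; fortio embeds this string in
        # its result files so runs can be matched up during analysis.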
PERFCMD = "/usr/lib/linux-tools/4.4.0-131-generic/perf"
PERFSH = "get_perfdata.sh"
PERFWD = "/etc/istio/proxy/"
def run_perf(mesh, pod, labels, duration=20):
filename = labels + "_perf.data"
filepath = PERFWD + filename
perfpath = PERFWD + PERFSH
# copy executable over
kubectl_cp(PERFSH, pod + ":" + perfpath, mesh + "-proxy")
kubectl_exec(
pod,
"{perf_cmd} {filename} {duration}".format(
perf_cmd=perfpath,
filename=filename,
duration=duration),
container=mesh + "-proxy")
kubectl_cp(pod + ":" + filepath + ".perf", filename + ".perf", mesh + "-proxy")
run_command_sync("../flame/flame.sh " + filename + ".perf")
def kubectl_cp(from_file, to_file, container):
namespace = os.environ.get("NAMESPACE", "twopods")
cmd = "kubectl --namespace {namespace} cp {from_file} {to_file} -c {container}".format(
namespace=namespace,
from_file=from_file,
to_file=to_file,
container=container)
print(cmd)
run_command_sync(cmd)
def kubectl_exec(pod, remote_cmd, runfn=run_command, container=None):
namespace = os.environ.get("NAMESPACE", "twopods")
c = ""
if container is not None:
c = "-c " + container
cmd = "kubectl --namespace {namespace} exec -i -t {pod} {c} -- {remote_cmd}".format(
pod=pod,
remote_cmd=remote_cmd,
c=c,
namespace=namespace)
print(cmd)
runfn(cmd)
def rc(command):
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
while True:
        # readline() returns bytes under Python 3; decode before comparing and printing
        output = process.stdout.readline().decode()
        if output == '' and process.poll() is not None:
            break
        if output:
            print(output.strip() + "\n")
return process.poll()
def run(args):
min_duration = METRICS_START_SKIP_DURATION + METRICS_END_SKIP_DURATION
if args.duration <= min_duration:
print("Duration must be greater than {min_duration}".format(
min_duration=min_duration))
exit(1)
fortio = Fortio(
conn=args.conn,
qps=args.qps,
duration=args.duration,
size=args.size,
perf_record=args.perf,
labels=args.labels,
baseline=args.baseline,
serversidecar=args.serversidecar,
clientsidecar=args.clientsidecar,
ingress=args.ingress,
mode=args.mode,
mesh=args.mesh,
mixer_mode=args.mixer_mode)
for conn in args.conn:
for qps in args.qps:
fortio.run(conn=conn, qps=qps,
duration=args.duration, size=args.size)
def csv_to_int(s):
return [int(i) for i in s.split(",")]
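# Illustrative only: csv_to_int('16,32,64') returns [16, 32, 64], so
#     python runner.py 16,32,64 500,1000 240
# sweeps every (connections, qps) combination with 240-second runs.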
def get_parser():
parser = argparse.ArgumentParser("Run performance test")
parser.add_argument(
"conn",
help="number of connections, comma separated list",
type=csv_to_int,)
parser.add_argument(
"qps",
help="qps, comma separated list",
type=csv_to_int,)
parser.add_argument(
"duration",
help="duration in seconds of the extract",
type=int)
parser.add_argument(
"--size",
help="size of the payload",
type=int,
default=1024)
parser.add_argument(
"--mesh",
help="istio or linkerd",
default="istio")
parser.add_argument(
"--mixer_mode",
help="run with different mixer configurations: mixer, nomixer, mixerv2",
default="mixer")
parser.add_argument(
"--client",
help="where to run the test from",
default=None)
parser.add_argument(
"--server",
help="pod ip of the server",
default=None)
parser.add_argument(
"--perf",
help="also run perf and produce flame graph",
default=False)
parser.add_argument(
"--ingress",
help="run traffic through ingress",
default=None)
parser.add_argument(
"--labels",
help="extra labels",
default=None)
parser.add_argument(
"--mode",
help="http or grpc",
default="http")
define_bool(parser, "baseline", "run baseline for all", False)
define_bool(parser, "serversidecar",
"run serversidecar-only for all", False)
define_bool(parser, "clientsidecar",
"run clientsidecar and serversidecar for all", True)
return parser
def define_bool(parser, opt, help_arg, default_val):
parser.add_argument(
"--" + opt, help=help_arg, dest=opt, action='store_true')
parser.add_argument(
"--no-" + opt, help="do not " + help_arg, dest=opt, action='store_false')
val = {opt: default_val}
parser.set_defaults(**val)
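# Illustrative note: define_bool(parser, 'baseline', ...) registers the pair
# --baseline / --no-baseline writing to the same 'baseline' dest with the given
# default; argparse only gained an equivalent BooleanOptionalAction in
# Python 3.9, so this helper emulates it for older interpreters.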
def main(argv):
args = get_parser().parse_args(argv)
print(args)
return run(args)
if __name__ == "__main__":
import sys
sys.exit(main(sys.argv[1:]))
| 30.658793 | 99 | 0.583769 |
from __future__ import print_function
import collections
import os
import json
import argparse
import subprocess
import shlex
import uuid
from fortio import METRICS_START_SKIP_DURATION, METRICS_END_SKIP_DURATION
import sys
if sys.version_info.major == 2:
from commands import getoutput
else:
from subprocess import getoutput
POD = collections.namedtuple('Pod', ['name', 'namespace', 'ip', 'labels'])
def pod_info(filterstr="", namespace="twopods", multi_ok=True):
cmd = "kubectl -n {namespace} get pod {filterstr} -o json".format(
namespace=namespace, filterstr=filterstr)
op = getoutput(cmd)
o = json.loads(op)
items = o['items']
if not multi_ok and len(items) > 1:
raise Exception("more than one found " + op)
if not items:
raise Exception("no pods found with command [" + cmd + "]")
i = items[0]
return POD(i['metadata']['name'], i['metadata']['namespace'],
i['status']['podIP'], i['metadata']['labels'])
def run_command(command):
process = subprocess.Popen(shlex.split(command))
process.wait()
def run_command_sync(command):
op = getoutput(command)
return op.strip()
class Fortio:
ports = {
"http": {"direct_port": 8077, "port": 8080, "ingress": 80},
"grpc": {"direct_port": 8076, "port": 8079, "ingress": 80},
"direct_envoy": {"direct_port": 8076, "port": 8079},
}
def __init__(
self,
conn=None,
qps=None,
duration=None,
size=None,
mode="http",
mixer_mode="mixer",
mixer_cache=True,
perf_record=False,
server="fortioserver",
client="fortioclient",
additional_args=None,
filter_fn=None,
labels=None,
baseline=False,
serversidecar=False,
clientsidecar=True,
ingress=None,
mesh="istio"):
self.run_id = str(uuid.uuid4()).partition('-')[0]
self.conn = conn
self.qps = qps
self.size = size
self.duration = duration
self.mode = mode
self.ns = os.environ.get("NAMESPACE", "twopods")
self.r = "0.00005"
self.mixer_mode = mixer_mode
self.mixer_cache = mixer_cache
self.perf_record = perf_record
self.server = pod_info("-lapp=" + server, namespace=self.ns)
self.client = pod_info("-lapp=" + client, namespace=self.ns)
self.additional_args = additional_args
self.filter_fn = filter_fn
self.labels = labels
self.run_baseline = baseline
self.run_serversidecar = serversidecar
self.run_clientsidecar = clientsidecar
self.run_ingress = ingress
if mesh == "linkerd":
self.mesh = "linkerd"
elif mesh == "istio":
self.mesh = "istio"
else:
sys.exit("invalid mesh %s, must be istio or linkerd" % mesh)
def nosidecar(self, fortio_cmd):
basestr = "http://{svc}:{port}/echo?size={size}"
if self.mode == "grpc":
basestr = "-payload-size {size} {svc}:{port}"
return fortio_cmd + "_base " + basestr.format(
svc=self.server.ip, port=self.ports[self.mode]["direct_port"], size=self.size)
def serversidecar(self, fortio_cmd):
basestr = "http://{svc}:{port}/echo?size={size}"
if self.mode == "grpc":
basestr = "-payload-size {size} {svc}:{port}"
return fortio_cmd + "_serveronly " + basestr.format(
svc=self.server.ip, port=self.ports[self.mode]["port"], size=self.size)
def bothsidecar(self, fortio_cmd):
basestr = "http://{svc}:{port}/echo?size={size}"
if self.mode == "grpc":
basestr = "-payload-size {size} {svc}:{port}"
return fortio_cmd + "_both " + basestr.format(
svc=self.server.labels["app"], port=self.ports[self.mode]["port"], size=self.size)
def ingress(self, fortio_cmd):
svc = self.run_ingress
if ':' not in svc:
svc += ":{port}".format(port=self.ports[self.mode]["ingress"])
return fortio_cmd + "_ingress http://{svc}/echo?size={size}".format(
svc=svc, size=self.size)
def run(self, conn, qps, size, duration):
size = size or self.size
if duration is None:
duration = self.duration
labels = self.run_id
labels += "_qps_" + str(qps)
labels += "_c_" + str(conn)
labels += "_" + str(size)
labels += "_"
labels += self.mixer_mode
if self.labels is not None:
labels += "_" + self.labels
grpc = ""
if self.mode == "grpc":
grpc = "-grpc -ping"
fortio_cmd = (
"fortio load -c {conn} -qps {qps} -t {duration}s -a -r {r} {grpc} -httpbufferkb=128 " +
"-labels {labels}").format(
conn=conn,
qps=qps,
duration=duration,
r=self.r,
grpc=grpc,
labels=labels)
if self.run_ingress:
kubectl_exec(self.client.name, self.ingress(fortio_cmd))
if self.perf_record:
run_perf(
self.mesh,
self.server.name,
labels + "_srv_ingress",
duration=40)
if self.run_serversidecar:
kubectl_exec(self.client.name, self.serversidecar(fortio_cmd))
if self.perf_record:
run_perf(
self.mesh,
self.server.name,
labels + "_srv_serveronly",
duration=40)
if self.run_clientsidecar:
kubectl_exec(self.client.name, self.bothsidecar(fortio_cmd))
if self.perf_record:
run_perf(
self.mesh,
self.server.name,
labels + "_srv_bothsidecars",
duration=40)
if self.run_baseline:
kubectl_exec(self.client.name, self.nosidecar(fortio_cmd))
PERFCMD = "/usr/lib/linux-tools/4.4.0-131-generic/perf"
PERFSH = "get_perfdata.sh"
PERFWD = "/etc/istio/proxy/"
def run_perf(mesh, pod, labels, duration=20):
filename = labels + "_perf.data"
filepath = PERFWD + filename
perfpath = PERFWD + PERFSH
kubectl_cp(PERFSH, pod + ":" + perfpath, mesh + "-proxy")
kubectl_exec(
pod,
"{perf_cmd} {filename} {duration}".format(
perf_cmd=perfpath,
filename=filename,
duration=duration),
container=mesh + "-proxy")
kubectl_cp(pod + ":" + filepath + ".perf", filename + ".perf", mesh + "-proxy")
run_command_sync("../flame/flame.sh " + filename + ".perf")
def kubectl_cp(from_file, to_file, container):
namespace = os.environ.get("NAMESPACE", "twopods")
cmd = "kubectl --namespace {namespace} cp {from_file} {to_file} -c {container}".format(
namespace=namespace,
from_file=from_file,
to_file=to_file,
container=container)
print(cmd)
run_command_sync(cmd)
def kubectl_exec(pod, remote_cmd, runfn=run_command, container=None):
namespace = os.environ.get("NAMESPACE", "twopods")
c = ""
if container is not None:
c = "-c " + container
cmd = "kubectl --namespace {namespace} exec -i -t {pod} {c} -- {remote_cmd}".format(
pod=pod,
remote_cmd=remote_cmd,
c=c,
namespace=namespace)
print(cmd)
runfn(cmd)
def rc(command):
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
while True:
        output = process.stdout.readline().decode()
        if output == '' and process.poll() is not None:
            break
        if output:
            print(output.strip() + "\n")
return process.poll()
def run(args):
min_duration = METRICS_START_SKIP_DURATION + METRICS_END_SKIP_DURATION
if args.duration <= min_duration:
print("Duration must be greater than {min_duration}".format(
min_duration=min_duration))
exit(1)
fortio = Fortio(
conn=args.conn,
qps=args.qps,
duration=args.duration,
size=args.size,
perf_record=args.perf,
labels=args.labels,
baseline=args.baseline,
serversidecar=args.serversidecar,
clientsidecar=args.clientsidecar,
ingress=args.ingress,
mode=args.mode,
mesh=args.mesh,
mixer_mode=args.mixer_mode)
for conn in args.conn:
for qps in args.qps:
fortio.run(conn=conn, qps=qps,
duration=args.duration, size=args.size)
def csv_to_int(s):
return [int(i) for i in s.split(",")]
def get_parser():
parser = argparse.ArgumentParser("Run performance test")
parser.add_argument(
"conn",
help="number of connections, comma separated list",
type=csv_to_int,)
parser.add_argument(
"qps",
help="qps, comma separated list",
type=csv_to_int,)
parser.add_argument(
"duration",
help="duration in seconds of the extract",
type=int)
parser.add_argument(
"--size",
help="size of the payload",
type=int,
default=1024)
parser.add_argument(
"--mesh",
help="istio or linkerd",
default="istio")
parser.add_argument(
"--mixer_mode",
help="run with different mixer configurations: mixer, nomixer, mixerv2",
default="mixer")
parser.add_argument(
"--client",
help="where to run the test from",
default=None)
parser.add_argument(
"--server",
help="pod ip of the server",
default=None)
parser.add_argument(
"--perf",
help="also run perf and produce flame graph",
default=False)
parser.add_argument(
"--ingress",
help="run traffic through ingress",
default=None)
parser.add_argument(
"--labels",
help="extra labels",
default=None)
parser.add_argument(
"--mode",
help="http or grpc",
default="http")
define_bool(parser, "baseline", "run baseline for all", False)
define_bool(parser, "serversidecar",
"run serversidecar-only for all", False)
define_bool(parser, "clientsidecar",
"run clientsidecar and serversidecar for all", True)
return parser
def define_bool(parser, opt, help_arg, default_val):
parser.add_argument(
"--" + opt, help=help_arg, dest=opt, action='store_true')
parser.add_argument(
"--no-" + opt, help="do not " + help_arg, dest=opt, action='store_false')
val = {opt: default_val}
parser.set_defaults(**val)
def main(argv):
args = get_parser().parse_args(argv)
print(args)
return run(args)
if __name__ == "__main__":
import sys
sys.exit(main(sys.argv[1:]))
| true | true |
7904234621e721b43376c422781755f4a239d255 | 4,023 | py | Python | tests/integration/operators_test/boolean_test.py | gglin001/popart | 3225214343f6d98550b6620e809a3544e8bcbfc6 | ["MIT"] | 61 | 2020-07-06T17:11:46.000Z | 2022-03-12T14:42:51.000Z | tests/integration/operators_test/boolean_test.py | gglin001/popart | 3225214343f6d98550b6620e809a3544e8bcbfc6 | ["MIT"] | 1 | 2021-02-25T01:30:29.000Z | 2021-11-09T11:13:14.000Z | tests/integration/operators_test/boolean_test.py | gglin001/popart | 3225214343f6d98550b6620e809a3544e8bcbfc6 | ["MIT"] | 6 | 2020-07-15T12:33:13.000Z | 2021-11-07T06:55:00.000Z |
# Copyright (c) 2019 Graphcore Ltd. All rights reserved.
import numpy as np
import popart
import torch
import pytest
from op_tester import op_tester
def test_and(op_tester):
d1 = (np.random.randn(2) > 0).astype(np.bool_)
d2 = (np.random.randn(2) > 0).astype(np.bool_)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.logical_and([i1, i2])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
t1 = torch.tensor(d1, dtype=torch.bool)
t2 = torch.tensor(d2, dtype=torch.bool)
out = t1 & t2
return [out]
op_tester.run(init_builder, reference, step_type='infer')
def test_broadcast_and(op_tester):
d1 = (np.random.randn(2, 2) > 0).astype(np.bool_)
d2 = (np.random.randn(2) > 0).astype(np.bool_)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.logical_and([i1, i2])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
t1 = torch.tensor(d1, dtype=torch.bool)
t2 = torch.tensor(d2, dtype=torch.bool)
out = t1 & t2
return [out]
op_tester.run(init_builder, reference, step_type='infer')
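# Illustrative note on the broadcast tests: a (2, 2) operand combined with a
# (2,) operand follows standard NumPy/ONNX broadcasting, so d2 is applied to
# each row of d1; e.g. [[True, False], [False, True]] & [True, True]
# == [[True, False], [False, True]].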
def test_or(op_tester):
d1 = (np.random.randn(2) > 0).astype(np.bool_)
d2 = (np.random.randn(2) > 0).astype(np.bool_)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.logical_or([i1, i2])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
t1 = torch.tensor(d1, dtype=torch.bool)
t2 = torch.tensor(d2, dtype=torch.bool)
out = t1 | t2
return [out]
op_tester.run(init_builder, reference, step_type='infer')
def test_broadcast_or(op_tester):
d1 = (np.random.randn(2, 2) > 0).astype(np.bool_)
d2 = (np.random.randn(2) > 0).astype(np.bool_)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.logical_or([i1, i2])
print(o)
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
t1 = torch.tensor(d1, dtype=torch.bool)
t2 = torch.tensor(d2, dtype=torch.bool)
out = t1 | t2
return [out]
op_tester.run(init_builder, reference, step_type='infer')
def test_not(op_tester):
d1 = (np.random.randn(2) > 0).astype(np.bool_)
print(d1)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
o = builder.aiOnnx.logical_not([i1])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
return [np.logical_not(d1)]
op_tester.run(init_builder, reference, step_type='infer')
def test_equal(op_tester):
d1 = (np.random.randn(2)).astype(np.float32)
d2 = (np.random.randn(2)).astype(np.float32)
d2[0] = d1[0]
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.equal([i1, i2])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
t1 = torch.tensor(d1)
t2 = torch.tensor(d2)
out = torch.eq(t1, t2)
return [out]
op_tester.run(init_builder, reference, step_type='infer')
def test_broadcast_equal(op_tester):
d1 = (np.random.randn(2, 2)).astype(np.float32)
d2 = (np.random.randn(2)).astype(np.float32)
# d2[0][0] = d1[0]
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.equal([i1, i2])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
t1 = torch.tensor(d1)
t2 = torch.tensor(d2)
out = torch.eq(t1, t2)
return [out]
op_tester.run(init_builder, reference, step_type='infer')
| 27 | 61 | 0.61745 |
import numpy as np
import popart
import torch
import pytest
from op_tester import op_tester
def test_and(op_tester):
d1 = (np.random.randn(2) > 0).astype(np.bool_)
d2 = (np.random.randn(2) > 0).astype(np.bool_)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.logical_and([i1, i2])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
t1 = torch.tensor(d1, dtype=torch.bool)
t2 = torch.tensor(d2, dtype=torch.bool)
out = t1 & t2
return [out]
op_tester.run(init_builder, reference, step_type='infer')
def test_broadcast_and(op_tester):
d1 = (np.random.randn(2, 2) > 0).astype(np.bool_)
d2 = (np.random.randn(2) > 0).astype(np.bool_)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.logical_and([i1, i2])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
t1 = torch.tensor(d1, dtype=torch.bool)
t2 = torch.tensor(d2, dtype=torch.bool)
out = t1 & t2
return [out]
op_tester.run(init_builder, reference, step_type='infer')
def test_or(op_tester):
d1 = (np.random.randn(2) > 0).astype(np.bool_)
d2 = (np.random.randn(2) > 0).astype(np.bool_)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.logical_or([i1, i2])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
t1 = torch.tensor(d1, dtype=torch.bool)
t2 = torch.tensor(d2, dtype=torch.bool)
out = t1 | t2
return [out]
op_tester.run(init_builder, reference, step_type='infer')
def test_broadcast_or(op_tester):
d1 = (np.random.randn(2, 2) > 0).astype(np.bool_)
d2 = (np.random.randn(2) > 0).astype(np.bool_)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.logical_or([i1, i2])
print(o)
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
t1 = torch.tensor(d1, dtype=torch.bool)
t2 = torch.tensor(d2, dtype=torch.bool)
out = t1 | t2
return [out]
op_tester.run(init_builder, reference, step_type='infer')
def test_not(op_tester):
d1 = (np.random.randn(2) > 0).astype(np.bool_)
print(d1)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
o = builder.aiOnnx.logical_not([i1])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
return [np.logical_not(d1)]
op_tester.run(init_builder, reference, step_type='infer')
def test_equal(op_tester):
d1 = (np.random.randn(2)).astype(np.float32)
d2 = (np.random.randn(2)).astype(np.float32)
d2[0] = d1[0]
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.equal([i1, i2])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
t1 = torch.tensor(d1)
t2 = torch.tensor(d2)
out = torch.eq(t1, t2)
return [out]
op_tester.run(init_builder, reference, step_type='infer')
def test_broadcast_equal(op_tester):
d1 = (np.random.randn(2, 2)).astype(np.float32)
d2 = (np.random.randn(2)).astype(np.float32)
def init_builder(builder):
i1 = builder.addInputTensor(d1)
i2 = builder.addInputTensor(d2)
o = builder.aiOnnx.equal([i1, i2])
builder.addOutputTensor(o)
return [o]
def reference(ref_data):
t1 = torch.tensor(d1)
t2 = torch.tensor(d2)
out = torch.eq(t1, t2)
return [out]
op_tester.run(init_builder, reference, step_type='infer')
| true | true |
79042439b319a493991d940cde55d4e859f9cf39 | 8,113 | py | Python | pipekit/component.py | DrDub/pipekit | 87bd69ac2926085a27d72e9e5f19bd05038f178e | ["MIT"] | null | null | null | pipekit/component.py | DrDub/pipekit | 87bd69ac2926085a27d72e9e5f19bd05038f178e | ["MIT"] | null | null | null | pipekit/component.py | DrDub/pipekit | 87bd69ac2926085a27d72e9e5f19bd05038f178e | ["MIT"] | null | null | null |
#!/usr/bin/env python3
import asyncio
import logging
from collections import defaultdict
from functools import partial
from box import Box
_l = logging.getLogger(__name__)
_instances = dict()
_events = defaultdict(asyncio.Event)
_event_queues = list()
_event_callbacks = defaultdict(list)
class Component:
"""A stateful element in a workflow that can be configured, run, and uniquely named."""
def __init__(self, *args, id=None, workflow=None, parent=None, logger=_l, **kwargs):
self.id = id
if id:
key = (type(self), id)
if key in _instances:
raise ValueError(
f'{key[0].__name__} with ID "{id}" already exists: {_instances[key]}')
_instances[key] = self
self.workflow = workflow
self.parent = parent
self.children = list()
if parent:
parent.children.append(self)
self.logger = logger
self.loop = asyncio.get_event_loop()
self._event_lock = set()
self._debug = {'events'}
self._settings = Box(self.configure(**kwargs) or dict())
if not workflow:
workflow = self
settings = [f'{k}={v}' for k, v in workflow.safe_settings(self._settings).items()]
self.debug(f'Initialized {" ".join(settings)}')
def configure(self, **settings):
return settings
def settings(self, **override):
return Box(self._settings, **override)
def safe_settings(self, settings):
return settings
@property
def type(self):
return type(self).__name__
@property
def status(self):
return getattr(self, '_status', None)
@status.setter
def status(self, status):
if not (self.hasstatus(status) or status in self._event_lock):
self._event_lock.add(status)
try:
self._status_setter(status)
finally:
self._event_lock.remove(status)
_dependent_statuses = {'processing-finished', 'finished', 'exited'}
def _status_setter(self, status):
event = status if isinstance(status, ComponentEvent) else ComponentEvent(status, self)
if event.status in self._dependent_statuses:
children = set(filter(lambda c: isinstance(c, Component), self.children))
ready = set(filter(lambda c: c.hasstatus(event.status), children))
if len(children) > len(ready):
if 'events' in self._debug:
pending = ", ".join(c.id for c in children.difference(ready))
self.debug(f'Status "{event.status}" waiting on {pending}')
return
if self.hasstatus('aborted') and event.status != 'exited':
if 'events' in self._debug:
self.debug(f'Ignoring status "{event.status}" because the component is '
'in aborted state')
return
# event.id = self._fqevent(status)
if 'events' in self._debug:
self.debug(f'Emitting event "{event.id}"')
self._status = event.status
_events[event.id].set()
for queue in _event_queues:
queue.put_nowait(event)
if self.parent and event.status != 'aborted' and not isinstance(self, LocalEvents):
self.parent.status = event.status
for callback in _event_callbacks[event.id]:
asyncio.ensure_future(callback())
_event_callbacks[event.id].clear()
def hasstatus(self, status):
"""Return `True` if given status was set."""
if isinstance(status, ComponentEvent):
event = status.id
elif ':' in status:
event = status
else:
event = ComponentEvent(status, self).id
return _events[event].is_set()
async def waiton(self, event):
if 'events' in self._debug:
self.debug(f'Waiting on event "{event}"')
await _events[event].wait()
if 'events' in self._debug:
self.debug(f'Received event "{event}"')
@property
def running(self):
"""Return `True` if in one of the running states."""
if not self.stopped:
for status in ['started', 'running']:
if self.hasstatus(status):
return True
@property
def stopped(self):
"""Return `True` if in one of the stopped states."""
for status in ['aborted', 'finished']:
if self.hasstatus(status):
return True
@property
def aborted(self):
"""Return `True` if the aborted event was emitted."""
return self.hasstatus('aborted')
def start(self):
self.status = 'started'
return self.run()
def stop(self):
self.debug('Stopping')
def abort(self, exception=None):
if self.hasstatus('aborted'):
return
self.status = ComponentEvent('aborted', self, exception)
for child in self.children:
if child.settings().get('error-propagation') in ('none', 'up'):
if 'events' in self._debug:
self.debug(f'Suppressing error propagation to child {child.id}')
elif not child.hasstatus('aborted'):
if 'events' in self._debug:
self.debug(f'Propagating error to child {child.id}')
child.abort()
if self.parent:
if self.parent.settings().get('error-propagation') in ('none', 'down'):
if 'events' in self._debug:
self.debug(f'Suppressing error propagation to parent {self.parent.id}')
elif not self.parent.hasstatus('aborted'):
if 'events' in self._debug:
self.debug(f'Propagating error to parent {self.parent.id}')
self.parent.abort(exception)
def __getattr__(self, name):
if name not in ('critical', 'error', 'warning', 'info', 'debug', 'exception'):
raise AttributeError(f"'{self.type}' object has no attribute '{name}'")
return partial(self._proxied_logging_method, name)
def _proxied_logging_method(self, method, *args, **kwargs):
        if method == 'debug':
            # The settings key is the string 'logging'; comparing against the
            # logging module object here was a bug.
            if 'logging' in (self.workflow or self).settings():
                debug = (self.workflow or self).settings().logging.debug
            else:
                debug = []
            if not ('all' in debug or self.type in debug or self.id in debug):
                return None  # debug logging not enabled for this component
return getattr(self.logger, method)(*self._log_formatted(*args), **kwargs)
def _log_formatted(self, msg, *args):
"""Return the msg prefixed with this component's ID and type."""
prefix = f'{self.id} ' if self.id else ''
msg = f'{prefix}({self.type}) {msg}'
return (msg,) + args
async def run(self):
self.status = 'running'
async def try_while_running(self, callable, timeout=0.5):
"""Return result of `callable`, or raise `ComponentInterrupted` if component is stopped."""
while self.running:
coro = callable()
try:
return await asyncio.wait_for(coro, timeout)
except asyncio.TimeoutError:
pass
raise ComponentInterrupted
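    # Illustrative usage (hypothetical queue): inside a component's run() loop,
    #     msg = await self.try_while_running(lambda: queue.get())
    # retries queue.get() in 0.5 s slices and raises ComponentInterrupted as
    # soon as the component leaves the running state.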
class ComponentEvent:
def __init__(self, status, component, exception=None):
self.status = status
self.component = component
self.exception = exception
@property
def id(self):
"""Return a fully qualified ID string representing this event."""
return f'{self.component.id}:{self.status}'
class LocalEvents:
pass
class ComponentInterrupted(Exception):
pass
def get_event_listener():
"""Return a new `Queue` object that will see all events."""
queue = asyncio.Queue()
_event_queues.append(queue)
return queue
def add_event_callback(event, callable, *args, **kwargs):
"""Register a callback that will be called upon the given event."""
_event_callbacks[event].append(partial(callable, *args, **kwargs))
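# Illustrative usage (hypothetical coroutine function as the callable):
#     add_event_callback('reader:finished', writer.stop)
# schedules writer.stop() the next time the 'reader:finished' event is emitted;
# _status_setter then clears the callback list, so it fires only once.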
| 33.524793 | 99 | 0.590903 |
import asyncio
import logging
from collections import defaultdict
from functools import partial
from box import Box
_l = logging.getLogger(__name__)
_instances = dict()
_events = defaultdict(asyncio.Event)
_event_queues = list()
_event_callbacks = defaultdict(list)
class Component:
def __init__(self, *args, id=None, workflow=None, parent=None, logger=_l, **kwargs):
self.id = id
if id:
key = (type(self), id)
if key in _instances:
raise ValueError(
f'{key[0].__name__} with ID "{id}" already exists: {_instances[key]}')
_instances[key] = self
self.workflow = workflow
self.parent = parent
self.children = list()
if parent:
parent.children.append(self)
self.logger = logger
self.loop = asyncio.get_event_loop()
self._event_lock = set()
self._debug = {'events'}
self._settings = Box(self.configure(**kwargs) or dict())
if not workflow:
workflow = self
settings = [f'{k}={v}' for k, v in workflow.safe_settings(self._settings).items()]
self.debug(f'Initialized {" ".join(settings)}')
def configure(self, **settings):
return settings
def settings(self, **override):
return Box(self._settings, **override)
def safe_settings(self, settings):
return settings
@property
def type(self):
return type(self).__name__
@property
def status(self):
return getattr(self, '_status', None)
@status.setter
def status(self, status):
if not (self.hasstatus(status) or status in self._event_lock):
self._event_lock.add(status)
try:
self._status_setter(status)
finally:
self._event_lock.remove(status)
_dependent_statuses = {'processing-finished', 'finished', 'exited'}
def _status_setter(self, status):
event = status if isinstance(status, ComponentEvent) else ComponentEvent(status, self)
if event.status in self._dependent_statuses:
children = set(filter(lambda c: isinstance(c, Component), self.children))
ready = set(filter(lambda c: c.hasstatus(event.status), children))
if len(children) > len(ready):
if 'events' in self._debug:
pending = ", ".join(c.id for c in children.difference(ready))
self.debug(f'Status "{event.status}" waiting on {pending}')
return
if self.hasstatus('aborted') and event.status != 'exited':
if 'events' in self._debug:
self.debug(f'Ignoring status "{event.status}" because the component is '
'in aborted state')
return
if 'events' in self._debug:
self.debug(f'Emitting event "{event.id}"')
self._status = event.status
_events[event.id].set()
for queue in _event_queues:
queue.put_nowait(event)
if self.parent and event.status != 'aborted' and not isinstance(self, LocalEvents):
self.parent.status = event.status
for callback in _event_callbacks[event.id]:
asyncio.ensure_future(callback())
_event_callbacks[event.id].clear()
def hasstatus(self, status):
if isinstance(status, ComponentEvent):
event = status.id
elif ':' in status:
event = status
else:
event = ComponentEvent(status, self).id
return _events[event].is_set()
async def waiton(self, event):
if 'events' in self._debug:
self.debug(f'Waiting on event "{event}"')
await _events[event].wait()
if 'events' in self._debug:
self.debug(f'Received event "{event}"')
@property
def running(self):
if not self.stopped:
for status in ['started', 'running']:
if self.hasstatus(status):
return True
@property
def stopped(self):
for status in ['aborted', 'finished']:
if self.hasstatus(status):
return True
@property
def aborted(self):
return self.hasstatus('aborted')
def start(self):
self.status = 'started'
return self.run()
def stop(self):
self.debug('Stopping')
def abort(self, exception=None):
if self.hasstatus('aborted'):
return
self.status = ComponentEvent('aborted', self, exception)
for child in self.children:
if child.settings().get('error-propagation') in ('none', 'up'):
if 'events' in self._debug:
self.debug(f'Suppressing error propagation to child {child.id}')
elif not child.hasstatus('aborted'):
if 'events' in self._debug:
self.debug(f'Propagating error to child {child.id}')
child.abort()
if self.parent:
if self.parent.settings().get('error-propagation') in ('none', 'down'):
if 'events' in self._debug:
self.debug(f'Suppressing error propagation to parent {self.parent.id}')
elif not self.parent.hasstatus('aborted'):
if 'events' in self._debug:
self.debug(f'Propagating error to parent {self.parent.id}')
self.parent.abort(exception)
def __getattr__(self, name):
if name not in ('critical', 'error', 'warning', 'info', 'debug', 'exception'):
raise AttributeError(f"'{self.type}' object has no attribute '{name}'")
return partial(self._proxied_logging_method, name)
def _proxied_logging_method(self, method, *args, **kwargs):
        if method == 'debug':
            if 'logging' in (self.workflow or self).settings():
                debug = (self.workflow or self).settings().logging.debug
            else:
                debug = []
            if not ('all' in debug or self.type in debug or self.id in debug):
                return None
return getattr(self.logger, method)(*self._log_formatted(*args), **kwargs)
def _log_formatted(self, msg, *args):
prefix = f'{self.id} ' if self.id else ''
msg = f'{prefix}({self.type}) {msg}'
return (msg,) + args
async def run(self):
self.status = 'running'
async def try_while_running(self, callable, timeout=0.5):
while self.running:
coro = callable()
try:
return await asyncio.wait_for(coro, timeout)
except asyncio.TimeoutError:
pass
raise ComponentInterrupted
class ComponentEvent:
def __init__(self, status, component, exception=None):
self.status = status
self.component = component
self.exception = exception
@property
def id(self):
return f'{self.component.id}:{self.status}'
class LocalEvents:
pass
class ComponentInterrupted(Exception):
pass
def get_event_listener():
queue = asyncio.Queue()
_event_queues.append(queue)
return queue
def add_event_callback(event, callable, *args, **kwargs):
_event_callbacks[event].append(partial(callable, *args, **kwargs))
| true | true |
790424670a7b4b7ed5c11bd34cd56db2fcf6b60b | 2,424 | py | Python | hatasmota/mqtt.py | effelle/hatasmota | ec34f2e9b635cc35ee67f0d048a32a057313da60 | ["MIT"] | null | null | null | hatasmota/mqtt.py | effelle/hatasmota | ec34f2e9b635cc35ee67f0d048a32a057313da60 | ["MIT"] | null | null | null | hatasmota/mqtt.py | effelle/hatasmota | ec34f2e9b635cc35ee67f0d048a32a057313da60 | ["MIT"] | null | null | null |
"""Tasmota MQTT."""
import asyncio
import logging
from typing import Union
import attr
from .const import COMMAND_BACKLOG
DEBOUNCE_TIMEOUT = 1
_LOGGER = logging.getLogger(__name__)
class Timer:
"""Simple timer."""
def __init__(self, timeout, callback):
self._timeout = timeout
self._callback = callback
self._task = asyncio.ensure_future(self._job())
async def _job(self):
await asyncio.sleep(self._timeout)
self._callback()
def cancel(self):
"""Cancel the timer."""
self._task.cancel()
PublishPayloadType = Union[str, bytes, int, float, None]
@attr.s(slots=True, frozen=True)
class Message:
"""MQTT Message."""
topic: str = attr.ib()
payload: PublishPayloadType = attr.ib()
qos: int = attr.ib()
retain: bool = attr.ib()
class TasmotaMQTTClient:
"""Helper class to sue an external MQTT client."""
def __init__(self, publish, subscribe, unsubscribe):
"""Initialize."""
self._pending_messages = {}
self._publish = publish
self._subscribe = subscribe
self._unsubscribe = unsubscribe
def publish(self, *args, **kwds):
"""Publish a message."""
return self._publish(*args, **kwds)
def publish_debounced(self, topic, payload, qos=None, retain=None):
"""Publish a message, with debounce."""
msg = Message(topic, payload, qos, retain)
def publish_callback():
_LOGGER.debug("publish_debounced: publishing %s", msg)
self._pending_messages.pop(msg)
self.publish(msg.topic, msg.payload, qos=msg.qos, retain=msg.retain)
if msg in self._pending_messages:
timer = self._pending_messages.pop(msg)
timer.cancel()
timer = Timer(DEBOUNCE_TIMEOUT, publish_callback)
self._pending_messages[msg] = timer
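    # Illustrative behaviour: two calls within DEBOUNCE_TIMEOUT seconds with an
    # identical Message(topic, payload, qos, retain) cancel the first timer, so
    #     client.publish_debounced('cmnd/tasmota/POWER', 'ON')
    #     client.publish_debounced('cmnd/tasmota/POWER', 'ON')
    # produces a single MQTT publish roughly one second after the last call.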
async def subscribe(self, sub_state, topics):
"""Subscribe to topics."""
return await self._subscribe(sub_state, topics)
async def unsubscribe(self, sub_state):
"""Unsubscribe from topics."""
return await self._unsubscribe(sub_state)
def send_commands(mqtt_client, command_topic, commands):
"""Send a sequence of commands."""
backlog_topic = command_topic + COMMAND_BACKLOG
backlog = ";".join(["NoDelay;%s %s" % command for command in commands])
mqtt_client.publish(backlog_topic, backlog)
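# Illustrative only, assuming COMMAND_BACKLOG == 'Backlog':
#     send_commands(client, 'cmnd/tasmota_A1B2C3/', [('Power1', 'ON'), ('Delay', '20')])
# publishes 'NoDelay;Power1 ON;NoDelay;Delay 20' to 'cmnd/tasmota_A1B2C3/Backlog'.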
| 27.545455 | 80 | 0.64934 |
import asyncio
import logging
from typing import Union
import attr
from .const import COMMAND_BACKLOG
DEBOUNCE_TIMEOUT = 1
_LOGGER = logging.getLogger(__name__)
class Timer:
def __init__(self, timeout, callback):
self._timeout = timeout
self._callback = callback
self._task = asyncio.ensure_future(self._job())
async def _job(self):
await asyncio.sleep(self._timeout)
self._callback()
def cancel(self):
self._task.cancel()
PublishPayloadType = Union[str, bytes, int, float, None]
@attr.s(slots=True, frozen=True)
class Message:
topic: str = attr.ib()
payload: PublishPayloadType = attr.ib()
qos: int = attr.ib()
retain: bool = attr.ib()
class TasmotaMQTTClient:
def __init__(self, publish, subscribe, unsubscribe):
self._pending_messages = {}
self._publish = publish
self._subscribe = subscribe
self._unsubscribe = unsubscribe
def publish(self, *args, **kwds):
return self._publish(*args, **kwds)
def publish_debounced(self, topic, payload, qos=None, retain=None):
msg = Message(topic, payload, qos, retain)
def publish_callback():
_LOGGER.debug("publish_debounced: publishing %s", msg)
self._pending_messages.pop(msg)
self.publish(msg.topic, msg.payload, qos=msg.qos, retain=msg.retain)
if msg in self._pending_messages:
timer = self._pending_messages.pop(msg)
timer.cancel()
timer = Timer(DEBOUNCE_TIMEOUT, publish_callback)
self._pending_messages[msg] = timer
async def subscribe(self, sub_state, topics):
return await self._subscribe(sub_state, topics)
async def unsubscribe(self, sub_state):
return await self._unsubscribe(sub_state)
def send_commands(mqtt_client, command_topic, commands):
backlog_topic = command_topic + COMMAND_BACKLOG
backlog = ";".join(["NoDelay;%s %s" % command for command in commands])
mqtt_client.publish(backlog_topic, backlog)
| true | true |
7904253dcb398235ae696a22544c7fce9840d06c | 103,140 | py | Python | moto/s3/responses.py | nom3ad/moto | deeabfc6e5884bfe16631b3b020d29a1d558d31c | ["Apache-2.0"] | null | null | null | moto/s3/responses.py | nom3ad/moto | deeabfc6e5884bfe16631b3b020d29a1d558d31c | ["Apache-2.0"] | null | null | null | moto/s3/responses.py | nom3ad/moto | deeabfc6e5884bfe16631b3b020d29a1d558d31c | ["Apache-2.0"] | null | null | null |
from __future__ import unicode_literals
import io
import os
import re
import sys
from botocore.awsrequest import AWSPreparedRequest
from moto.core.utils import (
amzn_request_id,
str_to_rfc_1123_datetime,
py2_strip_unicode_keys,
)
from urllib.parse import (
parse_qs,
parse_qsl,
urlparse,
unquote,
urlencode,
urlunparse,
)
import xmltodict
from moto.packages.httpretty.core import HTTPrettyRequest
from moto.core.responses import _TemplateEnvironmentMixin, ActionAuthenticatorMixin
from moto.core.utils import path_url
from moto.core import ACCOUNT_ID
from moto.settings import S3_IGNORE_SUBDOMAIN_BUCKETNAME
from moto.s3bucket_path.utils import (
bucket_name_from_url as bucketpath_bucket_name_from_url,
parse_key_name as bucketpath_parse_key_name,
is_delete_keys as bucketpath_is_delete_keys,
)
from .exceptions import (
BucketAlreadyExists,
BucketMustHaveLockeEnabled,
DuplicateTagKeys,
InvalidContentMD5,
InvalidContinuationToken,
S3ClientError,
MissingBucket,
MissingKey,
MissingVersion,
InvalidMaxPartArgument,
InvalidPartOrder,
MalformedXML,
MalformedACLError,
IllegalLocationConstraintException,
InvalidNotificationARN,
InvalidNotificationEvent,
ObjectNotInActiveTierError,
NoSystemTags,
PreconditionFailed,
InvalidRange,
LockNotEnabled,
)
from .models import (
s3_backend,
get_canned_acl,
FakeGrantee,
FakeGrant,
FakeAcl,
FakeKey,
)
from .utils import (
bucket_name_from_url,
clean_key_name,
metadata_from_headers,
parse_region_from_url,
)
from xml.dom import minidom
DEFAULT_REGION_NAME = "us-east-1"
ACTION_MAP = {
"BUCKET": {
"HEAD": {"DEFAULT": "HeadBucket",},
"GET": {
"uploads": "ListBucketMultipartUploads",
"location": "GetBucketLocation",
"lifecycle": "GetLifecycleConfiguration",
"versioning": "GetBucketVersioning",
"policy": "GetBucketPolicy",
"website": "GetBucketWebsite",
"acl": "GetBucketAcl",
"tagging": "GetBucketTagging",
"logging": "GetBucketLogging",
"cors": "GetBucketCORS",
"notification": "GetBucketNotification",
"accelerate": "GetAccelerateConfiguration",
"versions": "ListBucketVersions",
"public_access_block": "GetPublicAccessBlock",
"DEFAULT": "ListBucket",
},
"PUT": {
"lifecycle": "PutLifecycleConfiguration",
"versioning": "PutBucketVersioning",
"policy": "PutBucketPolicy",
"website": "PutBucketWebsite",
"acl": "PutBucketAcl",
"tagging": "PutBucketTagging",
"logging": "PutBucketLogging",
"cors": "PutBucketCORS",
"notification": "PutBucketNotification",
"accelerate": "PutAccelerateConfiguration",
"public_access_block": "PutPublicAccessBlock",
"DEFAULT": "CreateBucket",
},
"DELETE": {
"lifecycle": "PutLifecycleConfiguration",
"policy": "DeleteBucketPolicy",
"website": "DeleteBucketWebsite",
"tagging": "PutBucketTagging",
"cors": "PutBucketCORS",
"public_access_block": "DeletePublicAccessBlock",
"DEFAULT": "DeleteBucket",
},
},
"KEY": {
"HEAD": {"DEFAULT": "HeadObject",},
"GET": {
"uploadId": "ListMultipartUploadParts",
"acl": "GetObjectAcl",
"tagging": "GetObjectTagging",
"versionId": "GetObjectVersion",
"DEFAULT": "GetObject",
},
"PUT": {
"acl": "PutObjectAcl",
"tagging": "PutObjectTagging",
"DEFAULT": "PutObject",
},
"DELETE": {
"uploadId": "AbortMultipartUpload",
"versionId": "DeleteObjectVersion",
"DEFAULT": "DeleteObject",
},
"POST": {
"uploads": "PutObject",
"restore": "RestoreObject",
"uploadId": "PutObject",
},
},
"CONTROL": {
"GET": {"publicAccessBlock": "GetPublicAccessBlock"},
"PUT": {"publicAccessBlock": "PutPublicAccessBlock"},
"DELETE": {"publicAccessBlock": "DeletePublicAccessBlock"},
},
}
def parse_key_name(pth):
# strip the first '/' left by urlparse
return pth[1:] if pth.startswith("/") else pth
def is_delete_keys(request, path, bucket_name):
# GOlang sends a request as url/?delete= (treating it as a normal key=value, even if the value is empty)
# Python sends a request as url/?delete (treating it as a flag)
# https://github.com/spulec/moto/issues/2937
return (
path == "/?delete"
or path == "/?delete="
or (path == "/" and getattr(request, "query_string", "") == "delete")
)
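# Illustrative only: all of these multi-object-delete request shapes must match:
#     POST /?delete           (flag form sent by e.g. Python clients)
#     POST /?delete=          (empty key=value form sent by the Go SDK)
#     POST /  with query_string == 'delete'  (as surfaced by some frontends)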
class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
def __init__(self, backend):
super(ResponseObject, self).__init__()
self.backend = backend
self.method = ""
self.path = ""
self.data = {}
self.headers = {}
@property
def should_autoescape(self):
return True
def all_buckets(self):
self.data["Action"] = "ListAllMyBuckets"
self._authenticate_and_authorize_s3_action()
# No bucket specified. Listing all buckets
all_buckets = self.backend.list_buckets()
template = self.response_template(S3_ALL_BUCKETS)
return template.render(buckets=all_buckets)
def subdomain_based_buckets(self, request):
if S3_IGNORE_SUBDOMAIN_BUCKETNAME:
return False
host = request.headers.get("host", request.headers.get("Host"))
if not host:
host = urlparse(request.url).netloc
if (
not host
or host.startswith("localhost")
or host.startswith("localstack")
or re.match(r"^[^.]+$", host)
or re.match(r"^.*\.svc\.cluster\.local:?\d*$", host)
):
# Default to path-based buckets for (1) localhost, (2) localstack hosts (e.g. localstack.dev),
# (3) local host names that do not contain a "." (e.g., Docker container host names), or
# (4) kubernetes host names
return False
match = re.match(r"^([^\[\]:]+)(:\d+)?$", host)
if match:
match = re.match(
r"((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4}", match.groups()[0]
)
if match:
return False
match = re.match(r"^\[(.+)\](:\d+)?$", host)
if match:
match = re.match(
r"^(((?=.*(::))(?!.*\3.+\3))\3?|[\dA-F]{1,4}:)([\dA-F]{1,4}(\3|:\b)|\2){5}(([\dA-F]{1,4}(\3|:\b|$)|\2){2}|(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})\Z",
match.groups()[0],
re.IGNORECASE,
)
if match:
return False
path_based = host == "s3.amazonaws.com" or re.match(
r"s3[\.\-]([^.]*)\.amazonaws\.com", host
)
return not path_based
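    # Illustrative host handling (hypothetical names):
    #     'mybucket.s3.amazonaws.com'   -> virtual-hosted style, returns True
    #     's3.us-west-2.amazonaws.com'  -> path-based, returns False
    #     'localhost:5000', '127.0.0.1' -> path-based, returns False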
def is_delete_keys(self, request, path, bucket_name):
if self.subdomain_based_buckets(request):
return is_delete_keys(request, path, bucket_name)
else:
return bucketpath_is_delete_keys(request, path, bucket_name)
def parse_bucket_name_from_url(self, request, url):
if self.subdomain_based_buckets(request):
return bucket_name_from_url(url)
else:
return bucketpath_bucket_name_from_url(url)
def parse_key_name(self, request, url):
if self.subdomain_based_buckets(request):
return parse_key_name(url)
else:
return bucketpath_parse_key_name(url)
def ambiguous_response(self, request, full_url, headers):
# Depending on which calling format the client is using, we don't know
# if this is a bucket or key request so we have to check
if self.subdomain_based_buckets(request):
return self.key_or_control_response(request, full_url, headers)
else:
# Using path-based buckets
return self.bucket_response(request, full_url, headers)
@amzn_request_id
def bucket_response(self, request, full_url, headers):
self.method = request.method
self.path = self._get_path(request)
self.headers = request.headers
if "host" not in self.headers:
self.headers["host"] = urlparse(full_url).netloc
try:
response = self._bucket_response(request, full_url, headers)
except S3ClientError as s3error:
response = s3error.code, {}, s3error.description
return self._send_response(response)
@staticmethod
def _send_response(response):
if isinstance(response, str):
return 200, {}, response.encode("utf-8")
else:
status_code, headers, response_content = response
if not isinstance(response_content, bytes):
response_content = response_content.encode("utf-8")
return status_code, headers, response_content
def _bucket_response(self, request, full_url, headers):
querystring = self._get_querystring(full_url)
method = request.method
region_name = parse_region_from_url(full_url)
bucket_name = self.parse_bucket_name_from_url(request, full_url)
if not bucket_name:
# If no bucket specified, list all buckets
return self.all_buckets()
self.data["BucketName"] = bucket_name
if hasattr(request, "body"):
# Boto
body = request.body
else:
# Flask server
body = request.data
if body is None:
body = b""
if isinstance(body, bytes):
body = body.decode("utf-8")
body = "{0}".format(body).encode("utf-8")
if method == "HEAD":
return self._bucket_response_head(bucket_name, querystring)
elif method == "GET":
return self._bucket_response_get(bucket_name, querystring)
elif method == "PUT":
return self._bucket_response_put(
request, body, region_name, bucket_name, querystring
)
elif method == "DELETE":
return self._bucket_response_delete(body, bucket_name, querystring)
elif method == "POST":
return self._bucket_response_post(request, body, bucket_name)
else:
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(
method
)
)
@staticmethod
def _get_querystring(full_url):
parsed_url = urlparse(full_url)
querystring = parse_qs(parsed_url.query, keep_blank_values=True)
return querystring
def _bucket_response_head(self, bucket_name, querystring):
self._set_action("BUCKET", "HEAD", querystring)
self._authenticate_and_authorize_s3_action()
try:
self.backend.head_bucket(bucket_name)
except MissingBucket:
# Unless we do this, boto3 does not raise ClientError on
# HEAD (which the real API responds with), and instead
# raises NoSuchBucket, leading to inconsistency in
# error response between real and mocked responses.
return 404, {}, ""
return 200, {}, ""
def _bucket_response_get(self, bucket_name, querystring):
self._set_action("BUCKET", "GET", querystring)
self._authenticate_and_authorize_s3_action()
if "object-lock" in querystring:
(
lock_enabled,
mode,
days,
years,
) = self.backend.get_object_lock_configuration(bucket_name)
template = self.response_template(S3_BUCKET_LOCK_CONFIGURATION)
return template.render(
lock_enabled=lock_enabled, mode=mode, days=days, years=years,
)
if "uploads" in querystring:
for unsup in ("delimiter", "max-uploads"):
if unsup in querystring:
raise NotImplementedError(
"Listing multipart uploads with {} has not been implemented yet.".format(
unsup
)
)
multiparts = list(self.backend.get_all_multiparts(bucket_name).values())
if "prefix" in querystring:
prefix = querystring.get("prefix", [None])[0]
multiparts = [
upload
for upload in multiparts
if upload.key_name.startswith(prefix)
]
template = self.response_template(S3_ALL_MULTIPARTS)
return template.render(bucket_name=bucket_name, uploads=multiparts)
elif "location" in querystring:
location = self.backend.get_bucket_location(bucket_name)
template = self.response_template(S3_BUCKET_LOCATION)
# us-east-1 is different - returns a None location
if location == DEFAULT_REGION_NAME:
location = None
return template.render(location=location)
elif "lifecycle" in querystring:
rules = self.backend.get_bucket_lifecycle(bucket_name)
if not rules:
template = self.response_template(S3_NO_LIFECYCLE)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_BUCKET_LIFECYCLE_CONFIGURATION)
return template.render(rules=rules)
elif "versioning" in querystring:
versioning = self.backend.get_bucket_versioning(bucket_name)
template = self.response_template(S3_BUCKET_GET_VERSIONING)
return template.render(status=versioning)
elif "policy" in querystring:
policy = self.backend.get_bucket_policy(bucket_name)
if not policy:
template = self.response_template(S3_NO_POLICY)
return 404, {}, template.render(bucket_name=bucket_name)
return 200, {}, policy
elif "website" in querystring:
website_configuration = self.backend.get_bucket_website_configuration(
bucket_name
)
if not website_configuration:
template = self.response_template(S3_NO_BUCKET_WEBSITE_CONFIG)
return 404, {}, template.render(bucket_name=bucket_name)
return 200, {}, website_configuration
elif "acl" in querystring:
acl = self.backend.get_bucket_acl(bucket_name)
template = self.response_template(S3_OBJECT_ACL_RESPONSE)
return template.render(acl=acl)
elif "tagging" in querystring:
tags = self.backend.get_bucket_tagging(bucket_name)["Tags"]
# "Special Error" if no tags:
if len(tags) == 0:
template = self.response_template(S3_NO_BUCKET_TAGGING)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_OBJECT_TAGGING_RESPONSE)
return template.render(tags=tags)
elif "logging" in querystring:
logging = self.backend.get_bucket_logging(bucket_name)
if not logging:
template = self.response_template(S3_NO_LOGGING_CONFIG)
return 200, {}, template.render()
template = self.response_template(S3_LOGGING_CONFIG)
return 200, {}, template.render(logging=logging)
elif "cors" in querystring:
cors = self.backend.get_bucket_cors(bucket_name)
if len(cors) == 0:
template = self.response_template(S3_NO_CORS_CONFIG)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_BUCKET_CORS_RESPONSE)
return template.render(cors=cors)
elif "notification" in querystring:
notification_configuration = self.backend.get_bucket_notification_configuration(
bucket_name
)
if not notification_configuration:
return 200, {}, ""
template = self.response_template(S3_GET_BUCKET_NOTIFICATION_CONFIG)
return template.render(config=notification_configuration)
elif "accelerate" in querystring:
bucket = self.backend.get_bucket(bucket_name)
if bucket.accelerate_configuration is None:
template = self.response_template(S3_BUCKET_ACCELERATE_NOT_SET)
return 200, {}, template.render()
template = self.response_template(S3_BUCKET_ACCELERATE)
return template.render(bucket=bucket)
elif "publicAccessBlock" in querystring:
public_block_config = self.backend.get_public_access_block(bucket_name)
template = self.response_template(S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION)
return template.render(public_block_config=public_block_config)
elif "versions" in querystring:
delimiter = querystring.get("delimiter", [None])[0]
encoding_type = querystring.get("encoding-type", [None])[0]
key_marker = querystring.get("key-marker", [None])[0]
max_keys = querystring.get("max-keys", [None])[0]
prefix = querystring.get("prefix", [""])[0]
version_id_marker = querystring.get("version-id-marker", [None])[0]
bucket = self.backend.get_bucket(bucket_name)
(
versions,
common_prefixes,
delete_markers,
) = self.backend.list_object_versions(
bucket_name,
delimiter=delimiter,
encoding_type=encoding_type,
key_marker=key_marker,
max_keys=max_keys,
version_id_marker=version_id_marker,
prefix=prefix,
)
key_list = versions
template = self.response_template(S3_BUCKET_GET_VERSIONS)
return (
200,
{},
template.render(
common_prefixes=common_prefixes,
key_list=key_list,
delete_marker_list=delete_markers,
bucket=bucket,
prefix=prefix,
max_keys=1000,
delimiter=delimiter,
key_marker=key_marker,
is_truncated="false",
),
)
elif "encryption" in querystring:
encryption = self.backend.get_bucket_encryption(bucket_name)
if not encryption:
template = self.response_template(S3_NO_ENCRYPTION)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_ENCRYPTION_CONFIG)
return 200, {}, template.render(encryption=encryption)
elif querystring.get("list-type", [None])[0] == "2":
return 200, {}, self._handle_list_objects_v2(bucket_name, querystring)
bucket = self.backend.get_bucket(bucket_name)
prefix = querystring.get("prefix", [None])[0]
if prefix and isinstance(prefix, bytes):
prefix = prefix.decode("utf-8")
delimiter = querystring.get("delimiter", [None])[0]
max_keys = int(querystring.get("max-keys", [1000])[0])
marker = querystring.get("marker", [None])[0]
result_keys, result_folders = self.backend.list_objects(
bucket, prefix, delimiter
)
if marker:
result_keys = self._get_results_from_token(result_keys, marker)
result_keys, is_truncated, next_marker = self._truncate_result(
result_keys, max_keys
)
template = self.response_template(S3_BUCKET_GET_RESPONSE)
return (
200,
{},
template.render(
bucket=bucket,
prefix=prefix,
delimiter=delimiter,
result_keys=result_keys,
result_folders=result_folders,
is_truncated=is_truncated,
next_marker=next_marker,
max_keys=max_keys,
),
)
def _set_action(self, action_resource_type, method, querystring):
action_set = False
for action_in_querystring, action in ACTION_MAP[action_resource_type][
method
].items():
if action_in_querystring in querystring:
self.data["Action"] = action
action_set = True
if not action_set:
self.data["Action"] = ACTION_MAP[action_resource_type][method]["DEFAULT"]
def _handle_list_objects_v2(self, bucket_name, querystring):
template = self.response_template(S3_BUCKET_GET_RESPONSE_V2)
bucket = self.backend.get_bucket(bucket_name)
continuation_token = querystring.get("continuation-token", [None])[0]
if continuation_token is not None and continuation_token == "":
raise InvalidContinuationToken()
prefix = querystring.get("prefix", [None])[0]
if prefix and isinstance(prefix, bytes):
prefix = prefix.decode("utf-8")
delimiter = querystring.get("delimiter", [None])[0]
all_keys = self.backend.list_objects_v2(bucket, prefix, delimiter)
fetch_owner = querystring.get("fetch-owner", [False])[0]
max_keys = int(querystring.get("max-keys", [1000])[0])
start_after = querystring.get("start-after", [None])[0]
if continuation_token or start_after:
limit = continuation_token or start_after
all_keys = self._get_results_from_token(all_keys, limit)
truncated_keys, is_truncated, next_continuation_token = self._truncate_result(
all_keys, max_keys
)
result_keys, result_folders = self._split_truncated_keys(truncated_keys)
key_count = len(result_keys) + len(result_folders)
return template.render(
bucket=bucket,
prefix=prefix or "",
delimiter=delimiter,
key_count=key_count,
result_keys=result_keys,
result_folders=result_folders,
fetch_owner=fetch_owner,
max_keys=max_keys,
is_truncated=is_truncated,
next_continuation_token=next_continuation_token,
start_after=None if continuation_token else start_after,
)
@staticmethod
def _split_truncated_keys(truncated_keys):
result_keys = []
result_folders = []
for key in truncated_keys:
if isinstance(key, FakeKey):
result_keys.append(key)
else:
result_folders.append(key)
return result_keys, result_folders
def _get_results_from_token(self, result_keys, token):
continuation_index = 0
for key in result_keys:
if (key.name if isinstance(key, FakeKey) else key) > token:
break
continuation_index += 1
return result_keys[continuation_index:]
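    # Sketch of the token skipping above (hypothetical key names): for
    # keys named ["a", "b", "c"] and token "a", every name
    # lexicographically <= the token is skipped, leaving ["b", "c"].
    # This is what lets marker/continuation-token pagination resume
    # right after the last key of the previous page.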
def _truncate_result(self, result_keys, max_keys):
if max_keys == 0:
result_keys = []
is_truncated = True
next_continuation_token = None
elif len(result_keys) > max_keys:
is_truncated = "true"
result_keys = result_keys[:max_keys]
item = result_keys[-1]
next_continuation_token = item.name if isinstance(item, FakeKey) else item
else:
is_truncated = "false"
next_continuation_token = None
return result_keys, is_truncated, next_continuation_token
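    # Example of the truncation contract (hypothetical values): with
    # max_keys=2 and keys named ["a", "b", "c"] this returns the first
    # two keys, is_truncated "true", and next continuation token "b" --
    # the name of the last key included, which the next request feeds
    # back through _get_results_from_token.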
def _body_contains_location_constraint(self, body):
if body:
try:
xmltodict.parse(body)["CreateBucketConfiguration"]["LocationConstraint"]
return True
except KeyError:
pass
return False
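    # e.g. a body of (region value illustrative)
    #   <CreateBucketConfiguration>
    #     <LocationConstraint>us-west-2</LocationConstraint>
    #   </CreateBucketConfiguration>
    # returns True; a missing body or a configuration without a
    # LocationConstraint returns False.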
def _create_bucket_configuration_is_empty(self, body):
if body:
try:
create_bucket_configuration = xmltodict.parse(body)[
"CreateBucketConfiguration"
]
del create_bucket_configuration["@xmlns"]
if len(create_bucket_configuration) == 0:
return True
except KeyError:
pass
return False
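    # e.g. the self-closing body
    #   <CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
    # parses to nothing but the @xmlns attribute, so it counts as empty
    # and _bucket_response_put rejects it with MalformedXML.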
def _parse_pab_config(self, body):
parsed_xml = xmltodict.parse(body)
parsed_xml["PublicAccessBlockConfiguration"].pop("@xmlns", None)
# If Python 2, fix the unicode strings:
if sys.version_info[0] < 3:
parsed_xml = {
"PublicAccessBlockConfiguration": py2_strip_unicode_keys(
dict(parsed_xml["PublicAccessBlockConfiguration"])
)
}
return parsed_xml
def _bucket_response_put(
self, request, body, region_name, bucket_name, querystring
):
if not request.headers.get("Content-Length"):
return 411, {}, "Content-Length required"
self._set_action("BUCKET", "PUT", querystring)
self._authenticate_and_authorize_s3_action()
if "object-lock" in querystring:
body_decoded = body.decode()
config = self._lock_config_from_xml(body_decoded)
if not self.backend.get_bucket(bucket_name).object_lock_enabled:
raise BucketMustHaveLockeEnabled
self.backend.put_object_lock_configuration(
bucket_name,
config.get("enabled"),
config.get("mode"),
config.get("days"),
config.get("years"),
)
return 200, {}, ""
if "versioning" in querystring:
ver = re.search("<Status>([A-Za-z]+)</Status>", body.decode())
if ver:
self.backend.set_bucket_versioning(bucket_name, ver.group(1))
template = self.response_template(S3_BUCKET_VERSIONING)
return template.render(bucket_versioning_status=ver.group(1))
else:
return 404, {}, ""
elif "lifecycle" in querystring:
rules = xmltodict.parse(body)["LifecycleConfiguration"]["Rule"]
if not isinstance(rules, list):
# If there is only one rule, xmldict returns just the item
rules = [rules]
self.backend.put_bucket_lifecycle(bucket_name, rules)
return ""
elif "policy" in querystring:
self.backend.put_bucket_policy(bucket_name, body)
return "True"
elif "acl" in querystring:
# Headers are first. If not set, then look at the body (consistent with the documentation):
acls = self._acl_from_headers(request.headers)
if not acls:
acls = self._acl_from_xml(body)
self.backend.put_bucket_acl(bucket_name, acls)
return ""
elif "tagging" in querystring:
tagging = self._bucket_tagging_from_xml(body)
self.backend.put_bucket_tagging(bucket_name, tagging)
return ""
elif "website" in querystring:
self.backend.set_bucket_website_configuration(bucket_name, body)
return ""
elif "cors" in querystring:
try:
self.backend.put_bucket_cors(bucket_name, self._cors_from_xml(body))
return ""
except KeyError:
raise MalformedXML()
elif "logging" in querystring:
try:
self.backend.put_bucket_logging(
bucket_name, self._logging_from_xml(body)
)
return ""
except KeyError:
raise MalformedXML()
elif "notification" in querystring:
try:
self.backend.put_bucket_notification_configuration(
bucket_name, self._notification_config_from_xml(body)
)
return ""
except KeyError:
raise MalformedXML()
except Exception as e:
raise e
elif "accelerate" in querystring:
try:
accelerate_status = self._accelerate_config_from_xml(body)
self.backend.put_bucket_accelerate_configuration(
bucket_name, accelerate_status
)
return ""
except KeyError:
raise MalformedXML()
except Exception as e:
raise e
elif "publicAccessBlock" in querystring:
pab_config = self._parse_pab_config(body)
self.backend.put_bucket_public_access_block(
bucket_name, pab_config["PublicAccessBlockConfiguration"]
)
return ""
elif "encryption" in querystring:
try:
self.backend.put_bucket_encryption(
bucket_name, self._encryption_config_from_xml(body)
)
return ""
except KeyError:
raise MalformedXML()
except Exception as e:
raise e
else:
# us-east-1, the default AWS region behaves a bit differently
# - you should not use it as a location constraint --> it fails
# - querying the location constraint returns None
# - LocationConstraint has to be specified if outside us-east-1
if (
region_name != DEFAULT_REGION_NAME
and not self._body_contains_location_constraint(body)
):
raise IllegalLocationConstraintException()
if body:
if self._create_bucket_configuration_is_empty(body):
raise MalformedXML()
try:
forced_region = xmltodict.parse(body)["CreateBucketConfiguration"][
"LocationConstraint"
]
if forced_region == DEFAULT_REGION_NAME:
raise S3ClientError(
"InvalidLocationConstraint",
"The specified location-constraint is not valid",
)
else:
region_name = forced_region
except KeyError:
pass
try:
new_bucket = self.backend.create_bucket(bucket_name, region_name)
except BucketAlreadyExists:
new_bucket = self.backend.get_bucket(bucket_name)
if (
new_bucket.region_name == DEFAULT_REGION_NAME
and region_name == DEFAULT_REGION_NAME
):
# us-east-1 has different behavior - creating a bucket there is an idempotent operation
pass
else:
template = self.response_template(S3_DUPLICATE_BUCKET_ERROR)
return 409, {}, template.render(bucket_name=bucket_name)
if "x-amz-acl" in request.headers:
# TODO: Support the XML-based ACL format
self.backend.put_bucket_acl(
bucket_name, self._acl_from_headers(request.headers)
)
if (
request.headers.get("x-amz-bucket-object-lock-enabled", "").lower()
== "true"
):
new_bucket.object_lock_enabled = True
new_bucket.versioning_status = "Enabled"
template = self.response_template(S3_BUCKET_CREATE_RESPONSE)
return 200, {}, template.render(bucket=new_bucket)
def _bucket_response_delete(self, body, bucket_name, querystring):
self._set_action("BUCKET", "DELETE", querystring)
self._authenticate_and_authorize_s3_action()
if "policy" in querystring:
self.backend.delete_bucket_policy(bucket_name, body)
return 204, {}, ""
elif "tagging" in querystring:
self.backend.delete_bucket_tagging(bucket_name)
return 204, {}, ""
elif "website" in querystring:
self.backend.delete_bucket_website(bucket_name)
return 204, {}, ""
elif "cors" in querystring:
self.backend.delete_bucket_cors(bucket_name)
return 204, {}, ""
elif "lifecycle" in querystring:
self.backend.delete_bucket_lifecycle(bucket_name)
return 204, {}, ""
elif "publicAccessBlock" in querystring:
self.backend.delete_public_access_block(bucket_name)
return 204, {}, ""
elif "encryption" in querystring:
self.backend.delete_bucket_encryption(bucket_name)
return 204, {}, ""
removed_bucket = self.backend.delete_bucket(bucket_name)
if removed_bucket:
# Bucket exists
template = self.response_template(S3_DELETE_BUCKET_SUCCESS)
return 204, {}, template.render(bucket=removed_bucket)
else:
# Tried to delete a bucket that still has keys
template = self.response_template(S3_DELETE_BUCKET_WITH_ITEMS_ERROR)
return 409, {}, template.render(bucket=removed_bucket)
def _bucket_response_post(self, request, body, bucket_name):
response_headers = {}
if not request.headers.get("Content-Length"):
return 411, {}, "Content-Length required"
path = self._get_path(request)
if self.is_delete_keys(request, path, bucket_name):
self.data["Action"] = "DeleteObject"
self._authenticate_and_authorize_s3_action()
return self._bucket_response_delete_keys(request, body, bucket_name)
self.data["Action"] = "PutObject"
self._authenticate_and_authorize_s3_action()
# POST to bucket-url should create file from form
if hasattr(request, "form"):
# Not HTTPretty
form = request.form
else:
# HTTPretty, build new form object
body = body.decode()
form = dict(parse_qsl(body))
key = form["key"]
if "file" in form:
f = form["file"]
else:
fobj = request.files["file"]
f = fobj.stream.read()
key = key.replace("${filename}", os.path.basename(fobj.filename))
if "success_action_redirect" in form:
redirect = form["success_action_redirect"]
parts = urlparse(redirect)
queryargs = parse_qs(parts.query)
queryargs["key"] = key
queryargs["bucket"] = bucket_name
redirect_queryargs = urlencode(queryargs, doseq=True)
newparts = (
parts.scheme,
parts.netloc,
parts.path,
parts.params,
redirect_queryargs,
parts.fragment,
)
fixed_redirect = urlunparse(newparts)
response_headers["Location"] = fixed_redirect
if "success_action_status" in form:
status_code = form["success_action_status"]
elif "success_action_redirect" in form:
status_code = 303
else:
status_code = 204
new_key = self.backend.put_object(bucket_name, key, f)
if form.get("acl"):
acl = get_canned_acl(form.get("acl"))
new_key.set_acl(acl)
# Metadata
metadata = metadata_from_headers(form)
new_key.set_metadata(metadata)
return status_code, response_headers, ""
@staticmethod
def _get_path(request):
if isinstance(request, HTTPrettyRequest):
path = request.path
else:
path = (
request.full_path
if hasattr(request, "full_path")
else path_url(request.url)
)
return path
def _bucket_response_delete_keys(self, request, body, bucket_name):
template = self.response_template(S3_DELETE_KEYS_RESPONSE)
body_dict = xmltodict.parse(body, strip_whitespace=False)
objects = body_dict["Delete"].get("Object", [])
if not isinstance(objects, list):
# We expect a list of objects, but when there is a single <Object> node xmltodict does not
# return a list.
objects = [objects]
if len(objects) == 0:
raise MalformedXML()
deleted_objects = self.backend.delete_objects(bucket_name, objects)
error_names = []
return (
200,
{},
template.render(deleted=deleted_objects, delete_errors=error_names),
)
def _handle_range_header(self, request, headers, response_content):
response_headers = {}
length = len(response_content)
last = length - 1
_, rspec = request.headers.get("range").split("=")
if "," in rspec:
raise NotImplementedError("Multiple range specifiers not supported")
def toint(i):
return int(i) if i else None
begin, end = map(toint, rspec.split("-"))
if begin is not None: # byte range
end = last if end is None else min(end, last)
elif end is not None: # suffix byte range
begin = length - min(end, length)
end = last
else:
return 400, response_headers, ""
if begin < 0 or end > last or begin > min(end, last):
raise InvalidRange(
actual_size=str(length), range_requested=request.headers.get("range")
)
response_headers["content-range"] = "bytes {0}-{1}/{2}".format(
begin, end, length
)
content = response_content[begin : end + 1]
response_headers["content-length"] = len(content)
return 206, response_headers, content
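    # Worked example (hypothetical request): for a 10-byte body and
    # "Range: bytes=2-5", begin=2 and end=5, so the response is 206 with
    # "Content-Range: bytes 2-5/10" and the 4-byte slice body[2:6].
    # A suffix range such as "bytes=-3" maps to the final 3 bytes.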
def _handle_v4_chunk_signatures(self, body, content_length):
body_io = io.BytesIO(body)
new_body = bytearray(content_length)
pos = 0
line = body_io.readline()
while line:
# https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
# str(hex(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n
chunk_size = int(line[: line.find(b";")].decode("utf8"), 16)
new_body[pos : pos + chunk_size] = body_io.read(chunk_size)
pos = pos + chunk_size
body_io.read(2) # skip trailing \r\n
line = body_io.readline()
return bytes(new_body)
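    # Framing sketch (made-up signature; nothing is verified here, only
    # the chunk framing is parsed):
    #
    #   body = (b"5;chunk-signature=deadbeef\r\n"
    #           b"hello\r\n"
    #           b"0;chunk-signature=deadbeef\r\n\r\n")
    #   self._handle_v4_chunk_signatures(body, 5)  # -> b"hello"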
@amzn_request_id
def key_or_control_response(self, request, full_url, headers):
# Key and Control are lumped in because splitting out the regex is too much of a pain :/
self.method = request.method
self.path = self._get_path(request)
self.headers = request.headers
if "host" not in self.headers:
self.headers["host"] = urlparse(full_url).netloc
response_headers = {}
try:
# Is this an S3 control response?
if isinstance(request, AWSPreparedRequest) and "s3-control" in request.url:
response = self._control_response(request, full_url, headers)
else:
response = self._key_response(request, full_url, self.headers)
except S3ClientError as s3error:
response = s3error.code, {}, s3error.description
if isinstance(response, str):
status_code = 200
response_content = response
else:
status_code, response_headers, response_content = response
if (
status_code == 200
and "range" in request.headers
and request.headers["range"] != ""
):
try:
return self._handle_range_header(
request, response_headers, response_content
)
except S3ClientError as s3error:
return s3error.code, {}, s3error.description
return status_code, response_headers, response_content
def _control_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
query = parse_qs(parsed_url.query, keep_blank_values=True)
method = request.method
if hasattr(request, "body"):
# Boto
body = request.body
if hasattr(body, "read"):
body = body.read()
else:
# Flask server
body = request.data
if body is None:
body = b""
if method == "GET":
return self._control_response_get(request, query, headers)
elif method == "PUT":
return self._control_response_put(request, body, query, headers)
elif method == "DELETE":
return self._control_response_delete(request, query, headers)
else:
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(
method
)
)
def _control_response_get(self, request, query, headers):
action = self.path.split("?")[0].split("/")[
-1
] # Gets the action out of the URL sans query params.
self._set_action("CONTROL", "GET", action)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if "publicAccessBlock" in action:
public_block_config = self.backend.get_account_public_access_block(
headers["x-amz-account-id"]
)
template = self.response_template(S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION)
return (
200,
response_headers,
template.render(public_block_config=public_block_config),
)
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(action)
)
def _control_response_put(self, request, body, query, headers):
action = self.path.split("?")[0].split("/")[
-1
] # Gets the action out of the URL sans query params.
self._set_action("CONTROL", "PUT", action)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if "publicAccessBlock" in action:
pab_config = self._parse_pab_config(body)
self.backend.put_account_public_access_block(
headers["x-amz-account-id"],
pab_config["PublicAccessBlockConfiguration"],
)
return 200, response_headers, ""
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(action)
)
def _control_response_delete(self, request, query, headers):
action = self.path.split("?")[0].split("/")[
-1
] # Gets the action out of the URL sans query params.
self._set_action("CONTROL", "DELETE", action)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if "publicAccessBlock" in action:
self.backend.delete_account_public_access_block(headers["x-amz-account-id"])
return 200, response_headers, ""
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(action)
)
def _key_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
query = parse_qs(parsed_url.query, keep_blank_values=True)
method = request.method
key_name = self.parse_key_name(request, parsed_url.path)
bucket_name = self.parse_bucket_name_from_url(request, full_url)
        # Because we patch the requests library, boto/boto3 API requests
        # go through this method, but so do plain
        # `requests.get("https://bucket-name.s3.amazonaws.com/file-name")`
        # calls. Here we deny public access to private files by checking
        # the ACL and checking for the mere presence of an Authorization
        # header.
if "Authorization" not in request.headers:
if hasattr(request, "url"):
signed_url = "Signature=" in request.url
elif hasattr(request, "requestline"):
signed_url = "Signature=" in request.path
key = self.backend.get_object(bucket_name, key_name)
if key:
if not key.acl.public_read and not signed_url:
return 403, {}, ""
elif signed_url:
# coming in from requests.get(s3.generate_presigned_url())
if self._invalid_headers(request.url, dict(request.headers)):
return 403, {}, S3_INVALID_PRESIGNED_PARAMETERS
if hasattr(request, "body"):
# Boto
body = request.body
if hasattr(body, "read"):
body = body.read()
else:
# Flask server
body = request.data
if not body:
# when the data is being passed as a file
if request.files:
for _, value in request.files.items():
body = value.stream.read()
elif hasattr(request, "form"):
# Body comes through as part of the form, if no content-type is set on the PUT-request
# form = ImmutableMultiDict([('some data 123 321', '')])
form = request.form
for k, _ in form.items():
body = k
if body is None:
body = b""
if (
request.headers.get("x-amz-content-sha256", None)
== "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
):
body = self._handle_v4_chunk_signatures(
body, int(request.headers["x-amz-decoded-content-length"])
)
if method == "GET":
return self._key_response_get(
bucket_name, query, key_name, headers=request.headers
)
elif method == "PUT":
return self._key_response_put(
request, body, bucket_name, query, key_name, headers
)
elif method == "HEAD":
return self._key_response_head(
bucket_name, query, key_name, headers=request.headers
)
elif method == "DELETE":
return self._key_response_delete(headers, bucket_name, query, key_name)
elif method == "POST":
return self._key_response_post(request, body, bucket_name, query, key_name)
else:
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(
method
)
)
def _key_response_get(self, bucket_name, query, key_name, headers):
self._set_action("KEY", "GET", query)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if query.get("uploadId"):
upload_id = query["uploadId"][0]
# 0 <= PartNumberMarker <= 2,147,483,647
part_number_marker = int(query.get("part-number-marker", [0])[0])
if not (0 <= part_number_marker <= 2147483647):
raise InvalidMaxPartArgument("part-number-marker", 0, 2147483647)
# 0 <= MaxParts <= 2,147,483,647 (default is 1,000)
max_parts = int(query.get("max-parts", [1000])[0])
if not (0 <= max_parts <= 2147483647):
raise InvalidMaxPartArgument("max-parts", 0, 2147483647)
parts = self.backend.list_parts(
bucket_name,
upload_id,
part_number_marker=part_number_marker,
max_parts=max_parts,
)
next_part_number_marker = parts[-1].name + 1 if parts else 0
is_truncated = parts and self.backend.is_truncated(
bucket_name, upload_id, next_part_number_marker
)
template = self.response_template(S3_MULTIPART_LIST_RESPONSE)
return (
200,
response_headers,
template.render(
bucket_name=bucket_name,
key_name=key_name,
upload_id=upload_id,
is_truncated=str(is_truncated).lower(),
max_parts=max_parts,
next_part_number_marker=next_part_number_marker,
parts=parts,
part_number_marker=part_number_marker,
),
)
version_id = query.get("versionId", [None])[0]
if_modified_since = headers.get("If-Modified-Since", None)
if_match = headers.get("If-Match", None)
if_none_match = headers.get("If-None-Match", None)
if_unmodified_since = headers.get("If-Unmodified-Since", None)
key = self.backend.get_object(bucket_name, key_name, version_id=version_id)
if key is None and version_id is None:
raise MissingKey(key_name)
elif key is None:
raise MissingVersion()
if if_unmodified_since:
if_unmodified_since = str_to_rfc_1123_datetime(if_unmodified_since)
if key.last_modified > if_unmodified_since:
raise PreconditionFailed("If-Unmodified-Since")
if if_match and key.etag not in [if_match, '"{0}"'.format(if_match)]:
raise PreconditionFailed("If-Match")
if if_modified_since:
if_modified_since = str_to_rfc_1123_datetime(if_modified_since)
if key.last_modified < if_modified_since:
return 304, response_headers, "Not Modified"
if if_none_match and key.etag == if_none_match:
return 304, response_headers, "Not Modified"
if "acl" in query:
            acl = self.backend.get_object_acl(key)
template = self.response_template(S3_OBJECT_ACL_RESPONSE)
return 200, response_headers, template.render(acl=acl)
if "tagging" in query:
tags = self.backend.get_object_tagging(key)["Tags"]
template = self.response_template(S3_OBJECT_TAGGING_RESPONSE)
return 200, response_headers, template.render(tags=tags)
if "legal-hold" in query:
legal_hold = self.backend.get_object_legal_hold(key)
template = self.response_template(S3_OBJECT_LEGAL_HOLD)
return 200, response_headers, template.render(legal_hold=legal_hold)
response_headers.update(key.metadata)
response_headers.update(key.response_dict)
return 200, response_headers, key.value
def _key_response_put(self, request, body, bucket_name, query, key_name, headers):
self._set_action("KEY", "PUT", query)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if query.get("uploadId") and query.get("partNumber"):
upload_id = query["uploadId"][0]
part_number = int(query["partNumber"][0])
if "x-amz-copy-source" in request.headers:
src = unquote(request.headers.get("x-amz-copy-source")).lstrip("/")
src_bucket, src_key = src.split("/", 1)
src_key, src_version_id = (
src_key.split("?versionId=")
if "?versionId=" in src_key
else (src_key, None)
)
src_range = request.headers.get("x-amz-copy-source-range", "").split(
"bytes="
)[-1]
try:
start_byte, end_byte = src_range.split("-")
start_byte, end_byte = int(start_byte), int(end_byte)
except ValueError:
start_byte, end_byte = None, None
if self.backend.get_object(
src_bucket, src_key, version_id=src_version_id
):
key = self.backend.copy_part(
bucket_name,
upload_id,
part_number,
src_bucket,
src_key,
src_version_id,
start_byte,
end_byte,
)
else:
return 404, response_headers, ""
template = self.response_template(S3_MULTIPART_UPLOAD_RESPONSE)
response = template.render(part=key)
else:
key = self.backend.upload_part(
bucket_name, upload_id, part_number, body
)
response = ""
response_headers.update(key.response_dict)
return 200, response_headers, response
storage_class = request.headers.get("x-amz-storage-class", "STANDARD")
encryption = request.headers.get("x-amz-server-side-encryption", None)
kms_key_id = request.headers.get(
"x-amz-server-side-encryption-aws-kms-key-id", None
)
bucket_key_enabled = request.headers.get(
"x-amz-server-side-encryption-bucket-key-enabled", None
)
if bucket_key_enabled is not None:
bucket_key_enabled = str(bucket_key_enabled).lower()
bucket = self.backend.get_bucket(bucket_name)
lock_enabled = bucket.object_lock_enabled
lock_mode = request.headers.get("x-amz-object-lock-mode", None)
lock_until = request.headers.get("x-amz-object-lock-retain-until-date", None)
legal_hold = request.headers.get("x-amz-object-lock-legal-hold", "OFF")
if lock_mode or lock_until or legal_hold == "ON":
if not request.headers.get("Content-Md5"):
raise InvalidContentMD5
if not lock_enabled:
raise LockNotEnabled
elif lock_enabled and bucket.has_default_lock:
if not request.headers.get("Content-Md5"):
raise InvalidContentMD5
lock_until = bucket.default_retention()
lock_mode = bucket.default_lock_mode
acl = self._acl_from_headers(request.headers)
if acl is None:
acl = self.backend.get_bucket(bucket_name).acl
tagging = self._tagging_from_headers(request.headers)
if "retention" in query:
if not lock_enabled:
raise LockNotEnabled
version_id = query.get("VersionId")
retention = self._mode_until_from_xml(body)
self.backend.put_object_retention(
bucket_name, key_name, version_id=version_id, retention=retention
)
return 200, response_headers, ""
if "legal-hold" in query:
if not lock_enabled:
raise LockNotEnabled
version_id = query.get("VersionId")
legal_hold_status = self._legal_hold_status_from_xml(body)
self.backend.put_object_legal_hold(
bucket_name, key_name, version_id, legal_hold_status
)
return 200, response_headers, ""
if "acl" in query:
self.backend.put_object_acl(bucket_name, key_name, acl)
return 200, response_headers, ""
if "tagging" in query:
if "versionId" in query:
version_id = query["versionId"][0]
else:
version_id = None
key = self.backend.get_object(bucket_name, key_name, version_id=version_id)
tagging = self._tagging_from_xml(body)
self.backend.set_key_tags(key, tagging, key_name)
return 200, response_headers, ""
if "x-amz-copy-source" in request.headers:
# Copy key
            # The copy source may carry a quoted "?versionId=abc" suffix,
            # so parse the unquoted string first and then split out the
            # bucket, key, and version id
src_key = request.headers.get("x-amz-copy-source")
if isinstance(src_key, bytes):
src_key = src_key.decode("utf-8")
src_key_parsed = urlparse(src_key)
src_bucket, src_key = (
clean_key_name(src_key_parsed.path).lstrip("/").split("/", 1)
)
src_version_id = parse_qs(src_key_parsed.query).get("versionId", [None])[0]
key = self.backend.get_object(
src_bucket, src_key, version_id=src_version_id
)
if key is not None:
if key.storage_class in ["GLACIER", "DEEP_ARCHIVE"]:
if key.response_dict.get(
"x-amz-restore"
) is None or 'ongoing-request="true"' in key.response_dict.get(
"x-amz-restore"
):
raise ObjectNotInActiveTierError(key)
self.backend.copy_object(
src_bucket,
src_key,
bucket_name,
key_name,
storage=storage_class,
acl=acl,
src_version_id=src_version_id,
)
else:
return 404, response_headers, ""
new_key = self.backend.get_object(bucket_name, key_name)
mdirective = request.headers.get("x-amz-metadata-directive")
if mdirective is not None and mdirective == "REPLACE":
metadata = metadata_from_headers(request.headers)
new_key.set_metadata(metadata, replace=True)
tdirective = request.headers.get("x-amz-tagging-directive")
if tdirective == "REPLACE":
tagging = self._tagging_from_headers(request.headers)
self.backend.set_key_tags(new_key, tagging)
template = self.response_template(S3_OBJECT_COPY_RESPONSE)
response_headers.update(new_key.response_dict)
return 200, response_headers, template.render(key=new_key)
streaming_request = hasattr(request, "streaming") and request.streaming
closing_connection = headers.get("connection") == "close"
if closing_connection and streaming_request:
# Closing the connection of a streaming request. No more data
new_key = self.backend.get_object(bucket_name, key_name)
elif streaming_request:
# Streaming request, more data
new_key = self.backend.append_to_key(bucket_name, key_name, body)
else:
# Initial data
new_key = self.backend.put_object(
bucket_name,
key_name,
body,
storage=storage_class,
encryption=encryption,
kms_key_id=kms_key_id,
bucket_key_enabled=bucket_key_enabled,
lock_mode=lock_mode,
lock_legal_status=legal_hold,
lock_until=lock_until,
)
request.streaming = True
metadata = metadata_from_headers(request.headers)
metadata.update(metadata_from_headers(query))
new_key.set_metadata(metadata)
new_key.set_acl(acl)
new_key.website_redirect_location = request.headers.get(
"x-amz-website-redirect-location"
)
self.backend.set_key_tags(new_key, tagging)
response_headers.update(new_key.response_dict)
return 200, response_headers, ""
def _key_response_head(self, bucket_name, query, key_name, headers):
self._set_action("KEY", "HEAD", query)
self._authenticate_and_authorize_s3_action()
response_headers = {}
version_id = query.get("versionId", [None])[0]
part_number = query.get("partNumber", [None])[0]
if part_number:
part_number = int(part_number)
if_modified_since = headers.get("If-Modified-Since", None)
if_match = headers.get("If-Match", None)
if_none_match = headers.get("If-None-Match", None)
if_unmodified_since = headers.get("If-Unmodified-Since", None)
key = self.backend.head_object(
bucket_name, key_name, version_id=version_id, part_number=part_number
)
if key:
response_headers.update(key.metadata)
response_headers.update(key.response_dict)
if if_unmodified_since:
if_unmodified_since = str_to_rfc_1123_datetime(if_unmodified_since)
if key.last_modified > if_unmodified_since:
return 412, response_headers, ""
if if_match and key.etag != if_match:
return 412, response_headers, ""
if if_modified_since:
if_modified_since = str_to_rfc_1123_datetime(if_modified_since)
if key.last_modified < if_modified_since:
return 304, response_headers, "Not Modified"
if if_none_match and key.etag == if_none_match:
return 304, response_headers, "Not Modified"
return 200, response_headers, ""
else:
return 404, response_headers, ""
def _lock_config_from_xml(self, xml):
response_dict = {"enabled": False, "mode": None, "days": None, "years": None}
parsed_xml = xmltodict.parse(xml)
enabled = (
parsed_xml["ObjectLockConfiguration"]["ObjectLockEnabled"] == "Enabled"
)
response_dict["enabled"] = enabled
default_retention = parsed_xml.get("ObjectLockConfiguration").get("Rule")
if default_retention:
default_retention = default_retention.get("DefaultRetention")
mode = default_retention["Mode"]
days = int(default_retention.get("Days", 0))
years = int(default_retention.get("Years", 0))
if days and years:
raise MalformedXML
response_dict["mode"] = mode
response_dict["days"] = days
response_dict["years"] = years
return response_dict
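    # Example configuration (values are illustrative; Days and Years are
    # mutually exclusive, so supplying both raises MalformedXML):
    #
    #   <ObjectLockConfiguration>
    #     <ObjectLockEnabled>Enabled</ObjectLockEnabled>
    #     <Rule>
    #       <DefaultRetention><Mode>GOVERNANCE</Mode><Days>30</Days></DefaultRetention>
    #     </Rule>
    #   </ObjectLockConfiguration>
    #
    # parses to {"enabled": True, "mode": "GOVERNANCE", "days": 30, "years": 0}.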
def _acl_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if not parsed_xml.get("AccessControlPolicy"):
raise MalformedACLError()
# The owner is needed for some reason...
if not parsed_xml["AccessControlPolicy"].get("Owner"):
# TODO: Validate that the Owner is actually correct.
raise MalformedACLError()
# If empty, then no ACLs:
if parsed_xml["AccessControlPolicy"].get("AccessControlList") is None:
return []
if not parsed_xml["AccessControlPolicy"]["AccessControlList"].get("Grant"):
raise MalformedACLError()
permissions = ["READ", "WRITE", "READ_ACP", "WRITE_ACP", "FULL_CONTROL"]
if not isinstance(
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"], list
):
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"] = [
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"]
]
grants = self._get_grants_from_xml(
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"],
MalformedACLError,
permissions,
)
return FakeAcl(grants)
def _get_grants_from_xml(self, grant_list, exception_type, permissions):
grants = []
for grant in grant_list:
if grant.get("Permission", "") not in permissions:
raise exception_type()
if grant["Grantee"].get("@xsi:type", "") not in [
"CanonicalUser",
"AmazonCustomerByEmail",
"Group",
]:
raise exception_type()
# TODO: Verify that the proper grantee data is supplied based on the type.
grants.append(
FakeGrant(
[
FakeGrantee(
id=grant["Grantee"].get("ID", ""),
display_name=grant["Grantee"].get("DisplayName", ""),
uri=grant["Grantee"].get("URI", ""),
)
],
[grant["Permission"]],
)
)
return grants
def _acl_from_headers(self, headers):
canned_acl = headers.get("x-amz-acl", "")
if canned_acl:
return get_canned_acl(canned_acl)
grants = []
for header, value in headers.items():
header = header.lower()
if not header.startswith("x-amz-grant-"):
continue
permission = {
"read": "READ",
"write": "WRITE",
"read-acp": "READ_ACP",
"write-acp": "WRITE_ACP",
"full-control": "FULL_CONTROL",
}[header[len("x-amz-grant-") :]]
grantees = []
for key_and_value in value.split(","):
key, value = re.match(
'([^=]+)="?([^"]+)"?', key_and_value.strip()
).groups()
if key.lower() == "id":
grantees.append(FakeGrantee(id=value))
else:
grantees.append(FakeGrantee(uri=value))
grants.append(FakeGrant(grantees, [permission]))
if grants:
return FakeAcl(grants)
else:
return None
def _tagging_from_headers(self, headers):
tags = {}
if headers.get("x-amz-tagging"):
parsed_header = parse_qs(headers["x-amz-tagging"], keep_blank_values=True)
for tag in parsed_header.items():
tags[tag[0]] = tag[1][0]
return tags
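    # e.g. the header "x-amz-tagging: k1=v1&k2=v2" (hypothetical keys)
    # parses to {"k1": "v1", "k2": "v2"}; keep_blank_values=True also
    # admits tags with empty values such as "k3=".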
def _tagging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml, force_list={"Tag": True})
tags = {}
for tag in parsed_xml["Tagging"]["TagSet"]["Tag"]:
tags[tag["Key"]] = tag["Value"]
return tags
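    # force_list ensures a single <Tag> element still parses as a list,
    # so a one-tag document like
    #   <Tagging><TagSet><Tag><Key>k</Key><Value>v</Value></Tag></TagSet></Tagging>
    # yields {"k": "v"} exactly like a multi-tag document would.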
def _bucket_tagging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
tags = {}
# Optional if no tags are being sent:
if parsed_xml["Tagging"].get("TagSet"):
# If there is only 1 tag, then it's not a list:
if not isinstance(parsed_xml["Tagging"]["TagSet"]["Tag"], list):
tags[parsed_xml["Tagging"]["TagSet"]["Tag"]["Key"]] = parsed_xml[
"Tagging"
]["TagSet"]["Tag"]["Value"]
else:
for tag in parsed_xml["Tagging"]["TagSet"]["Tag"]:
if tag["Key"] in tags:
raise DuplicateTagKeys()
tags[tag["Key"]] = tag["Value"]
# Verify that "aws:" is not in the tags. If so, then this is a problem:
for key, _ in tags.items():
if key.startswith("aws:"):
raise NoSystemTags()
return tags
def _cors_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if isinstance(parsed_xml["CORSConfiguration"]["CORSRule"], list):
return [cors for cors in parsed_xml["CORSConfiguration"]["CORSRule"]]
return [parsed_xml["CORSConfiguration"]["CORSRule"]]
def _mode_until_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
return (
parsed_xml["Retention"]["Mode"],
parsed_xml["Retention"]["RetainUntilDate"],
)
def _legal_hold_status_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
return parsed_xml["LegalHold"]["Status"]
def _encryption_config_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if (
not parsed_xml["ServerSideEncryptionConfiguration"].get("Rule")
or not parsed_xml["ServerSideEncryptionConfiguration"]["Rule"].get(
"ApplyServerSideEncryptionByDefault"
)
or not parsed_xml["ServerSideEncryptionConfiguration"]["Rule"][
"ApplyServerSideEncryptionByDefault"
].get("SSEAlgorithm")
):
raise MalformedXML()
return [parsed_xml["ServerSideEncryptionConfiguration"]]
def _logging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if not parsed_xml["BucketLoggingStatus"].get("LoggingEnabled"):
return {}
if not parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetBucket"):
raise MalformedXML()
if not parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetPrefix"):
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetPrefix"] = ""
# Get the ACLs:
if parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetGrants"):
permissions = ["READ", "WRITE", "FULL_CONTROL"]
if not isinstance(
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"][
"Grant"
],
list,
):
target_grants = self._get_grants_from_xml(
[
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"][
"TargetGrants"
]["Grant"]
],
MalformedXML,
permissions,
)
else:
target_grants = self._get_grants_from_xml(
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"][
"Grant"
],
MalformedXML,
permissions,
)
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"][
"TargetGrants"
] = target_grants
return parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]
def _notification_config_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if not len(parsed_xml["NotificationConfiguration"]):
return {}
# The types of notifications, and their required fields (apparently lambda is categorized by the API as
# "CloudFunction"):
notification_fields = [
("Topic", "sns"),
("Queue", "sqs"),
("CloudFunction", "lambda"),
]
event_names = [
"s3:ReducedRedundancyLostObject",
"s3:ObjectCreated:*",
"s3:ObjectCreated:Put",
"s3:ObjectCreated:Post",
"s3:ObjectCreated:Copy",
"s3:ObjectCreated:CompleteMultipartUpload",
"s3:ObjectRemoved:*",
"s3:ObjectRemoved:Delete",
"s3:ObjectRemoved:DeleteMarkerCreated",
]
found_notifications = (
0 # Tripwire -- if this is not ever set, then there were no notifications
)
for name, arn_string in notification_fields:
# 1st verify that the proper notification configuration has been passed in (with an ARN that is close
# to being correct -- nothing too complex in the ARN logic):
the_notification = parsed_xml["NotificationConfiguration"].get(
"{}Configuration".format(name)
)
if the_notification:
found_notifications += 1
if not isinstance(the_notification, list):
the_notification = parsed_xml["NotificationConfiguration"][
"{}Configuration".format(name)
] = [the_notification]
for n in the_notification:
if not n[name].startswith("arn:aws:{}:".format(arn_string)):
raise InvalidNotificationARN()
# 2nd, verify that the Events list is correct:
assert n["Event"]
if not isinstance(n["Event"], list):
n["Event"] = [n["Event"]]
for event in n["Event"]:
if event not in event_names:
raise InvalidNotificationEvent()
# Parse out the filters:
if n.get("Filter"):
# Error if S3Key is blank:
if not n["Filter"]["S3Key"]:
raise KeyError()
if not isinstance(n["Filter"]["S3Key"]["FilterRule"], list):
n["Filter"]["S3Key"]["FilterRule"] = [
n["Filter"]["S3Key"]["FilterRule"]
]
for filter_rule in n["Filter"]["S3Key"]["FilterRule"]:
assert filter_rule["Name"] in ["suffix", "prefix"]
assert filter_rule["Value"]
if not found_notifications:
return {}
return parsed_xml["NotificationConfiguration"]
def _accelerate_config_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
config = parsed_xml["AccelerateConfiguration"]
return config["Status"]
def _key_response_delete(self, headers, bucket_name, query, key_name):
self._set_action("KEY", "DELETE", query)
self._authenticate_and_authorize_s3_action()
if query.get("uploadId"):
upload_id = query["uploadId"][0]
self.backend.abort_multipart_upload(bucket_name, upload_id)
return 204, {}, ""
version_id = query.get("versionId", [None])[0]
if "tagging" in query:
self.backend.delete_object_tagging(
bucket_name, key_name, version_id=version_id
)
template = self.response_template(S3_DELETE_KEY_TAGGING_RESPONSE)
return 204, {}, template.render(version_id=version_id)
bypass = headers.get("X-Amz-Bypass-Governance-Retention")
success, response_meta = self.backend.delete_object(
bucket_name, key_name, version_id=version_id, bypass=bypass
)
response_headers = {}
if response_meta is not None:
for k in response_meta:
response_headers["x-amz-{}".format(k)] = response_meta[k]
return 204, response_headers, ""
    def _complete_multipart_body(self, body):
        ps = minidom.parseString(body).getElementsByTagName("Part")
        prev = 0
        for p in ps:
            pn = int(p.getElementsByTagName("PartNumber")[0].firstChild.wholeText)
            if pn <= prev:
                raise InvalidPartOrder()
            yield (pn, p.getElementsByTagName("ETag")[0].firstChild.wholeText)
            # Remember the last part number so out-of-order parts are rejected
            prev = pn
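    # Example payload (hypothetical ETags): parts must arrive with
    # strictly increasing PartNumbers or InvalidPartOrder is raised.
    #
    #   <CompleteMultipartUpload>
    #     <Part><PartNumber>1</PartNumber><ETag>"etag1"</ETag></Part>
    #     <Part><PartNumber>2</PartNumber><ETag>"etag2"</ETag></Part>
    #   </CompleteMultipartUpload>
    #
    # yields the pairs (1, '"etag1"') and (2, '"etag2"').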
def _key_response_post(self, request, body, bucket_name, query, key_name):
self._set_action("KEY", "POST", query)
self._authenticate_and_authorize_s3_action()
if body == b"" and "uploads" in query:
metadata = metadata_from_headers(request.headers)
storage_type = request.headers.get("x-amz-storage-class", "STANDARD")
multipart_id = self.backend.create_multipart_upload(
bucket_name, key_name, metadata, storage_type
)
template = self.response_template(S3_MULTIPART_INITIATE_RESPONSE)
response = template.render(
bucket_name=bucket_name, key_name=key_name, upload_id=multipart_id
)
return 200, {}, response
if query.get("uploadId"):
body = self._complete_multipart_body(body)
multipart_id = query["uploadId"][0]
multipart, value, etag = self.backend.complete_multipart_upload(
bucket_name, multipart_id, body
)
if value is None:
return 400, {}, ""
key = self.backend.put_object(
bucket_name,
multipart.key_name,
value,
storage=multipart.storage,
etag=etag,
multipart=multipart,
)
key.set_metadata(multipart.metadata)
template = self.response_template(S3_MULTIPART_COMPLETE_RESPONSE)
headers = {}
if key.version_id:
headers["x-amz-version-id"] = key.version_id
return (
200,
headers,
template.render(
bucket_name=bucket_name, key_name=key.name, etag=key.etag
),
)
elif "restore" in query:
es = minidom.parseString(body).getElementsByTagName("Days")
days = es[0].childNodes[0].wholeText
key = self.backend.get_object(bucket_name, key_name)
r = 202
if key.expiry_date is not None:
r = 200
key.restore(int(days))
return r, {}, ""
else:
raise NotImplementedError(
"Method POST had only been implemented for multipart uploads and restore operations, so far"
)
def _invalid_headers(self, url, headers):
"""
Verify whether the provided metadata in the URL is also present in the headers
:param url: .../file.txt&content-type=app%2Fjson&Signature=..
:param headers: Content-Type=app/json
:return: True or False
"""
metadata_to_check = {
"content-disposition": "Content-Disposition",
"content-encoding": "Content-Encoding",
"content-language": "Content-Language",
"content-length": "Content-Length",
"content-md5": "Content-MD5",
"content-type": "Content-Type",
}
for url_key, header_key in metadata_to_check.items():
metadata_in_url = re.search(url_key + "=(.+?)(&.+$|$)", url)
if metadata_in_url:
url_value = unquote(metadata_in_url.group(1))
if header_key not in headers or (url_value != headers[header_key]):
return True
return False
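    # e.g. (hypothetical presigned URL) a URL containing
    # "...&content-type=app%2Fjson&Signature=..." is only honoured when
    # the request also carries "Content-Type: app/json"; any mismatch
    # makes this return True and the caller answers with 403 and
    # S3_INVALID_PRESIGNED_PARAMETERS.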
S3ResponseInstance = ResponseObject(s3_backend)
S3_ALL_BUCKETS = """<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Owner>
<ID>bcaf1ffd86f41161ca5fb16fd081034f</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<Buckets>
{% for bucket in buckets %}
<Bucket>
<Name>{{ bucket.name }}</Name>
<CreationDate>{{ bucket.creation_date_ISO8601 }}</CreationDate>
</Bucket>
{% endfor %}
</Buckets>
</ListAllMyBucketsResult>"""
S3_BUCKET_GET_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>{{ bucket.name }}</Name>
{% if prefix != None %}
<Prefix>{{ prefix }}</Prefix>
{% endif %}
<MaxKeys>{{ max_keys }}</MaxKeys>
{% if delimiter %}
<Delimiter>{{ delimiter }}</Delimiter>
{% endif %}
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% if next_marker %}
<NextMarker>{{ next_marker }}</NextMarker>
{% endif %}
{% for key in result_keys %}
<Contents>
<Key>{{ key.name }}</Key>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Contents>
{% endfor %}
{% if delimiter %}
{% for folder in result_folders %}
<CommonPrefixes>
<Prefix>{{ folder }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
</ListBucketResult>"""
S3_BUCKET_GET_RESPONSE_V2 = """<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>{{ bucket.name }}</Name>
{% if prefix != None %}
<Prefix>{{ prefix }}</Prefix>
{% endif %}
<MaxKeys>{{ max_keys }}</MaxKeys>
<KeyCount>{{ key_count }}</KeyCount>
{% if delimiter %}
<Delimiter>{{ delimiter }}</Delimiter>
{% endif %}
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% if next_continuation_token %}
<NextContinuationToken>{{ next_continuation_token }}</NextContinuationToken>
{% endif %}
{% if start_after %}
<StartAfter>{{ start_after }}</StartAfter>
{% endif %}
{% for key in result_keys %}
<Contents>
<Key>{{ key.name }}</Key>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
{% if fetch_owner %}
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
{% endif %}
</Contents>
{% endfor %}
{% if delimiter %}
{% for folder in result_folders %}
<CommonPrefixes>
<Prefix>{{ folder }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
</ListBucketResult>"""
S3_BUCKET_CREATE_RESPONSE = """<CreateBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<CreateBucketResponse>
<Bucket>{{ bucket.name }}</Bucket>
</CreateBucketResponse>
</CreateBucketResponse>"""
S3_DELETE_BUCKET_SUCCESS = """<DeleteBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<DeleteBucketResponse>
<Code>204</Code>
<Description>No Content</Description>
</DeleteBucketResponse>
</DeleteBucketResponse>"""
S3_DELETE_BUCKET_WITH_ITEMS_ERROR = """<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>BucketNotEmpty</Code>
<Message>The bucket you tried to delete is not empty</Message>
<BucketName>{{ bucket.name }}</BucketName>
<RequestId>asdfasdfsdafds</RequestId>
<HostId>sdfgdsfgdsfgdfsdsfgdfs</HostId>
</Error>"""
S3_BUCKET_LOCATION = """<?xml version="1.0" encoding="UTF-8"?>
<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">{% if location != None %}{{ location }}{% endif %}</LocationConstraint>"""
S3_BUCKET_LIFECYCLE_CONFIGURATION = """<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{% for rule in rules %}
<Rule>
<ID>{{ rule.id }}</ID>
{% if rule.filter %}
<Filter>
{% if rule.filter.prefix != None %}
<Prefix>{{ rule.filter.prefix }}</Prefix>
{% endif %}
{% if rule.filter.tag_key %}
<Tag>
<Key>{{ rule.filter.tag_key }}</Key>
<Value>{{ rule.filter.tag_value }}</Value>
</Tag>
{% endif %}
{% if rule.filter.and_filter %}
<And>
{% if rule.filter.and_filter.prefix != None %}
<Prefix>{{ rule.filter.and_filter.prefix }}</Prefix>
{% endif %}
{% for key, value in rule.filter.and_filter.tags.items() %}
<Tag>
<Key>{{ key }}</Key>
<Value>{{ value }}</Value>
</Tag>
{% endfor %}
</And>
{% endif %}
</Filter>
{% else %}
{% if rule.prefix != None %}
<Prefix>{{ rule.prefix }}</Prefix>
{% endif %}
{% endif %}
<Status>{{ rule.status }}</Status>
{% if rule.storage_class %}
<Transition>
{% if rule.transition_days %}
<Days>{{ rule.transition_days }}</Days>
{% endif %}
{% if rule.transition_date %}
<Date>{{ rule.transition_date }}</Date>
{% endif %}
<StorageClass>{{ rule.storage_class }}</StorageClass>
</Transition>
{% endif %}
{% if rule.expiration_days or rule.expiration_date or rule.expired_object_delete_marker %}
<Expiration>
{% if rule.expiration_days %}
<Days>{{ rule.expiration_days }}</Days>
{% endif %}
{% if rule.expiration_date %}
<Date>{{ rule.expiration_date }}</Date>
{% endif %}
{% if rule.expired_object_delete_marker %}
<ExpiredObjectDeleteMarker>{{ rule.expired_object_delete_marker }}</ExpiredObjectDeleteMarker>
{% endif %}
</Expiration>
{% endif %}
{% if rule.nvt_noncurrent_days and rule.nvt_storage_class %}
<NoncurrentVersionTransition>
<NoncurrentDays>{{ rule.nvt_noncurrent_days }}</NoncurrentDays>
<StorageClass>{{ rule.nvt_storage_class }}</StorageClass>
</NoncurrentVersionTransition>
{% endif %}
{% if rule.nve_noncurrent_days %}
<NoncurrentVersionExpiration>
<NoncurrentDays>{{ rule.nve_noncurrent_days }}</NoncurrentDays>
</NoncurrentVersionExpiration>
{% endif %}
{% if rule.aimu_days %}
<AbortIncompleteMultipartUpload>
<DaysAfterInitiation>{{ rule.aimu_days }}</DaysAfterInitiation>
</AbortIncompleteMultipartUpload>
{% endif %}
</Rule>
{% endfor %}
</LifecycleConfiguration>
"""
S3_BUCKET_VERSIONING = """<?xml version="1.0" encoding="UTF-8"?>
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ bucket_versioning_status }}</Status>
</VersioningConfiguration>
"""
S3_BUCKET_GET_VERSIONING = """<?xml version="1.0" encoding="UTF-8"?>
{% if status is none %}
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
{% else %}
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ status }}</Status>
</VersioningConfiguration>
{% endif %}
"""
S3_BUCKET_GET_VERSIONS = """<?xml version="1.0" encoding="UTF-8"?>
<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Name>{{ bucket.name }}</Name>
{% if prefix != None %}
<Prefix>{{ prefix }}</Prefix>
{% endif %}
{% if common_prefixes %}
{% for prefix in common_prefixes %}
<CommonPrefixes>
<Prefix>{{ prefix }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
<Delimiter>{{ delimiter }}</Delimiter>
<KeyMarker>{{ key_marker or "" }}</KeyMarker>
<MaxKeys>{{ max_keys }}</MaxKeys>
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% for key in key_list %}
<Version>
<Key>{{ key.name }}</Key>
<VersionId>{% if key.version_id is none %}null{% else %}{{ key.version_id }}{% endif %}</VersionId>
<IsLatest>{{ 'true' if key.is_latest else 'false' }}</IsLatest>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Version>
{% endfor %}
{% for marker in delete_marker_list %}
<DeleteMarker>
<Key>{{ marker.name }}</Key>
<VersionId>{{ marker.version_id }}</VersionId>
<IsLatest>{{ 'true' if marker.is_latest else 'false' }}</IsLatest>
<LastModified>{{ marker.last_modified_ISO8601 }}</LastModified>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</DeleteMarker>
{% endfor %}
</ListVersionsResult>
"""
S3_DELETE_KEYS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
{% for k, v in deleted %}
<Deleted>
<Key>{{k}}</Key>
{% if v %}<VersionId>{{v}}</VersionId>{% endif %}
</Deleted>
{% endfor %}
{% for k in delete_errors %}
<Error>
<Key>{{k}}</Key>
</Error>
{% endfor %}
</DeleteResult>"""
S3_DELETE_KEY_TAGGING_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteObjectTaggingResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<VersionId>{{version_id}}</VersionId>
</DeleteObjectTaggingResult>
"""
S3_OBJECT_ACL_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<AccessControlList>
{% for grant in acl.grants %}
<Grant>
{% for grantee in grant.grantees %}
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="{{ grantee.type }}">
{% if grantee.uri %}
<URI>{{ grantee.uri }}</URI>
{% endif %}
{% if grantee.id %}
<ID>{{ grantee.id }}</ID>
{% endif %}
{% if grantee.display_name %}
<DisplayName>{{ grantee.display_name }}</DisplayName>
{% endif %}
</Grantee>
{% endfor %}
{% for permission in grant.permissions %}
<Permission>{{ permission }}</Permission>
{% endfor %}
</Grant>
{% endfor %}
</AccessControlList>
</AccessControlPolicy>"""
S3_OBJECT_LEGAL_HOLD = """<?xml version="1.0" encoding="UTF-8"?>
<LegalHold>
<Status>{{ legal_hold }}</Status>
</LegalHold>
"""
S3_OBJECT_TAGGING_RESPONSE = """\
<?xml version="1.0" encoding="UTF-8"?>
<Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<TagSet>
{% for tag in tags %}
<Tag>
<Key>{{ tag.Key }}</Key>
<Value>{{ tag.Value }}</Value>
</Tag>
{% endfor %}
</TagSet>
</Tagging>"""
S3_BUCKET_CORS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CORSConfiguration>
{% for cors in cors %}
<CORSRule>
{% for origin in cors.allowed_origins %}
<AllowedOrigin>{{ origin }}</AllowedOrigin>
{% endfor %}
{% for method in cors.allowed_methods %}
<AllowedMethod>{{ method }}</AllowedMethod>
{% endfor %}
{% if cors.allowed_headers is not none %}
{% for header in cors.allowed_headers %}
<AllowedHeader>{{ header }}</AllowedHeader>
{% endfor %}
{% endif %}
{% if cors.exposed_headers is not none %}
{% for header in cors.exposed_headers %}
<ExposedHeader>{{ header }}</ExposedHeader>
{% endfor %}
{% endif %}
{% if cors.max_age_seconds is not none %}
<MaxAgeSeconds>{{ cors.max_age_seconds }}</MaxAgeSeconds>
{% endif %}
</CORSRule>
{% endfor %}
</CORSConfiguration>
"""
S3_OBJECT_COPY_RESPONSE = """\
<CopyObjectResult xmlns="http://doc.s3.amazonaws.com/2006-03-01">
<ETag>{{ key.etag }}</ETag>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
</CopyObjectResult>"""
S3_MULTIPART_INITIATE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<UploadId>{{ upload_id }}</UploadId>
</InitiateMultipartUploadResult>"""
S3_MULTIPART_UPLOAD_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CopyPartResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<LastModified>{{ part.last_modified_ISO8601 }}</LastModified>
<ETag>{{ part.etag }}</ETag>
</CopyPartResult>"""
S3_MULTIPART_LIST_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<UploadId>{{ upload_id }}</UploadId>
<StorageClass>STANDARD</StorageClass>
<Initiator>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Initiator>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<PartNumberMarker>{{ part_number_marker }}</PartNumberMarker>
<NextPartNumberMarker>{{ next_part_number_marker }}</NextPartNumberMarker>
<MaxParts>{{ max_parts }}</MaxParts>
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% for part in parts %}
<Part>
<PartNumber>{{ part.name }}</PartNumber>
<LastModified>{{ part.last_modified_ISO8601 }}</LastModified>
<ETag>{{ part.etag }}</ETag>
<Size>{{ part.size }}</Size>
</Part>
{% endfor %}
</ListPartsResult>"""
S3_MULTIPART_COMPLETE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CompleteMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Location>http://{{ bucket_name }}.s3.amazonaws.com/{{ key_name }}</Location>
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<ETag>{{ etag }}</ETag>
</CompleteMultipartUploadResult>
"""
S3_ALL_MULTIPARTS = (
"""<?xml version="1.0" encoding="UTF-8"?>
<ListMultipartUploadsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<KeyMarker></KeyMarker>
<UploadIdMarker></UploadIdMarker>
<MaxUploads>1000</MaxUploads>
<IsTruncated>false</IsTruncated>
{% for upload in uploads %}
<Upload>
<Key>{{ upload.key_name }}</Key>
<UploadId>{{ upload.id }}</UploadId>
<Initiator>
<ID>arn:aws:iam::"""
+ ACCOUNT_ID
+ """:user/user1-11111a31-17b5-4fb7-9df5-b111111f13de</ID>
<DisplayName>user1-11111a31-17b5-4fb7-9df5-b111111f13de</DisplayName>
</Initiator>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<Initiated>2010-11-10T20:48:33.000Z</Initiated>
</Upload>
{% endfor %}
</ListMultipartUploadsResult>
"""
)
S3_NO_POLICY = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchBucketPolicy</Code>
<Message>The bucket policy does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_LIFECYCLE = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchLifecycleConfiguration</Code>
<Message>The lifecycle configuration does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_BUCKET_TAGGING = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchTagSet</Code>
<Message>The TagSet does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_BUCKET_WEBSITE_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchWebsiteConfiguration</Code>
<Message>The specified bucket does not have a website configuration</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_INVALID_CORS_REQUEST = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchWebsiteConfiguration</Code>
<Message>The specified bucket does not have a website configuration</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_CORS_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchCORSConfiguration</Code>
<Message>The CORS configuration does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_LOGGING_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01">
<LoggingEnabled>
<TargetBucket>{{ logging["TargetBucket"] }}</TargetBucket>
<TargetPrefix>{{ logging["TargetPrefix"] }}</TargetPrefix>
{% if logging.get("TargetGrants") %}
<TargetGrants>
{% for grant in logging["TargetGrants"] %}
<Grant>
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="{{ grant.grantees[0].type }}">
{% if grant.grantees[0].uri %}
<URI>{{ grant.grantees[0].uri }}</URI>
{% endif %}
{% if grant.grantees[0].id %}
<ID>{{ grant.grantees[0].id }}</ID>
{% endif %}
{% if grant.grantees[0].display_name %}
<DisplayName>{{ grant.grantees[0].display_name }}</DisplayName>
{% endif %}
</Grantee>
<Permission>{{ grant.permissions[0] }}</Permission>
</Grant>
{% endfor %}
</TargetGrants>
{% endif %}
</LoggingEnabled>
</BucketLoggingStatus>
"""
S3_NO_LOGGING_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" />
"""
S3_ENCRYPTION_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<ServerSideEncryptionConfiguration xmlns="http://doc.s3.amazonaws.com/2006-03-01">
{% for entry in encryption %}
<Rule>
<ApplyServerSideEncryptionByDefault>
<SSEAlgorithm>{{ entry["Rule"]["ApplyServerSideEncryptionByDefault"]["SSEAlgorithm"] }}</SSEAlgorithm>
{% if entry["Rule"]["ApplyServerSideEncryptionByDefault"].get("KMSMasterKeyID") %}
<KMSMasterKeyID>{{ entry["Rule"]["ApplyServerSideEncryptionByDefault"]["KMSMasterKeyID"] }}</KMSMasterKeyID>
{% endif %}
</ApplyServerSideEncryptionByDefault>
<BucketKeyEnabled>{{ 'true' if entry["Rule"].get("BucketKeyEnabled") == 'true' else 'false' }}</BucketKeyEnabled>
</Rule>
{% endfor %}
</ServerSideEncryptionConfiguration>
"""
S3_INVALID_PRESIGNED_PARAMETERS = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>SignatureDoesNotMatch</Code>
<Message>The request signature we calculated does not match the signature you provided. Check your key and signing method.</Message>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_ENCRYPTION = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>ServerSideEncryptionConfigurationNotFoundError</Code>
<Message>The server side encryption configuration was not found</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_GET_BUCKET_NOTIFICATION_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<NotificationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{% for topic in config.topic %}
<TopicConfiguration>
<Id>{{ topic.id }}</Id>
<Topic>{{ topic.arn }}</Topic>
{% for event in topic.events %}
<Event>{{ event }}</Event>
{% endfor %}
{% if topic.filters %}
<Filter>
<S3Key>
{% for rule in topic.filters["S3Key"]["FilterRule"] %}
<FilterRule>
<Name>{{ rule["Name"] }}</Name>
<Value>{{ rule["Value"] }}</Value>
</FilterRule>
{% endfor %}
</S3Key>
</Filter>
{% endif %}
</TopicConfiguration>
{% endfor %}
{% for queue in config.queue %}
<QueueConfiguration>
<Id>{{ queue.id }}</Id>
<Queue>{{ queue.arn }}</Queue>
{% for event in queue.events %}
<Event>{{ event }}</Event>
{% endfor %}
{% if queue.filters %}
<Filter>
<S3Key>
{% for rule in queue.filters["S3Key"]["FilterRule"] %}
<FilterRule>
<Name>{{ rule["Name"] }}</Name>
<Value>{{ rule["Value"] }}</Value>
</FilterRule>
{% endfor %}
</S3Key>
</Filter>
{% endif %}
</QueueConfiguration>
{% endfor %}
{% for cf in config.cloud_function %}
<CloudFunctionConfiguration>
<Id>{{ cf.id }}</Id>
<CloudFunction>{{ cf.arn }}</CloudFunction>
{% for event in cf.events %}
<Event>{{ event }}</Event>
{% endfor %}
{% if cf.filters %}
<Filter>
<S3Key>
{% for rule in cf.filters["S3Key"]["FilterRule"] %}
<FilterRule>
<Name>{{ rule["Name"] }}</Name>
<Value>{{ rule["Value"] }}</Value>
</FilterRule>
{% endfor %}
</S3Key>
</Filter>
{% endif %}
</CloudFunctionConfiguration>
{% endfor %}
</NotificationConfiguration>
"""
S3_BUCKET_ACCELERATE = """
<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ bucket.accelerate_configuration }}</Status>
</AccelerateConfiguration>
"""
S3_BUCKET_ACCELERATE_NOT_SET = """
<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
"""
S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION = """
<PublicAccessBlockConfiguration>
<BlockPublicAcls>{{public_block_config.block_public_acls}}</BlockPublicAcls>
<IgnorePublicAcls>{{public_block_config.ignore_public_acls}}</IgnorePublicAcls>
<BlockPublicPolicy>{{public_block_config.block_public_policy}}</BlockPublicPolicy>
<RestrictPublicBuckets>{{public_block_config.restrict_public_buckets}}</RestrictPublicBuckets>
</PublicAccessBlockConfiguration>
"""
S3_BUCKET_LOCK_CONFIGURATION = """
<ObjectLockConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{%if lock_enabled %}
<ObjectLockEnabled>Enabled</ObjectLockEnabled>
{% else %}
<ObjectLockEnabled>Disabled</ObjectLockEnabled>
{% endif %}
{% if mode %}
<Rule>
<DefaultRetention>
<Mode>{{mode}}</Mode>
<Days>{{days}}</Days>
<Years>{{years}}</Years>
</DefaultRetention>
</Rule>
{% endif %}
</ObjectLockConfiguration>
"""
S3_DUPLICATE_BUCKET_ERROR = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>BucketAlreadyOwnedByYou</Code>
<Message>Your previous request to create the named bucket succeeded and you already own it.</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
from __future__ import unicode_literals
import io
import os
import re
import sys
from botocore.awsrequest import AWSPreparedRequest
from moto.core.utils import (
amzn_request_id,
str_to_rfc_1123_datetime,
py2_strip_unicode_keys,
)
from urllib.parse import (
parse_qs,
parse_qsl,
urlparse,
unquote,
urlencode,
urlunparse,
)
import xmltodict
from moto.packages.httpretty.core import HTTPrettyRequest
from moto.core.responses import _TemplateEnvironmentMixin, ActionAuthenticatorMixin
from moto.core.utils import path_url
from moto.core import ACCOUNT_ID
from moto.settings import S3_IGNORE_SUBDOMAIN_BUCKETNAME
from moto.s3bucket_path.utils import (
bucket_name_from_url as bucketpath_bucket_name_from_url,
parse_key_name as bucketpath_parse_key_name,
is_delete_keys as bucketpath_is_delete_keys,
)
from .exceptions import (
BucketAlreadyExists,
BucketMustHaveLockeEnabled,
DuplicateTagKeys,
InvalidContentMD5,
InvalidContinuationToken,
S3ClientError,
MissingBucket,
MissingKey,
MissingVersion,
InvalidMaxPartArgument,
InvalidPartOrder,
MalformedXML,
MalformedACLError,
IllegalLocationConstraintException,
InvalidNotificationARN,
InvalidNotificationEvent,
ObjectNotInActiveTierError,
NoSystemTags,
PreconditionFailed,
InvalidRange,
LockNotEnabled,
)
from .models import (
s3_backend,
get_canned_acl,
FakeGrantee,
FakeGrant,
FakeAcl,
FakeKey,
)
from .utils import (
bucket_name_from_url,
clean_key_name,
metadata_from_headers,
parse_region_from_url,
)
from xml.dom import minidom
DEFAULT_REGION_NAME = "us-east-1"
ACTION_MAP = {
"BUCKET": {
"HEAD": {"DEFAULT": "HeadBucket",},
"GET": {
"uploads": "ListBucketMultipartUploads",
"location": "GetBucketLocation",
"lifecycle": "GetLifecycleConfiguration",
"versioning": "GetBucketVersioning",
"policy": "GetBucketPolicy",
"website": "GetBucketWebsite",
"acl": "GetBucketAcl",
"tagging": "GetBucketTagging",
"logging": "GetBucketLogging",
"cors": "GetBucketCORS",
"notification": "GetBucketNotification",
"accelerate": "GetAccelerateConfiguration",
"versions": "ListBucketVersions",
"public_access_block": "GetPublicAccessBlock",
"DEFAULT": "ListBucket",
},
"PUT": {
"lifecycle": "PutLifecycleConfiguration",
"versioning": "PutBucketVersioning",
"policy": "PutBucketPolicy",
"website": "PutBucketWebsite",
"acl": "PutBucketAcl",
"tagging": "PutBucketTagging",
"logging": "PutBucketLogging",
"cors": "PutBucketCORS",
"notification": "PutBucketNotification",
"accelerate": "PutAccelerateConfiguration",
"public_access_block": "PutPublicAccessBlock",
"DEFAULT": "CreateBucket",
},
"DELETE": {
"lifecycle": "PutLifecycleConfiguration",
"policy": "DeleteBucketPolicy",
"website": "DeleteBucketWebsite",
"tagging": "PutBucketTagging",
"cors": "PutBucketCORS",
"public_access_block": "DeletePublicAccessBlock",
"DEFAULT": "DeleteBucket",
},
},
"KEY": {
"HEAD": {"DEFAULT": "HeadObject",},
"GET": {
"uploadId": "ListMultipartUploadParts",
"acl": "GetObjectAcl",
"tagging": "GetObjectTagging",
"versionId": "GetObjectVersion",
"DEFAULT": "GetObject",
},
"PUT": {
"acl": "PutObjectAcl",
"tagging": "PutObjectTagging",
"DEFAULT": "PutObject",
},
"DELETE": {
"uploadId": "AbortMultipartUpload",
"versionId": "DeleteObjectVersion",
"DEFAULT": "DeleteObject",
},
"POST": {
"uploads": "PutObject",
"restore": "RestoreObject",
"uploadId": "PutObject",
},
},
"CONTROL": {
"GET": {"publicAccessBlock": "GetPublicAccessBlock"},
"PUT": {"publicAccessBlock": "PutPublicAccessBlock"},
"DELETE": {"publicAccessBlock": "DeletePublicAccessBlock"},
},
}
def parse_key_name(pth):
return pth[1:] if pth.startswith("/") else pth
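# A multi-object delete arrives as a POST with a "delete" subresource; the
# querystring may be empty ("/?delete"), blank ("/?delete=") or carried in
# request.query_string, so all three variants are matched.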
def is_delete_keys(request, path, bucket_name):
return (
path == "/?delete"
or path == "/?delete="
or (path == "/" and getattr(request, "query_string", "") == "delete")
)
class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
def __init__(self, backend):
super(ResponseObject, self).__init__()
self.backend = backend
self.method = ""
self.path = ""
self.data = {}
self.headers = {}
@property
def should_autoescape(self):
return True
def all_buckets(self):
self.data["Action"] = "ListAllMyBuckets"
self._authenticate_and_authorize_s3_action()
all_buckets = self.backend.list_buckets()
template = self.response_template(S3_ALL_BUCKETS)
return template.render(buckets=all_buckets)
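    # S3 supports two addressing styles: virtual-hosted ("bucket.s3.amazonaws.com")
    # and path-based ("s3.amazonaws.com/bucket"). This heuristic inspects the Host
    # header and treats bare hostnames, localhost/localstack, raw IPv4/IPv6
    # literals and cluster-local names as path-based endpoints.
    # Illustrative hosts (hypothetical values, not taken from any request):
    #   "mybucket.s3.amazonaws.com"  -> True  (virtual-hosted)
    #   "s3.us-west-2.amazonaws.com" -> False (path-based)
    #   "localhost:5000"             -> False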
def subdomain_based_buckets(self, request):
if S3_IGNORE_SUBDOMAIN_BUCKETNAME:
return False
host = request.headers.get("host", request.headers.get("Host"))
if not host:
host = urlparse(request.url).netloc
if (
not host
or host.startswith("localhost")
or host.startswith("localstack")
or re.match(r"^[^.]+$", host)
or re.match(r"^.*\.svc\.cluster\.local:?\d*$", host)
):
return False
match = re.match(r"^([^\[\]:]+)(:\d+)?$", host)
if match:
match = re.match(
r"((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4}", match.groups()[0]
)
if match:
return False
match = re.match(r"^\[(.+)\](:\d+)?$", host)
if match:
match = re.match(
r"^(((?=.*(::))(?!.*\3.+\3))\3?|[\dA-F]{1,4}:)([\dA-F]{1,4}(\3|:\b)|\2){5}(([\dA-F]{1,4}(\3|:\b|$)|\2){2}|(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})\Z",
match.groups()[0],
re.IGNORECASE,
)
if match:
return False
path_based = host == "s3.amazonaws.com" or re.match(
r"s3[\.\-]([^.]*)\.amazonaws\.com", host
)
return not path_based
def is_delete_keys(self, request, path, bucket_name):
if self.subdomain_based_buckets(request):
return is_delete_keys(request, path, bucket_name)
else:
return bucketpath_is_delete_keys(request, path, bucket_name)
def parse_bucket_name_from_url(self, request, url):
if self.subdomain_based_buckets(request):
return bucket_name_from_url(url)
else:
return bucketpath_bucket_name_from_url(url)
def parse_key_name(self, request, url):
if self.subdomain_based_buckets(request):
return parse_key_name(url)
else:
return bucketpath_parse_key_name(url)
def ambiguous_response(self, request, full_url, headers):
        # This could be either a bucket or a key request, so check the
        # addressing style to decide how to route it.
if self.subdomain_based_buckets(request):
return self.key_or_control_response(request, full_url, headers)
else:
# Using path-based buckets
return self.bucket_response(request, full_url, headers)
@amzn_request_id
def bucket_response(self, request, full_url, headers):
self.method = request.method
self.path = self._get_path(request)
self.headers = request.headers
if "host" not in self.headers:
self.headers["host"] = urlparse(full_url).netloc
try:
response = self._bucket_response(request, full_url, headers)
except S3ClientError as s3error:
response = s3error.code, {}, s3error.description
return self._send_response(response)
@staticmethod
def _send_response(response):
if isinstance(response, str):
return 200, {}, response.encode("utf-8")
else:
status_code, headers, response_content = response
if not isinstance(response_content, bytes):
response_content = response_content.encode("utf-8")
return status_code, headers, response_content
def _bucket_response(self, request, full_url, headers):
querystring = self._get_querystring(full_url)
method = request.method
region_name = parse_region_from_url(full_url)
bucket_name = self.parse_bucket_name_from_url(request, full_url)
if not bucket_name:
# If no bucket specified, list all buckets
return self.all_buckets()
self.data["BucketName"] = bucket_name
if hasattr(request, "body"):
# Boto
body = request.body
else:
# Flask server
body = request.data
if body is None:
body = b""
if isinstance(body, bytes):
body = body.decode("utf-8")
body = "{0}".format(body).encode("utf-8")
if method == "HEAD":
return self._bucket_response_head(bucket_name, querystring)
elif method == "GET":
return self._bucket_response_get(bucket_name, querystring)
elif method == "PUT":
return self._bucket_response_put(
request, body, region_name, bucket_name, querystring
)
elif method == "DELETE":
return self._bucket_response_delete(body, bucket_name, querystring)
elif method == "POST":
return self._bucket_response_post(request, body, bucket_name)
else:
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(
method
)
)
@staticmethod
def _get_querystring(full_url):
parsed_url = urlparse(full_url)
querystring = parse_qs(parsed_url.query, keep_blank_values=True)
return querystring
def _bucket_response_head(self, bucket_name, querystring):
self._set_action("BUCKET", "HEAD", querystring)
self._authenticate_and_authorize_s3_action()
try:
self.backend.head_bucket(bucket_name)
except MissingBucket:
# Unless we do this, boto3 does not raise ClientError on
# HEAD (which the real API responds with), and instead
# raises NoSuchBucket, leading to inconsistency in
# error response between real and mocked responses.
return 404, {}, ""
return 200, {}, ""
def _bucket_response_get(self, bucket_name, querystring):
self._set_action("BUCKET", "GET", querystring)
self._authenticate_and_authorize_s3_action()
if "object-lock" in querystring:
(
lock_enabled,
mode,
days,
years,
) = self.backend.get_object_lock_configuration(bucket_name)
template = self.response_template(S3_BUCKET_LOCK_CONFIGURATION)
return template.render(
lock_enabled=lock_enabled, mode=mode, days=days, years=years,
)
if "uploads" in querystring:
for unsup in ("delimiter", "max-uploads"):
if unsup in querystring:
raise NotImplementedError(
"Listing multipart uploads with {} has not been implemented yet.".format(
unsup
)
)
multiparts = list(self.backend.get_all_multiparts(bucket_name).values())
if "prefix" in querystring:
prefix = querystring.get("prefix", [None])[0]
multiparts = [
upload
for upload in multiparts
if upload.key_name.startswith(prefix)
]
template = self.response_template(S3_ALL_MULTIPARTS)
return template.render(bucket_name=bucket_name, uploads=multiparts)
elif "location" in querystring:
location = self.backend.get_bucket_location(bucket_name)
template = self.response_template(S3_BUCKET_LOCATION)
# us-east-1 is different - returns a None location
if location == DEFAULT_REGION_NAME:
location = None
return template.render(location=location)
elif "lifecycle" in querystring:
rules = self.backend.get_bucket_lifecycle(bucket_name)
if not rules:
template = self.response_template(S3_NO_LIFECYCLE)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_BUCKET_LIFECYCLE_CONFIGURATION)
return template.render(rules=rules)
elif "versioning" in querystring:
versioning = self.backend.get_bucket_versioning(bucket_name)
template = self.response_template(S3_BUCKET_GET_VERSIONING)
return template.render(status=versioning)
elif "policy" in querystring:
policy = self.backend.get_bucket_policy(bucket_name)
if not policy:
template = self.response_template(S3_NO_POLICY)
return 404, {}, template.render(bucket_name=bucket_name)
return 200, {}, policy
elif "website" in querystring:
website_configuration = self.backend.get_bucket_website_configuration(
bucket_name
)
if not website_configuration:
template = self.response_template(S3_NO_BUCKET_WEBSITE_CONFIG)
return 404, {}, template.render(bucket_name=bucket_name)
return 200, {}, website_configuration
elif "acl" in querystring:
acl = self.backend.get_bucket_acl(bucket_name)
template = self.response_template(S3_OBJECT_ACL_RESPONSE)
return template.render(acl=acl)
elif "tagging" in querystring:
tags = self.backend.get_bucket_tagging(bucket_name)["Tags"]
# "Special Error" if no tags:
if len(tags) == 0:
template = self.response_template(S3_NO_BUCKET_TAGGING)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_OBJECT_TAGGING_RESPONSE)
return template.render(tags=tags)
elif "logging" in querystring:
logging = self.backend.get_bucket_logging(bucket_name)
if not logging:
template = self.response_template(S3_NO_LOGGING_CONFIG)
return 200, {}, template.render()
template = self.response_template(S3_LOGGING_CONFIG)
return 200, {}, template.render(logging=logging)
elif "cors" in querystring:
cors = self.backend.get_bucket_cors(bucket_name)
if len(cors) == 0:
template = self.response_template(S3_NO_CORS_CONFIG)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_BUCKET_CORS_RESPONSE)
return template.render(cors=cors)
elif "notification" in querystring:
notification_configuration = self.backend.get_bucket_notification_configuration(
bucket_name
)
if not notification_configuration:
return 200, {}, ""
template = self.response_template(S3_GET_BUCKET_NOTIFICATION_CONFIG)
return template.render(config=notification_configuration)
elif "accelerate" in querystring:
bucket = self.backend.get_bucket(bucket_name)
if bucket.accelerate_configuration is None:
template = self.response_template(S3_BUCKET_ACCELERATE_NOT_SET)
return 200, {}, template.render()
template = self.response_template(S3_BUCKET_ACCELERATE)
return template.render(bucket=bucket)
elif "publicAccessBlock" in querystring:
public_block_config = self.backend.get_public_access_block(bucket_name)
template = self.response_template(S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION)
return template.render(public_block_config=public_block_config)
elif "versions" in querystring:
delimiter = querystring.get("delimiter", [None])[0]
encoding_type = querystring.get("encoding-type", [None])[0]
key_marker = querystring.get("key-marker", [None])[0]
max_keys = querystring.get("max-keys", [None])[0]
prefix = querystring.get("prefix", [""])[0]
version_id_marker = querystring.get("version-id-marker", [None])[0]
bucket = self.backend.get_bucket(bucket_name)
(
versions,
common_prefixes,
delete_markers,
) = self.backend.list_object_versions(
bucket_name,
delimiter=delimiter,
encoding_type=encoding_type,
key_marker=key_marker,
max_keys=max_keys,
version_id_marker=version_id_marker,
prefix=prefix,
)
key_list = versions
template = self.response_template(S3_BUCKET_GET_VERSIONS)
return (
200,
{},
template.render(
common_prefixes=common_prefixes,
key_list=key_list,
delete_marker_list=delete_markers,
bucket=bucket,
prefix=prefix,
max_keys=1000,
delimiter=delimiter,
key_marker=key_marker,
is_truncated="false",
),
)
elif "encryption" in querystring:
encryption = self.backend.get_bucket_encryption(bucket_name)
if not encryption:
template = self.response_template(S3_NO_ENCRYPTION)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_ENCRYPTION_CONFIG)
return 200, {}, template.render(encryption=encryption)
elif querystring.get("list-type", [None])[0] == "2":
return 200, {}, self._handle_list_objects_v2(bucket_name, querystring)
bucket = self.backend.get_bucket(bucket_name)
prefix = querystring.get("prefix", [None])[0]
if prefix and isinstance(prefix, bytes):
prefix = prefix.decode("utf-8")
delimiter = querystring.get("delimiter", [None])[0]
max_keys = int(querystring.get("max-keys", [1000])[0])
marker = querystring.get("marker", [None])[0]
result_keys, result_folders = self.backend.list_objects(
bucket, prefix, delimiter
)
if marker:
result_keys = self._get_results_from_token(result_keys, marker)
result_keys, is_truncated, next_marker = self._truncate_result(
result_keys, max_keys
)
template = self.response_template(S3_BUCKET_GET_RESPONSE)
return (
200,
{},
template.render(
bucket=bucket,
prefix=prefix,
delimiter=delimiter,
result_keys=result_keys,
result_folders=result_folders,
is_truncated=is_truncated,
next_marker=next_marker,
max_keys=max_keys,
),
)
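    # Resolve the IAM action name used for authorization: the first
    # querystring key found in ACTION_MAP wins; otherwise the method's
    # "DEFAULT" action is used.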
def _set_action(self, action_resource_type, method, querystring):
action_set = False
for action_in_querystring, action in ACTION_MAP[action_resource_type][
method
].items():
if action_in_querystring in querystring:
self.data["Action"] = action
action_set = True
if not action_set:
self.data["Action"] = ACTION_MAP[action_resource_type][method]["DEFAULT"]
def _handle_list_objects_v2(self, bucket_name, querystring):
template = self.response_template(S3_BUCKET_GET_RESPONSE_V2)
bucket = self.backend.get_bucket(bucket_name)
continuation_token = querystring.get("continuation-token", [None])[0]
if continuation_token is not None and continuation_token == "":
raise InvalidContinuationToken()
prefix = querystring.get("prefix", [None])[0]
if prefix and isinstance(prefix, bytes):
prefix = prefix.decode("utf-8")
delimiter = querystring.get("delimiter", [None])[0]
all_keys = self.backend.list_objects_v2(bucket, prefix, delimiter)
fetch_owner = querystring.get("fetch-owner", [False])[0]
max_keys = int(querystring.get("max-keys", [1000])[0])
start_after = querystring.get("start-after", [None])[0]
if continuation_token or start_after:
limit = continuation_token or start_after
all_keys = self._get_results_from_token(all_keys, limit)
truncated_keys, is_truncated, next_continuation_token = self._truncate_result(
all_keys, max_keys
)
result_keys, result_folders = self._split_truncated_keys(truncated_keys)
key_count = len(result_keys) + len(result_folders)
return template.render(
bucket=bucket,
prefix=prefix or "",
delimiter=delimiter,
key_count=key_count,
result_keys=result_keys,
result_folders=result_folders,
fetch_owner=fetch_owner,
max_keys=max_keys,
is_truncated=is_truncated,
next_continuation_token=next_continuation_token,
start_after=None if continuation_token else start_after,
)
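    # CommonPrefixes ("folders") and real keys come back in one list; only
    # FakeKey instances are objects, everything else is a prefix string.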
@staticmethod
def _split_truncated_keys(truncated_keys):
result_keys = []
result_folders = []
for key in truncated_keys:
if isinstance(key, FakeKey):
result_keys.append(key)
else:
result_folders.append(key)
return result_keys, result_folders
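    # Pagination helper: drop every key that sorts at or before the
    # continuation token / marker, relying on result_keys being sorted.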
def _get_results_from_token(self, result_keys, token):
continuation_index = 0
for key in result_keys:
if (key.name if isinstance(key, FakeKey) else key) > token:
break
continuation_index += 1
return result_keys[continuation_index:]
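    # Truncate the listing to max_keys. Note the string literals
    # "true"/"false": these values are rendered verbatim into the XML
    # templates.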
def _truncate_result(self, result_keys, max_keys):
if max_keys == 0:
result_keys = []
is_truncated = True
next_continuation_token = None
elif len(result_keys) > max_keys:
is_truncated = "true"
result_keys = result_keys[:max_keys]
item = result_keys[-1]
next_continuation_token = item.name if isinstance(item, FakeKey) else item
else:
is_truncated = "false"
next_continuation_token = None
return result_keys, is_truncated, next_continuation_token
def _body_contains_location_constraint(self, body):
if body:
try:
xmltodict.parse(body)["CreateBucketConfiguration"]["LocationConstraint"]
return True
except KeyError:
pass
return False
def _create_bucket_configuration_is_empty(self, body):
if body:
try:
create_bucket_configuration = xmltodict.parse(body)[
"CreateBucketConfiguration"
]
del create_bucket_configuration["@xmlns"]
if len(create_bucket_configuration) == 0:
return True
except KeyError:
pass
return False
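    # xmltodict keeps the namespace declaration as an "@xmlns" key; strip it
    # so only the actual PublicAccessBlock settings reach the backend.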
def _parse_pab_config(self, body):
parsed_xml = xmltodict.parse(body)
parsed_xml["PublicAccessBlockConfiguration"].pop("@xmlns", None)
# If Python 2, fix the unicode strings:
if sys.version_info[0] < 3:
parsed_xml = {
"PublicAccessBlockConfiguration": py2_strip_unicode_keys(
dict(parsed_xml["PublicAccessBlockConfiguration"])
)
}
return parsed_xml
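    # PUT on a bucket either configures a subresource (?versioning, ?policy,
    # ?lifecycle, ...) or, when no recognized subresource is present, creates
    # the bucket itself.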
def _bucket_response_put(
self, request, body, region_name, bucket_name, querystring
):
if not request.headers.get("Content-Length"):
return 411, {}, "Content-Length required"
self._set_action("BUCKET", "PUT", querystring)
self._authenticate_and_authorize_s3_action()
if "object-lock" in querystring:
body_decoded = body.decode()
config = self._lock_config_from_xml(body_decoded)
if not self.backend.get_bucket(bucket_name).object_lock_enabled:
raise BucketMustHaveLockeEnabled
self.backend.put_object_lock_configuration(
bucket_name,
config.get("enabled"),
config.get("mode"),
config.get("days"),
config.get("years"),
)
return 200, {}, ""
if "versioning" in querystring:
ver = re.search("<Status>([A-Za-z]+)</Status>", body.decode())
if ver:
self.backend.set_bucket_versioning(bucket_name, ver.group(1))
template = self.response_template(S3_BUCKET_VERSIONING)
return template.render(bucket_versioning_status=ver.group(1))
else:
return 404, {}, ""
elif "lifecycle" in querystring:
rules = xmltodict.parse(body)["LifecycleConfiguration"]["Rule"]
if not isinstance(rules, list):
            # If there is only one rule, xmltodict returns just the item
rules = [rules]
self.backend.put_bucket_lifecycle(bucket_name, rules)
return ""
elif "policy" in querystring:
self.backend.put_bucket_policy(bucket_name, body)
return "True"
elif "acl" in querystring:
# Headers are first. If not set, then look at the body (consistent with the documentation):
acls = self._acl_from_headers(request.headers)
if not acls:
acls = self._acl_from_xml(body)
self.backend.put_bucket_acl(bucket_name, acls)
return ""
elif "tagging" in querystring:
tagging = self._bucket_tagging_from_xml(body)
self.backend.put_bucket_tagging(bucket_name, tagging)
return ""
elif "website" in querystring:
self.backend.set_bucket_website_configuration(bucket_name, body)
return ""
elif "cors" in querystring:
try:
self.backend.put_bucket_cors(bucket_name, self._cors_from_xml(body))
return ""
except KeyError:
raise MalformedXML()
elif "logging" in querystring:
try:
self.backend.put_bucket_logging(
bucket_name, self._logging_from_xml(body)
)
return ""
except KeyError:
raise MalformedXML()
elif "notification" in querystring:
try:
self.backend.put_bucket_notification_configuration(
bucket_name, self._notification_config_from_xml(body)
)
return ""
except KeyError:
raise MalformedXML()
except Exception as e:
raise e
elif "accelerate" in querystring:
try:
accelerate_status = self._accelerate_config_from_xml(body)
self.backend.put_bucket_accelerate_configuration(
bucket_name, accelerate_status
)
return ""
except KeyError:
raise MalformedXML()
except Exception as e:
raise e
elif "publicAccessBlock" in querystring:
pab_config = self._parse_pab_config(body)
self.backend.put_bucket_public_access_block(
bucket_name, pab_config["PublicAccessBlockConfiguration"]
)
return ""
elif "encryption" in querystring:
try:
self.backend.put_bucket_encryption(
bucket_name, self._encryption_config_from_xml(body)
)
return ""
except KeyError:
raise MalformedXML()
except Exception as e:
raise e
else:
# us-east-1, the default AWS region behaves a bit differently
# - you should not use it as a location constraint --> it fails
# - querying the location constraint returns None
# - LocationConstraint has to be specified if outside us-east-1
if (
region_name != DEFAULT_REGION_NAME
and not self._body_contains_location_constraint(body)
):
raise IllegalLocationConstraintException()
if body:
if self._create_bucket_configuration_is_empty(body):
raise MalformedXML()
try:
forced_region = xmltodict.parse(body)["CreateBucketConfiguration"][
"LocationConstraint"
]
if forced_region == DEFAULT_REGION_NAME:
raise S3ClientError(
"InvalidLocationConstraint",
"The specified location-constraint is not valid",
)
else:
region_name = forced_region
except KeyError:
pass
try:
new_bucket = self.backend.create_bucket(bucket_name, region_name)
except BucketAlreadyExists:
new_bucket = self.backend.get_bucket(bucket_name)
if (
new_bucket.region_name == DEFAULT_REGION_NAME
and region_name == DEFAULT_REGION_NAME
):
# us-east-1 has different behavior - creating a bucket there is an idempotent operation
pass
else:
template = self.response_template(S3_DUPLICATE_BUCKET_ERROR)
return 409, {}, template.render(bucket_name=bucket_name)
if "x-amz-acl" in request.headers:
# TODO: Support the XML-based ACL format
self.backend.put_bucket_acl(
bucket_name, self._acl_from_headers(request.headers)
)
if (
request.headers.get("x-amz-bucket-object-lock-enabled", "").lower()
== "true"
):
new_bucket.object_lock_enabled = True
new_bucket.versioning_status = "Enabled"
template = self.response_template(S3_BUCKET_CREATE_RESPONSE)
return 200, {}, template.render(bucket=new_bucket)
def _bucket_response_delete(self, body, bucket_name, querystring):
self._set_action("BUCKET", "DELETE", querystring)
self._authenticate_and_authorize_s3_action()
if "policy" in querystring:
self.backend.delete_bucket_policy(bucket_name, body)
return 204, {}, ""
elif "tagging" in querystring:
self.backend.delete_bucket_tagging(bucket_name)
return 204, {}, ""
elif "website" in querystring:
self.backend.delete_bucket_website(bucket_name)
return 204, {}, ""
elif "cors" in querystring:
self.backend.delete_bucket_cors(bucket_name)
return 204, {}, ""
elif "lifecycle" in querystring:
self.backend.delete_bucket_lifecycle(bucket_name)
return 204, {}, ""
elif "publicAccessBlock" in querystring:
self.backend.delete_public_access_block(bucket_name)
return 204, {}, ""
elif "encryption" in querystring:
self.backend.delete_bucket_encryption(bucket_name)
return 204, {}, ""
removed_bucket = self.backend.delete_bucket(bucket_name)
if removed_bucket:
# Bucket exists
template = self.response_template(S3_DELETE_BUCKET_SUCCESS)
return 204, {}, template.render(bucket=removed_bucket)
else:
# Tried to delete a bucket that still has keys
template = self.response_template(S3_DELETE_BUCKET_WITH_ITEMS_ERROR)
return 409, {}, template.render(bucket=removed_bucket)
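    # POST to a bucket URL is either a multi-object delete (?delete) or a
    # browser-based form upload that creates an object from form fields.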
def _bucket_response_post(self, request, body, bucket_name):
response_headers = {}
if not request.headers.get("Content-Length"):
return 411, {}, "Content-Length required"
path = self._get_path(request)
if self.is_delete_keys(request, path, bucket_name):
self.data["Action"] = "DeleteObject"
self._authenticate_and_authorize_s3_action()
return self._bucket_response_delete_keys(request, body, bucket_name)
self.data["Action"] = "PutObject"
self._authenticate_and_authorize_s3_action()
# POST to bucket-url should create file from form
if hasattr(request, "form"):
# Not HTTPretty
form = request.form
else:
# HTTPretty, build new form object
body = body.decode()
form = dict(parse_qsl(body))
key = form["key"]
if "file" in form:
f = form["file"]
else:
fobj = request.files["file"]
f = fobj.stream.read()
key = key.replace("${filename}", os.path.basename(fobj.filename))
if "success_action_redirect" in form:
redirect = form["success_action_redirect"]
parts = urlparse(redirect)
queryargs = parse_qs(parts.query)
queryargs["key"] = key
queryargs["bucket"] = bucket_name
redirect_queryargs = urlencode(queryargs, doseq=True)
newparts = (
parts.scheme,
parts.netloc,
parts.path,
parts.params,
redirect_queryargs,
parts.fragment,
)
fixed_redirect = urlunparse(newparts)
response_headers["Location"] = fixed_redirect
if "success_action_status" in form:
status_code = form["success_action_status"]
elif "success_action_redirect" in form:
status_code = 303
else:
status_code = 204
new_key = self.backend.put_object(bucket_name, key, f)
if form.get("acl"):
acl = get_canned_acl(form.get("acl"))
new_key.set_acl(acl)
# Metadata
metadata = metadata_from_headers(form)
new_key.set_metadata(metadata)
return status_code, response_headers, ""
@staticmethod
def _get_path(request):
if isinstance(request, HTTPrettyRequest):
path = request.path
else:
path = (
request.full_path
if hasattr(request, "full_path")
else path_url(request.url)
)
return path
def _bucket_response_delete_keys(self, request, body, bucket_name):
template = self.response_template(S3_DELETE_KEYS_RESPONSE)
body_dict = xmltodict.parse(body, strip_whitespace=False)
objects = body_dict["Delete"].get("Object", [])
if not isinstance(objects, list):
# We expect a list of objects, but when there is a single <Object> node xmltodict does not
# return a list.
objects = [objects]
if len(objects) == 0:
raise MalformedXML()
deleted_objects = self.backend.delete_objects(bucket_name, objects)
error_names = []
return (
200,
{},
template.render(deleted=deleted_objects, delete_errors=error_names),
)
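    # Implements a single-range subset of RFC 7233: "bytes=a-b" (inclusive),
    # "bytes=a-" (open-ended) and "bytes=-n" (suffix of n bytes); multiple
    # range specifiers in one header are rejected.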
def _handle_range_header(self, request, headers, response_content):
response_headers = {}
length = len(response_content)
last = length - 1
_, rspec = request.headers.get("range").split("=")
if "," in rspec:
raise NotImplementedError("Multiple range specifiers not supported")
def toint(i):
return int(i) if i else None
begin, end = map(toint, rspec.split("-"))
if begin is not None: # byte range
end = last if end is None else min(end, last)
elif end is not None: # suffix byte range
begin = length - min(end, length)
end = last
else:
return 400, response_headers, ""
if begin < 0 or end > last or begin > min(end, last):
raise InvalidRange(
actual_size=str(length), range_requested=request.headers.get("range")
)
response_headers["content-range"] = "bytes {0}-{1}/{2}".format(
begin, end, length
)
content = response_content[begin : end + 1]
response_headers["content-length"] = len(content)
return 206, response_headers, content
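    # Reassembles an aws-chunked (SigV4 streaming) body. Each chunk is framed
    # as "<hex-size>;chunk-signature=<sig>\r\n<data>\r\n"; the signatures are
    # not verified, only the payload is stitched back together. Illustrative
    # body (hypothetical signatures):
    #   b"3;chunk-signature=ab\r\nabc\r\n0;chunk-signature=cd\r\n\r\n"
    # with x-amz-decoded-content-length: 3 decodes to b"abc".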
def _handle_v4_chunk_signatures(self, body, content_length):
body_io = io.BytesIO(body)
new_body = bytearray(content_length)
pos = 0
line = body_io.readline()
while line:
# https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
# str(hex(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n
chunk_size = int(line[: line.find(b";")].decode("utf8"), 16)
new_body[pos : pos + chunk_size] = body_io.read(chunk_size)
pos = pos + chunk_size
body_io.read(2) # skip trailing \r\n
line = body_io.readline()
return bytes(new_body)
@amzn_request_id
def key_or_control_response(self, request, full_url, headers):
# Key and Control are lumped in because splitting out the regex is too much of a pain :/
self.method = request.method
self.path = self._get_path(request)
self.headers = request.headers
if "host" not in self.headers:
self.headers["host"] = urlparse(full_url).netloc
response_headers = {}
try:
# Is this an S3 control response?
if isinstance(request, AWSPreparedRequest) and "s3-control" in request.url:
response = self._control_response(request, full_url, headers)
else:
response = self._key_response(request, full_url, self.headers)
except S3ClientError as s3error:
response = s3error.code, {}, s3error.description
if isinstance(response, str):
status_code = 200
response_content = response
else:
status_code, response_headers, response_content = response
if (
status_code == 200
and "range" in request.headers
and request.headers["range"] != ""
):
try:
return self._handle_range_header(
request, response_headers, response_content
)
except S3ClientError as s3error:
return s3error.code, {}, s3error.description
return status_code, response_headers, response_content
def _control_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
query = parse_qs(parsed_url.query, keep_blank_values=True)
method = request.method
if hasattr(request, "body"):
# Boto
body = request.body
if hasattr(body, "read"):
body = body.read()
else:
# Flask server
body = request.data
if body is None:
body = b""
if method == "GET":
return self._control_response_get(request, query, headers)
elif method == "PUT":
return self._control_response_put(request, body, query, headers)
elif method == "DELETE":
return self._control_response_delete(request, query, headers)
else:
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(
method
)
)
def _control_response_get(self, request, query, headers):
action = self.path.split("?")[0].split("/")[
-1
] # Gets the action out of the URL sans query params.
self._set_action("CONTROL", "GET", action)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if "publicAccessBlock" in action:
public_block_config = self.backend.get_account_public_access_block(
headers["x-amz-account-id"]
)
template = self.response_template(S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION)
return (
200,
response_headers,
template.render(public_block_config=public_block_config),
)
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(action)
)
def _control_response_put(self, request, body, query, headers):
action = self.path.split("?")[0].split("/")[
-1
] # Gets the action out of the URL sans query params.
self._set_action("CONTROL", "PUT", action)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if "publicAccessBlock" in action:
pab_config = self._parse_pab_config(body)
self.backend.put_account_public_access_block(
headers["x-amz-account-id"],
pab_config["PublicAccessBlockConfiguration"],
)
return 200, response_headers, ""
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(action)
)
def _control_response_delete(self, request, query, headers):
action = self.path.split("?")[0].split("/")[
-1
] # Gets the action out of the URL sans query params.
self._set_action("CONTROL", "DELETE", action)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if "publicAccessBlock" in action:
self.backend.delete_account_public_access_block(headers["x-amz-account-id"])
return 200, response_headers, ""
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(action)
)
def _key_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
query = parse_qs(parsed_url.query, keep_blank_values=True)
method = request.method
key_name = self.parse_key_name(request, parsed_url.path)
bucket_name = self.parse_bucket_name_from_url(request, full_url)
# Because we patch the requests library the boto/boto3 API
# requests go through this method but so do
# `requests.get("https://bucket-name.s3.amazonaws.com/file-name")`
# Here we deny public access to private files by checking the
# ACL and checking for the mere presence of an Authorization
# header.
if "Authorization" not in request.headers:
if hasattr(request, "url"):
signed_url = "Signature=" in request.url
elif hasattr(request, "requestline"):
signed_url = "Signature=" in request.path
key = self.backend.get_object(bucket_name, key_name)
if key:
if not key.acl.public_read and not signed_url:
return 403, {}, ""
elif signed_url:
# coming in from requests.get(s3.generate_presigned_url())
if self._invalid_headers(request.url, dict(request.headers)):
return 403, {}, S3_INVALID_PRESIGNED_PARAMETERS
if hasattr(request, "body"):
# Boto
body = request.body
if hasattr(body, "read"):
body = body.read()
else:
# Flask server
body = request.data
if not body:
# when the data is being passed as a file
if request.files:
for _, value in request.files.items():
body = value.stream.read()
elif hasattr(request, "form"):
                # The body arrives as part of the form when no Content-Type is
                # set on the PUT request, e.g.:
# form = ImmutableMultiDict([('some data 123 321', '')])
form = request.form
for k, _ in form.items():
body = k
if body is None:
body = b""
if (
request.headers.get("x-amz-content-sha256", None)
== "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
):
body = self._handle_v4_chunk_signatures(
body, int(request.headers["x-amz-decoded-content-length"])
)
if method == "GET":
return self._key_response_get(
bucket_name, query, key_name, headers=request.headers
)
elif method == "PUT":
return self._key_response_put(
request, body, bucket_name, query, key_name, headers
)
elif method == "HEAD":
return self._key_response_head(
bucket_name, query, key_name, headers=request.headers
)
elif method == "DELETE":
return self._key_response_delete(headers, bucket_name, query, key_name)
elif method == "POST":
return self._key_response_post(request, body, bucket_name, query, key_name)
else:
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(
method
)
)
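    # GET on a key serves ListParts (?uploadId), the object's ACL, tagging or
    # legal-hold subresources, or the object body itself, honoring the
    # conditional headers (If-Match, If-None-Match, If-(Un)Modified-Since).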
def _key_response_get(self, bucket_name, query, key_name, headers):
self._set_action("KEY", "GET", query)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if query.get("uploadId"):
upload_id = query["uploadId"][0]
# 0 <= PartNumberMarker <= 2,147,483,647
part_number_marker = int(query.get("part-number-marker", [0])[0])
if not (0 <= part_number_marker <= 2147483647):
raise InvalidMaxPartArgument("part-number-marker", 0, 2147483647)
# 0 <= MaxParts <= 2,147,483,647 (default is 1,000)
max_parts = int(query.get("max-parts", [1000])[0])
if not (0 <= max_parts <= 2147483647):
raise InvalidMaxPartArgument("max-parts", 0, 2147483647)
parts = self.backend.list_parts(
bucket_name,
upload_id,
part_number_marker=part_number_marker,
max_parts=max_parts,
)
next_part_number_marker = parts[-1].name + 1 if parts else 0
is_truncated = parts and self.backend.is_truncated(
bucket_name, upload_id, next_part_number_marker
)
template = self.response_template(S3_MULTIPART_LIST_RESPONSE)
return (
200,
response_headers,
template.render(
bucket_name=bucket_name,
key_name=key_name,
upload_id=upload_id,
is_truncated=str(is_truncated).lower(),
max_parts=max_parts,
next_part_number_marker=next_part_number_marker,
parts=parts,
part_number_marker=part_number_marker,
),
)
version_id = query.get("versionId", [None])[0]
if_modified_since = headers.get("If-Modified-Since", None)
if_match = headers.get("If-Match", None)
if_none_match = headers.get("If-None-Match", None)
if_unmodified_since = headers.get("If-Unmodified-Since", None)
key = self.backend.get_object(bucket_name, key_name, version_id=version_id)
if key is None and version_id is None:
raise MissingKey(key_name)
elif key is None:
raise MissingVersion()
if if_unmodified_since:
if_unmodified_since = str_to_rfc_1123_datetime(if_unmodified_since)
if key.last_modified > if_unmodified_since:
raise PreconditionFailed("If-Unmodified-Since")
if if_match and key.etag not in [if_match, '"{0}"'.format(if_match)]:
raise PreconditionFailed("If-Match")
if if_modified_since:
if_modified_since = str_to_rfc_1123_datetime(if_modified_since)
if key.last_modified < if_modified_since:
return 304, response_headers, "Not Modified"
if if_none_match and key.etag == if_none_match:
return 304, response_headers, "Not Modified"
if "acl" in query:
acl = s3_backend.get_object_acl(key)
template = self.response_template(S3_OBJECT_ACL_RESPONSE)
return 200, response_headers, template.render(acl=acl)
if "tagging" in query:
tags = self.backend.get_object_tagging(key)["Tags"]
template = self.response_template(S3_OBJECT_TAGGING_RESPONSE)
return 200, response_headers, template.render(tags=tags)
if "legal-hold" in query:
legal_hold = self.backend.get_object_legal_hold(key)
template = self.response_template(S3_OBJECT_LEGAL_HOLD)
return 200, response_headers, template.render(legal_hold=legal_hold)
response_headers.update(key.metadata)
response_headers.update(key.response_dict)
return 200, response_headers, key.value
def _key_response_put(self, request, body, bucket_name, query, key_name, headers):
self._set_action("KEY", "PUT", query)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if query.get("uploadId") and query.get("partNumber"):
upload_id = query["uploadId"][0]
part_number = int(query["partNumber"][0])
if "x-amz-copy-source" in request.headers:
src = unquote(request.headers.get("x-amz-copy-source")).lstrip("/")
src_bucket, src_key = src.split("/", 1)
src_key, src_version_id = (
src_key.split("?versionId=")
if "?versionId=" in src_key
else (src_key, None)
)
src_range = request.headers.get("x-amz-copy-source-range", "").split(
"bytes="
)[-1]
try:
start_byte, end_byte = src_range.split("-")
start_byte, end_byte = int(start_byte), int(end_byte)
except ValueError:
start_byte, end_byte = None, None
if self.backend.get_object(
src_bucket, src_key, version_id=src_version_id
):
key = self.backend.copy_part(
bucket_name,
upload_id,
part_number,
src_bucket,
src_key,
src_version_id,
start_byte,
end_byte,
)
else:
return 404, response_headers, ""
template = self.response_template(S3_MULTIPART_UPLOAD_RESPONSE)
response = template.render(part=key)
else:
key = self.backend.upload_part(
bucket_name, upload_id, part_number, body
)
response = ""
response_headers.update(key.response_dict)
return 200, response_headers, response
storage_class = request.headers.get("x-amz-storage-class", "STANDARD")
encryption = request.headers.get("x-amz-server-side-encryption", None)
kms_key_id = request.headers.get(
"x-amz-server-side-encryption-aws-kms-key-id", None
)
bucket_key_enabled = request.headers.get(
"x-amz-server-side-encryption-bucket-key-enabled", None
)
if bucket_key_enabled is not None:
bucket_key_enabled = str(bucket_key_enabled).lower()
bucket = self.backend.get_bucket(bucket_name)
lock_enabled = bucket.object_lock_enabled
lock_mode = request.headers.get("x-amz-object-lock-mode", None)
lock_until = request.headers.get("x-amz-object-lock-retain-until-date", None)
legal_hold = request.headers.get("x-amz-object-lock-legal-hold", "OFF")
if lock_mode or lock_until or legal_hold == "ON":
if not request.headers.get("Content-Md5"):
raise InvalidContentMD5
if not lock_enabled:
raise LockNotEnabled
elif lock_enabled and bucket.has_default_lock:
if not request.headers.get("Content-Md5"):
raise InvalidContentMD5
lock_until = bucket.default_retention()
lock_mode = bucket.default_lock_mode
acl = self._acl_from_headers(request.headers)
if acl is None:
acl = self.backend.get_bucket(bucket_name).acl
tagging = self._tagging_from_headers(request.headers)
if "retention" in query:
if not lock_enabled:
raise LockNotEnabled
version_id = query.get("VersionId")
retention = self._mode_until_from_xml(body)
self.backend.put_object_retention(
bucket_name, key_name, version_id=version_id, retention=retention
)
return 200, response_headers, ""
if "legal-hold" in query:
if not lock_enabled:
raise LockNotEnabled
version_id = query.get("VersionId")
legal_hold_status = self._legal_hold_status_from_xml(body)
self.backend.put_object_legal_hold(
bucket_name, key_name, version_id, legal_hold_status
)
return 200, response_headers, ""
if "acl" in query:
self.backend.put_object_acl(bucket_name, key_name, acl)
return 200, response_headers, ""
if "tagging" in query:
if "versionId" in query:
version_id = query["versionId"][0]
else:
version_id = None
key = self.backend.get_object(bucket_name, key_name, version_id=version_id)
tagging = self._tagging_from_xml(body)
self.backend.set_key_tags(key, tagging, key_name)
return 200, response_headers, ""
if "x-amz-copy-source" in request.headers:
# Copy key
            # The copy source may carry a quoted "?versionId=..." suffix, so
            # unquote and parse it to separate bucket, key and version id.
src_key = request.headers.get("x-amz-copy-source")
if isinstance(src_key, bytes):
src_key = src_key.decode("utf-8")
src_key_parsed = urlparse(src_key)
src_bucket, src_key = (
clean_key_name(src_key_parsed.path).lstrip("/").split("/", 1)
)
src_version_id = parse_qs(src_key_parsed.query).get("versionId", [None])[0]
key = self.backend.get_object(
src_bucket, src_key, version_id=src_version_id
)
if key is not None:
if key.storage_class in ["GLACIER", "DEEP_ARCHIVE"]:
if key.response_dict.get(
"x-amz-restore"
) is None or 'ongoing-request="true"' in key.response_dict.get(
"x-amz-restore"
):
raise ObjectNotInActiveTierError(key)
self.backend.copy_object(
src_bucket,
src_key,
bucket_name,
key_name,
storage=storage_class,
acl=acl,
src_version_id=src_version_id,
)
else:
return 404, response_headers, ""
new_key = self.backend.get_object(bucket_name, key_name)
mdirective = request.headers.get("x-amz-metadata-directive")
if mdirective is not None and mdirective == "REPLACE":
metadata = metadata_from_headers(request.headers)
new_key.set_metadata(metadata, replace=True)
tdirective = request.headers.get("x-amz-tagging-directive")
if tdirective == "REPLACE":
tagging = self._tagging_from_headers(request.headers)
self.backend.set_key_tags(new_key, tagging)
template = self.response_template(S3_OBJECT_COPY_RESPONSE)
response_headers.update(new_key.response_dict)
return 200, response_headers, template.render(key=new_key)
streaming_request = hasattr(request, "streaming") and request.streaming
closing_connection = headers.get("connection") == "close"
if closing_connection and streaming_request:
# Closing the connection of a streaming request. No more data
new_key = self.backend.get_object(bucket_name, key_name)
elif streaming_request:
# Streaming request, more data
new_key = self.backend.append_to_key(bucket_name, key_name, body)
else:
# Initial data
new_key = self.backend.put_object(
bucket_name,
key_name,
body,
storage=storage_class,
encryption=encryption,
kms_key_id=kms_key_id,
bucket_key_enabled=bucket_key_enabled,
lock_mode=lock_mode,
lock_legal_status=legal_hold,
lock_until=lock_until,
)
request.streaming = True
metadata = metadata_from_headers(request.headers)
metadata.update(metadata_from_headers(query))
new_key.set_metadata(metadata)
new_key.set_acl(acl)
new_key.website_redirect_location = request.headers.get(
"x-amz-website-redirect-location"
)
self.backend.set_key_tags(new_key, tagging)
response_headers.update(new_key.response_dict)
return 200, response_headers, ""
def _key_response_head(self, bucket_name, query, key_name, headers):
self._set_action("KEY", "HEAD", query)
self._authenticate_and_authorize_s3_action()
response_headers = {}
version_id = query.get("versionId", [None])[0]
part_number = query.get("partNumber", [None])[0]
if part_number:
part_number = int(part_number)
if_modified_since = headers.get("If-Modified-Since", None)
if_match = headers.get("If-Match", None)
if_none_match = headers.get("If-None-Match", None)
if_unmodified_since = headers.get("If-Unmodified-Since", None)
key = self.backend.head_object(
bucket_name, key_name, version_id=version_id, part_number=part_number
)
if key:
response_headers.update(key.metadata)
response_headers.update(key.response_dict)
if if_unmodified_since:
if_unmodified_since = str_to_rfc_1123_datetime(if_unmodified_since)
if key.last_modified > if_unmodified_since:
return 412, response_headers, ""
if if_match and key.etag != if_match:
return 412, response_headers, ""
if if_modified_since:
if_modified_since = str_to_rfc_1123_datetime(if_modified_since)
if key.last_modified < if_modified_since:
return 304, response_headers, "Not Modified"
if if_none_match and key.etag == if_none_match:
return 304, response_headers, "Not Modified"
return 200, response_headers, ""
else:
return 404, response_headers, ""
def _lock_config_from_xml(self, xml):
response_dict = {"enabled": False, "mode": None, "days": None, "years": None}
parsed_xml = xmltodict.parse(xml)
enabled = (
parsed_xml["ObjectLockConfiguration"]["ObjectLockEnabled"] == "Enabled"
)
response_dict["enabled"] = enabled
default_retention = parsed_xml.get("ObjectLockConfiguration").get("Rule")
if default_retention:
default_retention = default_retention.get("DefaultRetention")
mode = default_retention["Mode"]
days = int(default_retention.get("Days", 0))
years = int(default_retention.get("Years", 0))
if days and years:
raise MalformedXML
response_dict["mode"] = mode
response_dict["days"] = days
response_dict["years"] = years
return response_dict
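# Illustrative sketch (not part of the real test suite; XML values invented):
#   xml = ('<ObjectLockConfiguration>'
#          '<ObjectLockEnabled>Enabled</ObjectLockEnabled>'
#          '<Rule><DefaultRetention><Mode>GOVERNANCE</Mode><Days>30</Days>'
#          '</DefaultRetention></Rule>'
#          '</ObjectLockConfiguration>')
#   self._lock_config_from_xml(xml)
#   -> {"enabled": True, "mode": "GOVERNANCE", "days": 30, "years": 0}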
def _acl_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if not parsed_xml.get("AccessControlPolicy"):
raise MalformedACLError()
# An Owner element is required in the ACL document, even though it is not used here:
if not parsed_xml["AccessControlPolicy"].get("Owner"):
# TODO: Validate that the Owner is actually correct.
raise MalformedACLError()
# If empty, then no ACLs:
if parsed_xml["AccessControlPolicy"].get("AccessControlList") is None:
return []
if not parsed_xml["AccessControlPolicy"]["AccessControlList"].get("Grant"):
raise MalformedACLError()
permissions = ["READ", "WRITE", "READ_ACP", "WRITE_ACP", "FULL_CONTROL"]
if not isinstance(
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"], list
):
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"] = [
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"]
]
grants = self._get_grants_from_xml(
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"],
MalformedACLError,
permissions,
)
return FakeAcl(grants)
def _get_grants_from_xml(self, grant_list, exception_type, permissions):
grants = []
for grant in grant_list:
if grant.get("Permission", "") not in permissions:
raise exception_type()
if grant["Grantee"].get("@xsi:type", "") not in [
"CanonicalUser",
"AmazonCustomerByEmail",
"Group",
]:
raise exception_type()
# TODO: Verify that the proper grantee data is supplied based on the type.
grants.append(
FakeGrant(
[
FakeGrantee(
id=grant["Grantee"].get("ID", ""),
display_name=grant["Grantee"].get("DisplayName", ""),
uri=grant["Grantee"].get("URI", ""),
)
],
[grant["Permission"]],
)
)
return grants
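# Hedged example of the grant dicts this helper consumes (shape follows the
# xmltodict output handled above; the ID/DisplayName values are invented):
#   grant = {"Grantee": {"@xsi:type": "CanonicalUser", "ID": "abc123",
#                        "DisplayName": "webfile"},
#            "Permission": "READ"}
#   self._get_grants_from_xml([grant], MalformedACLError, ["READ", "WRITE"])
#   -> [FakeGrant([FakeGrantee(id="abc123", display_name="webfile", uri="")],
#                 ["READ"])]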
def _acl_from_headers(self, headers):
canned_acl = headers.get("x-amz-acl", "")
if canned_acl:
return get_canned_acl(canned_acl)
grants = []
for header, value in headers.items():
header = header.lower()
if not header.startswith("x-amz-grant-"):
continue
permission = {
"read": "READ",
"write": "WRITE",
"read-acp": "READ_ACP",
"write-acp": "WRITE_ACP",
"full-control": "FULL_CONTROL",
}[header[len("x-amz-grant-") :]]
grantees = []
for key_and_value in value.split(","):
key, value = re.match(
'([^=]+)="?([^"]+)"?', key_and_value.strip()
).groups()
if key.lower() == "id":
grantees.append(FakeGrantee(id=value))
else:
grantees.append(FakeGrantee(uri=value))
grants.append(FakeGrant(grantees, [permission]))
if grants:
return FakeAcl(grants)
else:
return None
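# Minimal sketch of the grant-header format handled above (values invented):
#   headers = {"x-amz-grant-read":
#              'id="abc123", uri="http://acs.amazonaws.com/groups/global/AllUsers"'}
#   -> one FakeGrant carrying two grantees (one by ID, one by URI) with READ.
# A canned ACL short-circuits all of this:
#   headers = {"x-amz-acl": "public-read"}  ->  get_canned_acl("public-read")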
def _tagging_from_headers(self, headers):
tags = {}
if headers.get("x-amz-tagging"):
parsed_header = parse_qs(headers["x-amz-tagging"], keep_blank_values=True)
for tag in parsed_header.items():
tags[tag[0]] = tag[1][0]
return tags
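# Sketch: the x-amz-tagging header is a URL-encoded query string, so
# parse_qs does the parsing (keep_blank_values preserves empty tag values):
#   headers = {"x-amz-tagging": "env=prod&owner="}
#   -> {"env": "prod", "owner": ""}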
def _tagging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml, force_list={"Tag": True})
tags = {}
for tag in parsed_xml["Tagging"]["TagSet"]["Tag"]:
tags[tag["Key"]] = tag["Value"]
return tags
def _bucket_tagging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
tags = {}
# Optional if no tags are being sent:
if parsed_xml["Tagging"].get("TagSet"):
# If there is only 1 tag, then it's not a list:
if not isinstance(parsed_xml["Tagging"]["TagSet"]["Tag"], list):
tags[parsed_xml["Tagging"]["TagSet"]["Tag"]["Key"]] = parsed_xml[
"Tagging"
]["TagSet"]["Tag"]["Value"]
else:
for tag in parsed_xml["Tagging"]["TagSet"]["Tag"]:
if tag["Key"] in tags:
raise DuplicateTagKeys()
tags[tag["Key"]] = tag["Value"]
# Verify that "aws:" is not in the tags. If so, then this is a problem:
for key, _ in tags.items():
if key.startswith("aws:"):
raise NoSystemTags()
return tags
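# Hedged sketch of the two validation failures above (keys invented):
#   two <Tag> entries sharing <Key>k</Key>     -> DuplicateTagKeys
#   a <Tag> whose <Key> starts with "aws:"     -> NoSystemTags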
def _cors_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if isinstance(parsed_xml["CORSConfiguration"]["CORSRule"], list):
return [cors for cors in parsed_xml["CORSConfiguration"]["CORSRule"]]
return [parsed_xml["CORSConfiguration"]["CORSRule"]]
def _mode_until_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
return (
parsed_xml["Retention"]["Mode"],
parsed_xml["Retention"]["RetainUntilDate"],
)
def _legal_hold_status_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
return parsed_xml["LegalHold"]["Status"]
def _encryption_config_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if (
not parsed_xml["ServerSideEncryptionConfiguration"].get("Rule")
or not parsed_xml["ServerSideEncryptionConfiguration"]["Rule"].get(
"ApplyServerSideEncryptionByDefault"
)
or not parsed_xml["ServerSideEncryptionConfiguration"]["Rule"][
"ApplyServerSideEncryptionByDefault"
].get("SSEAlgorithm")
):
raise MalformedXML()
return [parsed_xml["ServerSideEncryptionConfiguration"]]
def _logging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if not parsed_xml["BucketLoggingStatus"].get("LoggingEnabled"):
return {}
if not parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetBucket"):
raise MalformedXML()
if not parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetPrefix"):
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetPrefix"] = ""
# Get the ACLs:
if parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetGrants"):
permissions = ["READ", "WRITE", "FULL_CONTROL"]
if not isinstance(
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"][
"Grant"
],
list,
):
target_grants = self._get_grants_from_xml(
[
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"][
"TargetGrants"
]["Grant"]
],
MalformedXML,
permissions,
)
else:
target_grants = self._get_grants_from_xml(
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"][
"Grant"
],
MalformedXML,
permissions,
)
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"][
"TargetGrants"
] = target_grants
return parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]
def _notification_config_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if not len(parsed_xml["NotificationConfiguration"]):
return {}
# The types of notifications, and their required fields (apparently lambda is categorized by the API as
# "CloudFunction"):
notification_fields = [
("Topic", "sns"),
("Queue", "sqs"),
("CloudFunction", "lambda"),
]
event_names = [
"s3:ReducedRedundancyLostObject",
"s3:ObjectCreated:*",
"s3:ObjectCreated:Put",
"s3:ObjectCreated:Post",
"s3:ObjectCreated:Copy",
"s3:ObjectCreated:CompleteMultipartUpload",
"s3:ObjectRemoved:*",
"s3:ObjectRemoved:Delete",
"s3:ObjectRemoved:DeleteMarkerCreated",
]
found_notifications = (
0 # Tripwire -- if this is not ever set, then there were no notifications
)
for name, arn_string in notification_fields:
# 1st verify that the proper notification configuration has been passed in (with an ARN that is close
# to being correct -- nothing too complex in the ARN logic):
the_notification = parsed_xml["NotificationConfiguration"].get(
"{}Configuration".format(name)
)
if the_notification:
found_notifications += 1
if not isinstance(the_notification, list):
the_notification = parsed_xml["NotificationConfiguration"][
"{}Configuration".format(name)
] = [the_notification]
for n in the_notification:
if not n[name].startswith("arn:aws:{}:".format(arn_string)):
raise InvalidNotificationARN()
# 2nd, verify that the Events list is correct:
assert n["Event"]
if not isinstance(n["Event"], list):
n["Event"] = [n["Event"]]
for event in n["Event"]:
if event not in event_names:
raise InvalidNotificationEvent()
# Parse out the filters:
if n.get("Filter"):
# Error if S3Key is blank:
if not n["Filter"]["S3Key"]:
raise KeyError()
if not isinstance(n["Filter"]["S3Key"]["FilterRule"], list):
n["Filter"]["S3Key"]["FilterRule"] = [
n["Filter"]["S3Key"]["FilterRule"]
]
for filter_rule in n["Filter"]["S3Key"]["FilterRule"]:
assert filter_rule["Name"] in ["suffix", "prefix"]
assert filter_rule["Value"]
if not found_notifications:
return {}
return parsed_xml["NotificationConfiguration"]
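# Illustrative fragment of input this parser accepts (Id/ARN values invented):
#   <NotificationConfiguration>
#     <QueueConfiguration>
#       <Id>q1</Id>
#       <Queue>arn:aws:sqs:us-east-1:123456789012:my-queue</Queue>
#       <Event>s3:ObjectCreated:*</Event>
#       <Filter><S3Key>
#         <FilterRule><Name>prefix</Name><Value>images/</Value></FilterRule>
#       </S3Key></Filter>
#     </QueueConfiguration>
#   </NotificationConfiguration>
# A Queue ARN not starting with "arn:aws:sqs:" raises InvalidNotificationARN;
# an unknown <Event> raises InvalidNotificationEvent.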
def _accelerate_config_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
config = parsed_xml["AccelerateConfiguration"]
return config["Status"]
def _key_response_delete(self, headers, bucket_name, query, key_name):
self._set_action("KEY", "DELETE", query)
self._authenticate_and_authorize_s3_action()
if query.get("uploadId"):
upload_id = query["uploadId"][0]
self.backend.abort_multipart_upload(bucket_name, upload_id)
return 204, {}, ""
version_id = query.get("versionId", [None])[0]
if "tagging" in query:
self.backend.delete_object_tagging(
bucket_name, key_name, version_id=version_id
)
template = self.response_template(S3_DELETE_KEY_TAGGING_RESPONSE)
return 204, {}, template.render(version_id=version_id)
bypass = headers.get("X-Amz-Bypass-Governance-Retention")
success, response_meta = self.backend.delete_object(
bucket_name, key_name, version_id=version_id, bypass=bypass
)
response_headers = {}
if response_meta is not None:
for k in response_meta:
response_headers["x-amz-{}".format(k)] = response_meta[k]
return 204, response_headers, ""
def _complete_multipart_body(self, body):
ps = minidom.parseString(body).getElementsByTagName("Part")
prev = 0
for p in ps:
pn = int(p.getElementsByTagName("PartNumber")[0].firstChild.wholeText)
if pn <= prev:
raise InvalidPartOrder()
yield (pn, p.getElementsByTagName("ETag")[0].firstChild.wholeText)
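# Sketch of the CompleteMultipartUpload body this generator walks; part
# numbers must be strictly increasing or InvalidPartOrder is raised:
#   <CompleteMultipartUpload>
#     <Part><PartNumber>1</PartNumber><ETag>"etag1"</ETag></Part>
#     <Part><PartNumber>2</PartNumber><ETag>"etag2"</ETag></Part>
#   </CompleteMultipartUpload>
#   -> yields (1, '"etag1"'), then (2, '"etag2"')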
def _key_response_post(self, request, body, bucket_name, query, key_name):
self._set_action("KEY", "POST", query)
self._authenticate_and_authorize_s3_action()
if body == b"" and "uploads" in query:
metadata = metadata_from_headers(request.headers)
storage_type = request.headers.get("x-amz-storage-class", "STANDARD")
multipart_id = self.backend.create_multipart_upload(
bucket_name, key_name, metadata, storage_type
)
template = self.response_template(S3_MULTIPART_INITIATE_RESPONSE)
response = template.render(
bucket_name=bucket_name, key_name=key_name, upload_id=multipart_id
)
return 200, {}, response
if query.get("uploadId"):
body = self._complete_multipart_body(body)
multipart_id = query["uploadId"][0]
multipart, value, etag = self.backend.complete_multipart_upload(
bucket_name, multipart_id, body
)
if value is None:
return 400, {}, ""
key = self.backend.put_object(
bucket_name,
multipart.key_name,
value,
storage=multipart.storage,
etag=etag,
multipart=multipart,
)
key.set_metadata(multipart.metadata)
template = self.response_template(S3_MULTIPART_COMPLETE_RESPONSE)
headers = {}
if key.version_id:
headers["x-amz-version-id"] = key.version_id
return (
200,
headers,
template.render(
bucket_name=bucket_name, key_name=key.name, etag=key.etag
),
)
elif "restore" in query:
es = minidom.parseString(body).getElementsByTagName("Days")
days = es[0].childNodes[0].wholeText
key = self.backend.get_object(bucket_name, key_name)
r = 202
if key.expiry_date is not None:
r = 200
key.restore(int(days))
return r, {}, ""
else:
raise NotImplementedError(
"Method POST had only been implemented for multipart uploads and restore operations, so far"
)
def _invalid_headers(self, url, headers):
metadata_to_check = {
"content-disposition": "Content-Disposition",
"content-encoding": "Content-Encoding",
"content-language": "Content-Language",
"content-length": "Content-Length",
"content-md5": "Content-MD5",
"content-type": "Content-Type",
}
for url_key, header_key in metadata_to_check.items():
metadata_in_url = re.search(url_key + "=(.+?)(&.+$|$)", url)
if metadata_in_url:
url_value = unquote(metadata_in_url.group(1))
if header_key not in headers or (url_value != headers[header_key]):
return True
return False
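# Hedged example (URL and header values invented): response metadata pinned
# in a presigned URL must match the request headers, otherwise flag it:
#   url = "https://bucket.s3.amazonaws.com/key?content-type=text/plain&X-Amz-Signature=abc"
#   headers = {"Content-Type": "application/json"}
#   self._invalid_headers(url, headers)  ->  True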
S3ResponseInstance = ResponseObject(s3_backend)
S3_ALL_BUCKETS = """<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Owner>
<ID>bcaf1ffd86f41161ca5fb16fd081034f</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<Buckets>
{% for bucket in buckets %}
<Bucket>
<Name>{{ bucket.name }}</Name>
<CreationDate>{{ bucket.creation_date_ISO8601 }}</CreationDate>
</Bucket>
{% endfor %}
</Buckets>
</ListAllMyBucketsResult>"""
S3_BUCKET_GET_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>{{ bucket.name }}</Name>
{% if prefix != None %}
<Prefix>{{ prefix }}</Prefix>
{% endif %}
<MaxKeys>{{ max_keys }}</MaxKeys>
{% if delimiter %}
<Delimiter>{{ delimiter }}</Delimiter>
{% endif %}
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% if next_marker %}
<NextMarker>{{ next_marker }}</NextMarker>
{% endif %}
{% for key in result_keys %}
<Contents>
<Key>{{ key.name }}</Key>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Contents>
{% endfor %}
{% if delimiter %}
{% for folder in result_folders %}
<CommonPrefixes>
<Prefix>{{ folder }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
</ListBucketResult>"""
S3_BUCKET_GET_RESPONSE_V2 = """<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>{{ bucket.name }}</Name>
{% if prefix != None %}
<Prefix>{{ prefix }}</Prefix>
{% endif %}
<MaxKeys>{{ max_keys }}</MaxKeys>
<KeyCount>{{ key_count }}</KeyCount>
{% if delimiter %}
<Delimiter>{{ delimiter }}</Delimiter>
{% endif %}
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% if next_continuation_token %}
<NextContinuationToken>{{ next_continuation_token }}</NextContinuationToken>
{% endif %}
{% if start_after %}
<StartAfter>{{ start_after }}</StartAfter>
{% endif %}
{% for key in result_keys %}
<Contents>
<Key>{{ key.name }}</Key>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
{% if fetch_owner %}
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
{% endif %}
</Contents>
{% endfor %}
{% if delimiter %}
{% for folder in result_folders %}
<CommonPrefixes>
<Prefix>{{ folder }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
</ListBucketResult>"""
S3_BUCKET_CREATE_RESPONSE = """<CreateBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<CreateBucketResponse>
<Bucket>{{ bucket.name }}</Bucket>
</CreateBucketResponse>
</CreateBucketResponse>"""
S3_DELETE_BUCKET_SUCCESS = """<DeleteBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<DeleteBucketResponse>
<Code>204</Code>
<Description>No Content</Description>
</DeleteBucketResponse>
</DeleteBucketResponse>"""
S3_DELETE_BUCKET_WITH_ITEMS_ERROR = """<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>BucketNotEmpty</Code>
<Message>The bucket you tried to delete is not empty</Message>
<BucketName>{{ bucket.name }}</BucketName>
<RequestId>asdfasdfsdafds</RequestId>
<HostId>sdfgdsfgdsfgdfsdsfgdfs</HostId>
</Error>"""
S3_BUCKET_LOCATION = """<?xml version="1.0" encoding="UTF-8"?>
<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">{% if location != None %}{{ location }}{% endif %}</LocationConstraint>"""
S3_BUCKET_LIFECYCLE_CONFIGURATION = """<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{% for rule in rules %}
<Rule>
<ID>{{ rule.id }}</ID>
{% if rule.filter %}
<Filter>
{% if rule.filter.prefix != None %}
<Prefix>{{ rule.filter.prefix }}</Prefix>
{% endif %}
{% if rule.filter.tag_key %}
<Tag>
<Key>{{ rule.filter.tag_key }}</Key>
<Value>{{ rule.filter.tag_value }}</Value>
</Tag>
{% endif %}
{% if rule.filter.and_filter %}
<And>
{% if rule.filter.and_filter.prefix != None %}
<Prefix>{{ rule.filter.and_filter.prefix }}</Prefix>
{% endif %}
{% for key, value in rule.filter.and_filter.tags.items() %}
<Tag>
<Key>{{ key }}</Key>
<Value>{{ value }}</Value>
</Tag>
{% endfor %}
</And>
{% endif %}
</Filter>
{% else %}
{% if rule.prefix != None %}
<Prefix>{{ rule.prefix }}</Prefix>
{% endif %}
{% endif %}
<Status>{{ rule.status }}</Status>
{% if rule.storage_class %}
<Transition>
{% if rule.transition_days %}
<Days>{{ rule.transition_days }}</Days>
{% endif %}
{% if rule.transition_date %}
<Date>{{ rule.transition_date }}</Date>
{% endif %}
<StorageClass>{{ rule.storage_class }}</StorageClass>
</Transition>
{% endif %}
{% if rule.expiration_days or rule.expiration_date or rule.expired_object_delete_marker %}
<Expiration>
{% if rule.expiration_days %}
<Days>{{ rule.expiration_days }}</Days>
{% endif %}
{% if rule.expiration_date %}
<Date>{{ rule.expiration_date }}</Date>
{% endif %}
{% if rule.expired_object_delete_marker %}
<ExpiredObjectDeleteMarker>{{ rule.expired_object_delete_marker }}</ExpiredObjectDeleteMarker>
{% endif %}
</Expiration>
{% endif %}
{% if rule.nvt_noncurrent_days and rule.nvt_storage_class %}
<NoncurrentVersionTransition>
<NoncurrentDays>{{ rule.nvt_noncurrent_days }}</NoncurrentDays>
<StorageClass>{{ rule.nvt_storage_class }}</StorageClass>
</NoncurrentVersionTransition>
{% endif %}
{% if rule.nve_noncurrent_days %}
<NoncurrentVersionExpiration>
<NoncurrentDays>{{ rule.nve_noncurrent_days }}</NoncurrentDays>
</NoncurrentVersionExpiration>
{% endif %}
{% if rule.aimu_days %}
<AbortIncompleteMultipartUpload>
<DaysAfterInitiation>{{ rule.aimu_days }}</DaysAfterInitiation>
</AbortIncompleteMultipartUpload>
{% endif %}
</Rule>
{% endfor %}
</LifecycleConfiguration>
"""
S3_BUCKET_VERSIONING = """<?xml version="1.0" encoding="UTF-8"?>
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ bucket_versioning_status }}</Status>
</VersioningConfiguration>
"""
S3_BUCKET_GET_VERSIONING = """<?xml version="1.0" encoding="UTF-8"?>
{% if status is none %}
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
{% else %}
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ status }}</Status>
</VersioningConfiguration>
{% endif %}
"""
S3_BUCKET_GET_VERSIONS = """<?xml version="1.0" encoding="UTF-8"?>
<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Name>{{ bucket.name }}</Name>
{% if prefix != None %}
<Prefix>{{ prefix }}</Prefix>
{% endif %}
{% if common_prefixes %}
{% for prefix in common_prefixes %}
<CommonPrefixes>
<Prefix>{{ prefix }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
<Delimiter>{{ delimiter }}</Delimiter>
<KeyMarker>{{ key_marker or "" }}</KeyMarker>
<MaxKeys>{{ max_keys }}</MaxKeys>
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% for key in key_list %}
<Version>
<Key>{{ key.name }}</Key>
<VersionId>{% if key.version_id is none %}null{% else %}{{ key.version_id }}{% endif %}</VersionId>
<IsLatest>{{ 'true' if key.is_latest else 'false' }}</IsLatest>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Version>
{% endfor %}
{% for marker in delete_marker_list %}
<DeleteMarker>
<Key>{{ marker.name }}</Key>
<VersionId>{{ marker.version_id }}</VersionId>
<IsLatest>{{ 'true' if marker.is_latest else 'false' }}</IsLatest>
<LastModified>{{ marker.last_modified_ISO8601 }}</LastModified>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</DeleteMarker>
{% endfor %}
</ListVersionsResult>
"""
S3_DELETE_KEYS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
{% for k, v in deleted %}
<Deleted>
<Key>{{k}}</Key>
{% if v %}<VersionId>{{v}}</VersionId>{% endif %}
</Deleted>
{% endfor %}
{% for k in delete_errors %}
<Error>
<Key>{{k}}</Key>
</Error>
{% endfor %}
</DeleteResult>"""
S3_DELETE_KEY_TAGGING_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteObjectTaggingResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<VersionId>{{version_id}}</VersionId>
</DeleteObjectTaggingResult>
"""
S3_OBJECT_ACL_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<AccessControlList>
{% for grant in acl.grants %}
<Grant>
{% for grantee in grant.grantees %}
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="{{ grantee.type }}">
{% if grantee.uri %}
<URI>{{ grantee.uri }}</URI>
{% endif %}
{% if grantee.id %}
<ID>{{ grantee.id }}</ID>
{% endif %}
{% if grantee.display_name %}
<DisplayName>{{ grantee.display_name }}</DisplayName>
{% endif %}
</Grantee>
{% endfor %}
{% for permission in grant.permissions %}
<Permission>{{ permission }}</Permission>
{% endfor %}
</Grant>
{% endfor %}
</AccessControlList>
</AccessControlPolicy>"""
S3_OBJECT_LEGAL_HOLD = """<?xml version="1.0" encoding="UTF-8"?>
<LegalHold>
<Status>{{ legal_hold }}</Status>
</LegalHold>
"""
S3_OBJECT_TAGGING_RESPONSE = """\
<?xml version="1.0" encoding="UTF-8"?>
<Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<TagSet>
{% for tag in tags %}
<Tag>
<Key>{{ tag.Key }}</Key>
<Value>{{ tag.Value }}</Value>
</Tag>
{% endfor %}
</TagSet>
</Tagging>"""
S3_BUCKET_CORS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CORSConfiguration>
{% for cors in cors %}
<CORSRule>
{% for origin in cors.allowed_origins %}
<AllowedOrigin>{{ origin }}</AllowedOrigin>
{% endfor %}
{% for method in cors.allowed_methods %}
<AllowedMethod>{{ method }}</AllowedMethod>
{% endfor %}
{% if cors.allowed_headers is not none %}
{% for header in cors.allowed_headers %}
<AllowedHeader>{{ header }}</AllowedHeader>
{% endfor %}
{% endif %}
{% if cors.exposed_headers is not none %}
{% for header in cors.exposed_headers %}
<ExposedHeader>{{ header }}</ExposedHeader>
{% endfor %}
{% endif %}
{% if cors.max_age_seconds is not none %}
<MaxAgeSeconds>{{ cors.max_age_seconds }}</MaxAgeSeconds>
{% endif %}
</CORSRule>
{% endfor %}
</CORSConfiguration>
"""
S3_OBJECT_COPY_RESPONSE = """\
<CopyObjectResult xmlns="http://doc.s3.amazonaws.com/2006-03-01">
<ETag>{{ key.etag }}</ETag>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
</CopyObjectResult>"""
S3_MULTIPART_INITIATE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<UploadId>{{ upload_id }}</UploadId>
</InitiateMultipartUploadResult>"""
S3_MULTIPART_UPLOAD_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CopyPartResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<LastModified>{{ part.last_modified_ISO8601 }}</LastModified>
<ETag>{{ part.etag }}</ETag>
</CopyPartResult>"""
S3_MULTIPART_LIST_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<UploadId>{{ upload_id }}</UploadId>
<StorageClass>STANDARD</StorageClass>
<Initiator>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Initiator>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<PartNumberMarker>{{ part_number_marker }}</PartNumberMarker>
<NextPartNumberMarker>{{ next_part_number_marker }}</NextPartNumberMarker>
<MaxParts>{{ max_parts }}</MaxParts>
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% for part in parts %}
<Part>
<PartNumber>{{ part.name }}</PartNumber>
<LastModified>{{ part.last_modified_ISO8601 }}</LastModified>
<ETag>{{ part.etag }}</ETag>
<Size>{{ part.size }}</Size>
</Part>
{% endfor %}
</ListPartsResult>"""
S3_MULTIPART_COMPLETE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CompleteMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Location>http://{{ bucket_name }}.s3.amazonaws.com/{{ key_name }}</Location>
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<ETag>{{ etag }}</ETag>
</CompleteMultipartUploadResult>
"""
S3_ALL_MULTIPARTS = (
"""<?xml version="1.0" encoding="UTF-8"?>
<ListMultipartUploadsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<KeyMarker></KeyMarker>
<UploadIdMarker></UploadIdMarker>
<MaxUploads>1000</MaxUploads>
<IsTruncated>false</IsTruncated>
{% for upload in uploads %}
<Upload>
<Key>{{ upload.key_name }}</Key>
<UploadId>{{ upload.id }}</UploadId>
<Initiator>
<ID>arn:aws:iam::"""
+ ACCOUNT_ID
+ """:user/user1-11111a31-17b5-4fb7-9df5-b111111f13de</ID>
<DisplayName>user1-11111a31-17b5-4fb7-9df5-b111111f13de</DisplayName>
</Initiator>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<Initiated>2010-11-10T20:48:33.000Z</Initiated>
</Upload>
{% endfor %}
</ListMultipartUploadsResult>
"""
)
S3_NO_POLICY = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchBucketPolicy</Code>
<Message>The bucket policy does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_LIFECYCLE = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchLifecycleConfiguration</Code>
<Message>The lifecycle configuration does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_BUCKET_TAGGING = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchTagSet</Code>
<Message>The TagSet does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_BUCKET_WEBSITE_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchWebsiteConfiguration</Code>
<Message>The specified bucket does not have a website configuration</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_INVALID_CORS_REQUEST = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchWebsiteConfiguration</Code>
<Message>The specified bucket does not have a website configuration</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_CORS_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchCORSConfiguration</Code>
<Message>The CORS configuration does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_LOGGING_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01">
<LoggingEnabled>
<TargetBucket>{{ logging["TargetBucket"] }}</TargetBucket>
<TargetPrefix>{{ logging["TargetPrefix"] }}</TargetPrefix>
{% if logging.get("TargetGrants") %}
<TargetGrants>
{% for grant in logging["TargetGrants"] %}
<Grant>
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="{{ grant.grantees[0].type }}">
{% if grant.grantees[0].uri %}
<URI>{{ grant.grantees[0].uri }}</URI>
{% endif %}
{% if grant.grantees[0].id %}
<ID>{{ grant.grantees[0].id }}</ID>
{% endif %}
{% if grant.grantees[0].display_name %}
<DisplayName>{{ grant.grantees[0].display_name }}</DisplayName>
{% endif %}
</Grantee>
<Permission>{{ grant.permissions[0] }}</Permission>
</Grant>
{% endfor %}
</TargetGrants>
{% endif %}
</LoggingEnabled>
</BucketLoggingStatus>
"""
S3_NO_LOGGING_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" />
"""
S3_ENCRYPTION_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<ServerSideEncryptionConfiguration xmlns="http://doc.s3.amazonaws.com/2006-03-01">
{% for entry in encryption %}
<Rule>
<ApplyServerSideEncryptionByDefault>
<SSEAlgorithm>{{ entry["Rule"]["ApplyServerSideEncryptionByDefault"]["SSEAlgorithm"] }}</SSEAlgorithm>
{% if entry["Rule"]["ApplyServerSideEncryptionByDefault"].get("KMSMasterKeyID") %}
<KMSMasterKeyID>{{ entry["Rule"]["ApplyServerSideEncryptionByDefault"]["KMSMasterKeyID"] }}</KMSMasterKeyID>
{% endif %}
</ApplyServerSideEncryptionByDefault>
<BucketKeyEnabled>{{ 'true' if entry["Rule"].get("BucketKeyEnabled") == 'true' else 'false' }}</BucketKeyEnabled>
</Rule>
{% endfor %}
</ServerSideEncryptionConfiguration>
"""
S3_INVALID_PRESIGNED_PARAMETERS = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>SignatureDoesNotMatch</Code>
<Message>The request signature we calculated does not match the signature you provided. Check your key and signing method.</Message>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_ENCRYPTION = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>ServerSideEncryptionConfigurationNotFoundError</Code>
<Message>The server side encryption configuration was not found</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_GET_BUCKET_NOTIFICATION_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<NotificationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{% for topic in config.topic %}
<TopicConfiguration>
<Id>{{ topic.id }}</Id>
<Topic>{{ topic.arn }}</Topic>
{% for event in topic.events %}
<Event>{{ event }}</Event>
{% endfor %}
{% if topic.filters %}
<Filter>
<S3Key>
{% for rule in topic.filters["S3Key"]["FilterRule"] %}
<FilterRule>
<Name>{{ rule["Name"] }}</Name>
<Value>{{ rule["Value"] }}</Value>
</FilterRule>
{% endfor %}
</S3Key>
</Filter>
{% endif %}
</TopicConfiguration>
{% endfor %}
{% for queue in config.queue %}
<QueueConfiguration>
<Id>{{ queue.id }}</Id>
<Queue>{{ queue.arn }}</Queue>
{% for event in queue.events %}
<Event>{{ event }}</Event>
{% endfor %}
{% if queue.filters %}
<Filter>
<S3Key>
{% for rule in queue.filters["S3Key"]["FilterRule"] %}
<FilterRule>
<Name>{{ rule["Name"] }}</Name>
<Value>{{ rule["Value"] }}</Value>
</FilterRule>
{% endfor %}
</S3Key>
</Filter>
{% endif %}
</QueueConfiguration>
{% endfor %}
{% for cf in config.cloud_function %}
<CloudFunctionConfiguration>
<Id>{{ cf.id }}</Id>
<CloudFunction>{{ cf.arn }}</CloudFunction>
{% for event in cf.events %}
<Event>{{ event }}</Event>
{% endfor %}
{% if cf.filters %}
<Filter>
<S3Key>
{% for rule in cf.filters["S3Key"]["FilterRule"] %}
<FilterRule>
<Name>{{ rule["Name"] }}</Name>
<Value>{{ rule["Value"] }}</Value>
</FilterRule>
{% endfor %}
</S3Key>
</Filter>
{% endif %}
</CloudFunctionConfiguration>
{% endfor %}
</NotificationConfiguration>
"""
S3_BUCKET_ACCELERATE = """
<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ bucket.accelerate_configuration }}</Status>
</AccelerateConfiguration>
"""
S3_BUCKET_ACCELERATE_NOT_SET = """
<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
"""
S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION = """
<PublicAccessBlockConfiguration>
<BlockPublicAcls>{{public_block_config.block_public_acls}}</BlockPublicAcls>
<IgnorePublicAcls>{{public_block_config.ignore_public_acls}}</IgnorePublicAcls>
<BlockPublicPolicy>{{public_block_config.block_public_policy}}</BlockPublicPolicy>
<RestrictPublicBuckets>{{public_block_config.restrict_public_buckets}}</RestrictPublicBuckets>
</PublicAccessBlockConfiguration>
"""
S3_BUCKET_LOCK_CONFIGURATION = """
<ObjectLockConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{%if lock_enabled %}
<ObjectLockEnabled>Enabled</ObjectLockEnabled>
{% else %}
<ObjectLockEnabled>Disabled</ObjectLockEnabled>
{% endif %}
{% if mode %}
<Rule>
<DefaultRetention>
<Mode>{{mode}}</Mode>
<Days>{{days}}</Days>
<Years>{{years}}</Years>
</DefaultRetention>
</Rule>
{% endif %}
</ObjectLockConfiguration>
"""
S3_DUPLICATE_BUCKET_ERROR = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>BucketAlreadyOwnedByYou</Code>
<Message>Your previous request to create the named bucket succeeded and you already own it.</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
| true
| true
|
790425cdf51c12d5693923f4db37300a4be41df1
| 4,860
|
py
|
Python
|
lib/datasets/osd_object.py
|
datboi223/UnseenObjectClustering
|
32ec100e7c15478fba5e67509c7bff397e7c885e
|
[
"BSD-Source-Code"
] | null | null | null |
lib/datasets/osd_object.py
|
datboi223/UnseenObjectClustering
|
32ec100e7c15478fba5e67509c7bff397e7c885e
|
[
"BSD-Source-Code"
] | null | null | null |
lib/datasets/osd_object.py
|
datboi223/UnseenObjectClustering
|
32ec100e7c15478fba5e67509c7bff397e7c885e
|
[
"BSD-Source-Code"
] | null | null | null |
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
import torch
import torch.utils.data as data
import os, math
import sys
import time
import random
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import datasets
import open3d as o3d
import pcl
from fcn.config import cfg
from utils.blob import chromatic_transform, add_noise
from utils import mask as util_
class OSDObject(data.Dataset, datasets.imdb):
def __init__(self, image_set, osd_object_path = None):
self._name = 'osd_object_' + image_set
self._image_set = image_set
self._osd_object_path = self._get_default_path() if osd_object_path is None \
else osd_object_path
self._classes_all = ('__background__', 'foreground')
self._classes = self._classes_all
self._pixel_mean = torch.tensor(cfg.PIXEL_MEANS / 255.0).float()
self._width = 640
self._height = 480
## TODO
print('self._osd_object_path = ', self._osd_object_path)
# get all images
data_path = os.path.join(self._osd_object_path, 'image_color')
self.image_files = sorted(glob.glob(data_path + '/*.png'))
print('%d images for dataset %s' % (len(self.image_files), self._name))
self._size = len(self.image_files)
assert os.path.exists(self._osd_object_path), \
'osd_object path does not exist: {}'.format(self._osd_object_path)
def process_label(self, foreground_labels):
""" Process foreground_labels
- Map the foreground_labels to {0, 1, ..., K-1}
@param foreground_labels: a [H x W] numpy array of labels
@return: foreground_labels
"""
# Find the unique (nonnegative) foreground_labels, map them to {0, ..., K-1}
unique_nonnegative_indices = np.unique(foreground_labels)
mapped_labels = foreground_labels.copy()
for k in range(unique_nonnegative_indices.shape[0]):
mapped_labels[foreground_labels == unique_nonnegative_indices[k]] = k
foreground_labels = mapped_labels
return foreground_labels
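# Minimal assumed example of the remapping:
#   foreground_labels = np.array([[0, 0, 3], [7, 7, 3]])
#   process_label(foreground_labels) -> [[0, 0, 1], [2, 2, 1]]
# i.e. the unique ids {0, 3, 7} are compressed to {0, 1, 2}.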
def __getitem__(self, idx):
# BGR image
filename = self.image_files[idx]
print('filename = ', filename)
im = cv2.imread(filename)
if cfg.TRAIN.CHROMATIC and cfg.MODE == 'TRAIN' and np.random.rand(1) > 0.1:
im = chromatic_transform(im)
if cfg.TRAIN.ADD_NOISE and cfg.MODE == 'TRAIN' and np.random.rand(1) > 0.1:
im = add_noise(im)
im_tensor = torch.from_numpy(im) / 255.0
im_tensor_bgr = im_tensor.clone()
im_tensor_bgr = im_tensor_bgr.permute(2, 0, 1)
im_tensor -= self._pixel_mean
image_blob = im_tensor.permute(2, 0, 1)
# Label
labels_filename = filename.replace('image_color', 'annotation')
foreground_labels = util_.imread_indexed(labels_filename)
foreground_labels = self.process_label(foreground_labels)
label_blob = torch.from_numpy(foreground_labels).unsqueeze(0)
index = filename.find('OSD')
sample = {'image_color': image_blob,
'image_color_bgr': im_tensor_bgr,
'label': label_blob,
'filename': filename[index+4:]}
# Depth image
if cfg.INPUT == 'DEPTH' or cfg.INPUT == 'RGBD':
pcd_filename = filename.replace('image_color', 'pcd')
pcd_filename = pcd_filename.replace('png', 'pcd')
print('pcd_filename = ', pcd_filename)
pcloud = pcl.load(pcd_filename).to_array()
pcloud[np.isnan(pcloud)] = 0
xyz_img = pcloud.reshape((self._height, self._width, 3))
depth_blob = torch.from_numpy(xyz_img).permute(2, 0, 1)
sample['depth'] = depth_blob
# # Depth image (alternative loader: pcl replaced with open3d)
# if cfg.INPUT == 'DEPTH' or cfg.INPUT == 'RGBD':
#     pcd_filename = filename.replace('image_color', 'pcd')
#     pcd_filename = pcd_filename.replace('png', 'pcd')
#     pcloud = o3d.io.read_point_cloud(pcd_filename)
#     pcloud = np.asarray(pcloud.points)  # (N, 3) array of XYZ points
#     pcloud[np.isnan(pcloud)] = 0
#     xyz_img = pcloud.reshape((self._height, self._width, 3))
#     depth_blob = torch.from_numpy(xyz_img).permute(2, 0, 1)
#     sample['depth'] = depth_blob
return sample
def __len__(self):
return self._size
def _get_default_path(self):
"""
Return the default path where osd_object is expected to be installed.
"""
return os.path.join(datasets.ROOT_DIR, 'data', 'OSD')
| 36.268657
| 85
| 0.627984
|
import torch
import torch.utils.data as data
import os, math
import sys
import time
import random
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import datasets
import open3d as o3d
import pcl
from fcn.config import cfg
from utils.blob import chromatic_transform, add_noise
from utils import mask as util_
class OSDObject(data.Dataset, datasets.imdb):
def __init__(self, image_set, osd_object_path = None):
self._name = 'osd_object_' + image_set
self._image_set = image_set
self._osd_object_path = self._get_default_path() if osd_object_path is None \
else osd_object_path
self._classes_all = ('__background__', 'foreground')
self._classes = self._classes_all
self._pixel_mean = torch.tensor(cfg.PIXEL_MEANS / 255.0).float()
self._width = 640
self._height = 480
print('self._osd_object_path = ', self._osd_object_path)
data_path = os.path.join(self._osd_object_path, 'image_color')
self.image_files = sorted(glob.glob(data_path + '/*.png'))
print('%d images for dataset %s' % (len(self.image_files), self._name))
self._size = len(self.image_files)
assert os.path.exists(self._osd_object_path), \
'osd_object path does not exist: {}'.format(self._osd_object_path)
def process_label(self, foreground_labels):
unique_nonnegative_indices = np.unique(foreground_labels)
mapped_labels = foreground_labels.copy()
for k in range(unique_nonnegative_indices.shape[0]):
mapped_labels[foreground_labels == unique_nonnegative_indices[k]] = k
foreground_labels = mapped_labels
return foreground_labels
def __getitem__(self, idx):
filename = self.image_files[idx]
print('filename = ', filename)
im = cv2.imread(filename)
if cfg.TRAIN.CHROMATIC and cfg.MODE == 'TRAIN' and np.random.rand(1) > 0.1:
im = chromatic_transform(im)
if cfg.TRAIN.ADD_NOISE and cfg.MODE == 'TRAIN' and np.random.rand(1) > 0.1:
im = add_noise(im)
im_tensor = torch.from_numpy(im) / 255.0
im_tensor_bgr = im_tensor.clone()
im_tensor_bgr = im_tensor_bgr.permute(2, 0, 1)
im_tensor -= self._pixel_mean
image_blob = im_tensor.permute(2, 0, 1)
labels_filename = filename.replace('image_color', 'annotation')
foreground_labels = util_.imread_indexed(labels_filename)
foreground_labels = self.process_label(foreground_labels)
label_blob = torch.from_numpy(foreground_labels).unsqueeze(0)
index = filename.find('OSD')
sample = {'image_color': image_blob,
'image_color_bgr': im_tensor_bgr,
'label': label_blob,
'filename': filename[index+4:]}
if cfg.INPUT == 'DEPTH' or cfg.INPUT == 'RGBD':
pcd_filename = filename.replace('image_color', 'pcd')
pcd_filename = pcd_filename.replace('png', 'pcd')
print('pcd_filename = ', pcd_filename)
pcloud = pcl.load(pcd_filename).to_array()
pcloud[np.isnan(pcloud)] = 0
xyz_img = pcloud.reshape((self._height, self._width, 3))
depth_blob = torch.from_numpy(xyz_img).permute(2, 0, 1)
sample['depth'] = depth_blob
return sample
def __len__(self):
return self._size
def _get_default_path(self):
return os.path.join(datasets.ROOT_DIR, 'data', 'OSD')
| true
| true
|
790425d501b4c4fb6f9c05b89b6c2559834faa07
| 1,895
|
py
|
Python
|
components/nginx/scripts/retrieve_agents.py
|
AlexAdamenko/cloudify-openstack
|
6b89af7b9da4c55dc91684bd1781115e71353e03
|
[
"Apache-2.0"
] | null | null | null |
components/nginx/scripts/retrieve_agents.py
|
AlexAdamenko/cloudify-openstack
|
6b89af7b9da4c55dc91684bd1781115e71353e03
|
[
"Apache-2.0"
] | null | null | null |
components/nginx/scripts/retrieve_agents.py
|
AlexAdamenko/cloudify-openstack
|
6b89af7b9da4c55dc91684bd1781115e71353e03
|
[
"Apache-2.0"
] | null | null | null |
#!/bin/python
import platform
import fabric.api
from fabric.contrib.files import exists as remote_exists
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
def _get_distro_info():
distro, _, release = platform.linux_distribution(
full_distribution_name=False)
return '{0} {1}'.format(distro, release)
def retrieve(agent_packages):
ctx.logger.info('Downloading Cloudify Agents...')
if not agent_packages:
raise NonRecoverableError(
'Cannot find agent packages. At least one agent package must be '
'provided compatible with {0}.'.format(_get_distro_info()))
for agent, source_url in agent_packages.items():
dest_path = ctx.instance.runtime_properties['agent_packages_path']
agent_name = agent.replace('_', '-')
# This is a workaround for mapping Centos release names to versions
# to provide a better UX when providing agent inputs.
if agent_name == 'centos-7x-agent':
agent_name = 'centos-core-agent'
elif agent_name == 'centos-6x-agent':
agent_name = 'centos-final-agent'
elif agent_name == 'redhat-7x-agent':
agent_name = 'redhat-maipo-agent'
elif agent_name == 'redhat-6x-agent':
agent_name = 'redhat-santiago-agent'
if agent_name == 'cloudify-windows-agent':
filename = '{0}.exe'.format(agent_name)
else:
filename = '{0}.tar.gz'.format(agent_name)
dest_file = '{0}/{1}'.format(dest_path, filename)
ctx.logger.info('Downloading Agent Package {0} to {1} if it does not '
'already exist...'.format(source_url, dest_file))
if not remote_exists(dest_file):
dl_cmd = 'curl --retry 10 -f -s -S -L {0} --create-dirs -o {1}'
fabric.api.sudo(dl_cmd.format(source_url, dest_file))
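# Hedged usage sketch (the URL is invented): thanks to the release-name
# mapping above, this package lands as centos-core-agent.tar.gz under
# agent_packages_path:
#   retrieve({'centos_7x_agent': 'http://example.com/centos-7x-agent.tar.gz'})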
| 37.156863
| 78
| 0.643272
|
import platform
import fabric.api
from fabric.contrib.files import exists as remote_exists
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
def _get_distro_info():
distro, _, release = platform.linux_distribution(
full_distribution_name=False)
return '{0} {1}'.format(distro, release)
def retrieve(agent_packages):
ctx.logger.info('Downloading Cloudify Agents...')
if not agent_packages:
raise NonRecoverableError(
'Cannot find agent packages. At least one agent package must be '
'provided compatible with {0}.'.format(_get_distro_info()))
for agent, source_url in agent_packages.items():
dest_path = ctx.instance.runtime_properties['agent_packages_path']
agent_name = agent.replace('_', '-')
if agent_name == 'centos-7x-agent':
agent_name = 'centos-core-agent'
elif agent_name == 'centos-6x-agent':
agent_name = 'centos-final-agent'
elif agent_name == 'redhat-7x-agent':
agent_name = 'redhat-maipo-agent'
elif agent_name == 'redhat-6x-agent':
agent_name = 'redhat-santiago-agent'
if agent_name == 'cloudify-windows-agent':
filename = '{0}.exe'.format(agent_name)
else:
filename = '{0}.tar.gz'.format(agent_name)
dest_file = '{0}/{1}'.format(dest_path, filename)
ctx.logger.info('Downloading Agent Package {0} to {1} if it does not '
'already exist...'.format(source_url, dest_file))
if not remote_exists(dest_file):
dl_cmd = 'curl --retry 10 -f -s -S -L {0} --create-dirs -o {1}'
fabric.api.sudo(dl_cmd.format(source_url, dest_file))
| true
| true
|
790426391fba57d10a1459d87543824a85e55517
| 22,145
|
py
|
Python
|
src/models.py
|
rupshree1999/Brats2019
|
715274b4a407f8ca8fa11d2e3743c5ddb328e59a
|
[
"MIT"
] | 94
|
2019-08-30T02:50:30.000Z
|
2022-03-04T07:56:20.000Z
|
src/models.py
|
rupshree1999/Brats2019
|
715274b4a407f8ca8fa11d2e3743c5ddb328e59a
|
[
"MIT"
] | 22
|
2019-11-05T06:36:37.000Z
|
2022-02-10T00:47:22.000Z
|
src/models.py
|
rupshree1999/Brats2019
|
715274b4a407f8ca8fa11d2e3743c5ddb328e59a
|
[
"MIT"
] | 28
|
2019-10-08T02:32:35.000Z
|
2021-12-14T04:10:32.000Z
|
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tflearn.layers.conv import global_avg_pool
#######################
# 3d functions
#######################
# convolution
# 3D unet graph
def unet(inputI, output_channel):
"""3D U-net"""
phase_flag = 1
concat_dim = 4
conv1_1 = conv3d(
input=inputI,
output_chn=64,
kernel_size=3,
stride=1,
use_bias=False,
name='conv1')
# conv1_1 (1, 96, 96, 96, 64)
conv1_bn = tf.contrib.layers.batch_norm(
conv1_1,
decay=0.9,
updates_collections=None,
epsilon=1e-5,
scale=True,
is_training=phase_flag,
scope="conv1_batch_norm")
conv1_relu = tf.nn.relu(conv1_bn, name='conv1_relu')
pool1_in = tf.layers.max_pooling3d(
inputs=conv1_relu, pool_size=2, strides=2, name='pool1')
# pool1 (1, 48, 48, 48, 64)
# pool1_frac = fractal_net(
# is_global_path_list[0],
# global_path_list[0],
# local_path_list[0],
# self.Blocks,
# self.Columns)(pool1_in)
# pool1_old = pool1_in + pool1_frac
pool1 = pool1_in
conv2_1 = conv3d(
input=pool1,
output_chn=128,
kernel_size=3,
stride=1,
use_bias=False,
name='conv2')
# (1, 48, 48, 48, 128)
conv2_bn = tf.contrib.layers.batch_norm(
conv2_1,
decay=0.9,
updates_collections=None,
epsilon=1e-5,
scale=True,
is_training=phase_flag,
scope="conv2_batch_norm")
conv2_relu = tf.nn.relu(conv2_bn, name='conv2_relu')
pool2_in = tf.layers.max_pooling3d(
inputs=conv2_relu, pool_size=2, strides=2, name='pool2')
# pool2 (1, 24, 24, 24, 128)
# pool2_frac = fractal_net(
# is_global_path_list[1],
# global_path_list[1],
# local_path_list[1],
# self.Blocks,
# self.Columns)(pool2_in)
# pool2 = pool2_in + pool2_frac
pool2 = pool2_in
conv3_1 = conv3d(
input=pool2,
output_chn=256,
kernel_size=3,
stride=1,
use_bias=False,
name='conv3a')
# (1, 24, 24, 24, 256)
conv3_1_bn = tf.contrib.layers.batch_norm(
conv3_1,
decay=0.9,
updates_collections=None,
epsilon=1e-5,
scale=True,
is_training=phase_flag,
scope="conv3_1_batch_norm")
conv3_1_relu = tf.nn.relu(conv3_1_bn, name='conv3_1_relu')
conv3_2 = conv3d(
input=conv3_1_relu,
output_chn=256,
kernel_size=3,
stride=1,
use_bias=False,
name='conv3b')
# (1, 24, 24, 24, 256)
conv3_2 = conv3_2 + conv3_1
conv3_2_bn = tf.contrib.layers.batch_norm(
conv3_2,
decay=0.9,
updates_collections=None,
epsilon=1e-5,
scale=True,
is_training=phase_flag,
scope="conv3_2_batch_norm")
conv3_2_relu = tf.nn.relu(conv3_2_bn, name='conv3_2_relu')
pool3_in = tf.layers.max_pooling3d(
inputs=conv3_2_relu, pool_size=2, strides=2, name='pool3')
# pool3 (1, 12, 12, 12, 256)
# pool3_frac = fractal_net(
# is_global_path_list[2],
# global_path_list[2],
# local_path_list[2],
# self.Blocks,
# self.Columns)(pool3_in)
pool3 = pool3_in
# pool3 = pool3_in + pool3_frac
conv4_1 = conv3d(
input=pool3,
output_chn=512,
kernel_size=3,
stride=1,
use_bias=False,
name='conv4a')
# conv4_1 (1, 12, 12, 12, 512)
conv4_1_bn = tf.contrib.layers.batch_norm(
conv4_1,
decay=0.9,
updates_collections=None,
epsilon=1e-5,
scale=True,
is_training=phase_flag,
scope="conv4_1_batch_norm")
conv4_1_relu = tf.nn.relu(conv4_1_bn, name='conv4_1_relu')
conv4_2 = conv3d(
input=conv4_1_relu,
output_chn=512,
kernel_size=3,
stride=1,
use_bias=False,
name='conv4b')
conv4_2 = conv4_2 + conv4_1
# conv4_2 (1, 12, 12, 12, 512)
conv4_2_bn = tf.contrib.layers.batch_norm(
conv4_2,
decay=0.9,
updates_collections=None,
epsilon=1e-5,
scale=True,
is_training=phase_flag,
scope="conv4_2_batch_norm")
conv4_2_relu = tf.nn.relu(conv4_2_bn, name='conv4_2_relu')
pool4 = tf.layers.max_pooling3d(
inputs=conv4_2_relu,
pool_size=2,
strides=2,
name='pool4')
# pool4 (1, 6, 6, 6, 512)
conv5_1 = conv_bn_relu(
input=pool4,
output_chn=512,
kernel_size=3,
stride=1,
use_bias=False,
is_training=phase_flag,
name='conv5_1')
# conv5_1 (1, 6, 6, 6, 512)
conv5_2 = conv_bn_relu(
input=conv5_1,
output_chn=512,
kernel_size=3,
stride=1,
use_bias=False,
is_training=phase_flag,
name='conv5_2')
# conv5_2 (1, 6, 6, 6, 512)
deconv1_1 = deconv_bn_relu(
input=conv5_2,
output_chn=512,
is_training=phase_flag,
name='deconv1_1')
# (1, 12, 12, 12, 512)
concat_1 = tf.concat([deconv1_1, conv4_2],
axis=concat_dim, name='concat_1')
# (1, 12, 12, 12, 1024)
deconv1_2_in = conv_bn_relu(
input=concat_1,
output_chn=256,
kernel_size=3,
stride=1,
use_bias=False,
is_training=phase_flag,
name='deconv1_2')
# deconv1_2_frac = fractal_net(
# is_global_path_list[3],
# global_path_list[3],
# local_path_list[3],
# self.Blocks,
# self.Columns)(deconv1_2_in)
deconv1_2 = deconv1_2_in
# deconv1_2 = deconv1_2_in + deconv1_2_frac # (1, 12, 12, 12, 256)
deconv2_1 = deconv_bn_relu(
input=deconv1_2,
output_chn=256,
is_training=phase_flag,
name='deconv2_1')
concat_2 = tf.concat([deconv2_1, conv3_2],
axis=concat_dim, name='concat_2')
# deconv2_2 (1, 24, 24, 24, 512)
deconv2_2_in = conv_bn_relu(
input=concat_2,
output_chn=128,
kernel_size=3,
stride=1,
use_bias=False,
is_training=phase_flag,
name='deconv2_2')
# deconv2_2_frac = fractal_net(
# is_global_path_list[4],
# global_path_list[4],
# local_path_list[4],
# self.Blocks,
# self.Columns)(deconv2_2_in)
deconv2_2 = deconv2_2_in
# deconv2_2 = deconv2_2_in + deconv2_2_frac
# deconv2_2 (1, 24, 24, 24, 128)
deconv3_1 = deconv_bn_relu(
input=deconv2_2,
output_chn=128,
is_training=phase_flag,
name='deconv3_1')
# deconv3_1 (1, 48, 48, 48, 128)
concat_3 = tf.concat([deconv3_1, conv2_1],
axis=concat_dim, name='concat_3')
# deconv3_1 (1, 48, 48, 48, 256)
deconv3_2_in = conv_bn_relu(
input=concat_3,
output_chn=64,
kernel_size=3,
stride=1,
use_bias=False,
is_training=phase_flag,
name='deconv3_2')
# deconv3_2_frac = fractal_net(
# is_global_path_list[5],
# global_path_list[5],
# local_path_list[5],
# self.Blocks,
# self.Columns)(deconv3_2_in)
deconv3_2 = deconv3_2_in
# deconv3_2 = deconv3_2_in + deconv3_2_frac
# deconv3_2 (1, 48, 48, 48, 64)
deconv4_1 = deconv_bn_relu(
input=deconv3_2,
output_chn=64,
is_training=phase_flag,
name='deconv4_1')
# deconv4_2 (1, 96, 96, 96, 32)
concat_4 = tf.concat([deconv4_1, conv1_1],
axis=concat_dim, name='concat_4')
# deconv4_2 (1, 96, 96, 96, 64)
deconv4_2 = conv_bn_relu(
input=concat_4,
output_chn=32,
kernel_size=3,
stride=1,
use_bias=False,
is_training=phase_flag,
name='deconv4_2') # deconv4_2 (1, 96, 96, 96, 32)
pre_pro = conv3d(
input=deconv4_2,
output_chn=output_channel,
kernel_size=1,
stride=1,
use_bias=True,
name='pre_pro')
# pred_frac = fractal_net(is_global_path_list[3],global_path_list[3],local_path_list[3],self.Blocks,self.Columns)(pre_pro)
pred_prob = pre_pro  # pred_prob (1, 96, 96, 96, 8): the final prediction logits
# ======================For predicition=============================
# auxiliary prediction 0
aux0_conv = conv3d(
input=deconv1_2,
output_chn=output_channel,
kernel_size=1,
stride=1,
use_bias=True,
name='aux0_conv') # aux0_conv (1, 12, 12, 12, 8) 8 class output
aux0_deconv_1 = Deconv3d(
input=aux0_conv,
output_chn=output_channel,
name='aux0_deconv_1') # aux0_deconv_1 (1, 24, 24, 24, 8)
aux0_deconv_2 = Deconv3d(
input=aux0_deconv_1,
output_chn=output_channel,
name='aux0_deconv_2') # aux0_deconv_2 (1, 48, 48, 48, 8)
aux0_prob = Deconv3d(
input=aux0_deconv_2,
output_chn=output_channel,
name='aux0_prob') # aux0_prob (1, 96, 96, 96, 8)
# auxiliary prediction 1
aux1_conv = conv3d(
input=deconv2_2,
output_chn=output_channel,
kernel_size=1,
stride=1,
use_bias=True,
name='aux1_conv') # aux1_conv (1, 24, 24, 24, 8)
aux1_deconv_1 = Deconv3d(
input=aux1_conv,
output_chn=output_channel,
name='aux1_deconv_1') # aux1_deconv_1 (1, 48, 48, 48, 8)
aux1_prob = Deconv3d(
input=aux1_deconv_1,
output_chn=output_channel,
name='aux1_prob') # aux1_prob (1, 96, 96, 96, 8)
# auxiliary prediction 2
aux2_conv = conv3d(
input=deconv3_2,
output_chn=output_channel,
kernel_size=1,
stride=1,
use_bias=True,
name='aux2_conv') # aux2_conv (1, 48, 48, 48, 8)
aux2_prob = Deconv3d(
input=aux2_conv,
output_chn=output_channel,
name='aux2_prob') # aux2_prob (1, 96, 96, 96, 8)
soft_prob = tf.nn.softmax(pred_prob, name='pred_soft')
pred_label = tf.argmax(soft_prob, axis=4, name='argmax')
return pred_prob, pred_label, aux0_prob, aux1_prob, aux2_prob
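# Shape sketch for the graph above (batch size 1 and a 96^3 patch assumed):
#   inputI = tf.placeholder(tf.float32, [1, 96, 96, 96, 1])
#   pred_prob, pred_label, aux0, aux1, aux2 = unet(inputI, output_channel=8)
#   pred_prob: (1, 96, 96, 96, 8); pred_label: (1, 96, 96, 96)
#   each aux prob is upsampled back to (1, 96, 96, 96, 8) for deep supervision.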
def unet_resnet(input_pred, input_img, output_channel, stage):
input_shape = input_img.shape
input_channel = input_shape.dims[-1].value
input_pred_softmax = tf.nn.softmax(input_pred, name='softmax_ss' + stage)
forground_input_pred = tf.expand_dims(input_pred_softmax[:, :, :, :, 1], axis=-1)
input_concat = tf.concat([forground_input_pred, input_img], axis=-1) # (1, 96, 96, 96, 2)
input_attention = forground_input_pred * input_img # (1, 96, 96, 96, input_channel)
# conv block1
conv_bn_1_1 = conv_bn_relu(input=input_attention, output_chn=16, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block1_conv1')
input_cat = tf.concat([input_attention, input_attention, input_attention, input_attention,
input_attention, input_attention, input_attention, input_attention], axis=-1)
# the tiling factor differs for odd vs. even input channel counts
if input_channel % 2 == 0 or input_channel == 1:
input_tile = tf.tile(input=input_attention, multiples=[1, 1, 1, 1, int(16/input_channel)], name='tile' + stage)
else:
input_tile = tf.tile(input=input_attention, multiples=[1, 1, 1, 1, int(16/(input_channel-1))], name='tile' + stage)
input_tile = input_tile[:,:,:,:,0:16]
conv_bn_skip_1_1 = input_tile + conv_bn_1_1
pool1_1 = tf.layers.max_pooling3d(inputs=conv_bn_skip_1_1, pool_size=2, strides=2, name=stage + 'pool_1_1')
# conv block2
conv_bn_2_1 = conv_bn_relu(input=pool1_1, output_chn=32, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block2_conv1')
conv_bn_2_2 = conv_bn_relu(input=conv_bn_2_1, output_chn=32, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block2_conv2')
pool1_1_cat = tf.concat([pool1_1, pool1_1], axis=-1)
conv_bn_skip_2_1 = pool1_1_cat + conv_bn_2_2
pool_2_1 = tf.layers.max_pooling3d(inputs=conv_bn_skip_2_1, pool_size=2, strides=2, name=stage + 'pool2_2')
# conv block3
conv_bn_3_1 = conv_bn_relu(input=pool_2_1, output_chn=64, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block3_conv1')
conv_bn_3_2 = conv_bn_relu(input=conv_bn_3_1, output_chn=64, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block3_conv2')
conv_bn_3_3 = conv_bn_relu(input=conv_bn_3_2, output_chn=64, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block3_conv3')
pool_2_1_cat = tf.concat([pool_2_1, pool_2_1], axis=-1)
conv_bn_skip_3_1 = conv_bn_3_3 + pool_2_1_cat
pool3_1 = tf.layers.max_pooling3d(inputs=conv_bn_skip_3_1, pool_size=2, strides=2, name=stage + 'pool3_1')
# conv block4
conv_bn_4_1 = conv_bn_relu(input=pool3_1, output_chn=128, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block4_conv1')
conv_bn_4_2 = conv_bn_relu(input=conv_bn_4_1, output_chn=128, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block4_conv2')
conv_bn_4_3 = conv_bn_relu(input=conv_bn_4_2, output_chn=128, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block4_conv3')
pool3_1_cat = tf.concat([pool3_1, pool3_1], axis=-1)
conv_bn_skip_4_1 = conv_bn_4_3 + pool3_1_cat
pool4_1 = tf.layers.max_pooling3d(inputs=conv_bn_skip_4_1, pool_size=2, strides=2, name=stage + 'pool4_1')
# conv block5
conv_bn_5_1 = conv_bn_relu(input=pool4_1, output_chn=256, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block5_conv1')
conv_bn_5_2 = conv_bn_relu(input=conv_bn_5_1, output_chn=256, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block5_conv2')
conv_bn_5_3 = conv_bn_relu(input=conv_bn_5_2, output_chn=256, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block5_conv3')
pool4_1_cat = tf.concat([pool4_1, pool4_1], axis=-1)
conv_bn_skip_5_1 = conv_bn_5_3 + pool4_1_cat
# upsampling conv block6
deconv_bn_1_1 = deconv_bn_relu(input=conv_bn_skip_5_1, output_chn=128, is_training=True,
name=stage + 'deconv_1_1')
concat1 = tf.concat([deconv_bn_1_1, conv_bn_skip_4_1], axis=-1, name=stage + 'concat1')
conv_bn_6_1 = conv_bn_relu(input=concat1, output_chn=256, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block6_conv1')
conv_bn_6_2 = conv_bn_relu(input=conv_bn_6_1, output_chn=256, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block6_conv2')
conv_bn_6_3 = conv_bn_relu(input=conv_bn_6_2, output_chn=256, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block6_conv3')
deconv_bn_1_1_cat = tf.concat([deconv_bn_1_1, deconv_bn_1_1], axis=-1)
conv_bn_skip_6_1 = conv_bn_6_3 + deconv_bn_1_1_cat
# conv block7
deconv_bn_2_1 = deconv_bn_relu(input=conv_bn_skip_6_1, output_chn=64, is_training=True,
name=stage + 'deconv_2_1')
concat2 = tf.concat([deconv_bn_2_1, conv_bn_skip_3_1], axis=-1, name=stage + 'concat2')
conv_bn_7_1 = conv_bn_relu(input=concat2, output_chn=128, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block7_conv1')
conv_bn_7_2 = conv_bn_relu(input=conv_bn_7_1, output_chn=128, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block7_conv2')
conv_bn_7_3 = conv_bn_relu(input=conv_bn_7_2, output_chn=128, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block7_conv3')
deconv_bn_2_1_cat = tf.concat([deconv_bn_2_1, deconv_bn_2_1], axis=-1)
conv_bn_skip_7_1 = conv_bn_7_3 + deconv_bn_2_1_cat
# conv block8
deconv_bn_3_1 = deconv_bn_relu(input=conv_bn_skip_7_1, output_chn=32, is_training=True,
name=stage + 'deconv_3_1')
concat3 = tf.concat([deconv_bn_3_1, conv_bn_skip_2_1], axis=-1, name=stage + 'concat3')
conv_bn_8_1 = conv_bn_relu(input=concat3, output_chn=64, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block8_conv1')
conv_bn_8_2 = conv_bn_relu(input=conv_bn_8_1, output_chn=64, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block8_conv2')
deconv_bn_3_1_cat = tf.concat([deconv_bn_3_1, deconv_bn_3_1], axis=-1)
conv_bn_skip_8_1 = conv_bn_8_2 + deconv_bn_3_1_cat
# conv block9
deconv_bn_4_1 = deconv_bn_relu(input=conv_bn_skip_8_1, output_chn=16, is_training=True,
name=stage + 'deconv_4_1')
concat4 = tf.concat([deconv_bn_4_1, conv_bn_skip_1_1], axis=-1, name=stage + 'concat4')
conv_bn_9_1 = conv_bn_relu(input=concat4, output_chn=32, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block9_conv1')
deconv_bn_4_1_cat = tf.concat([deconv_bn_4_1, deconv_bn_4_1], axis=-1)
conv_bn_skip_9_1 = conv_bn_9_1 + deconv_bn_4_1_cat
# prediction layer
pred = conv3d(input=conv_bn_skip_9_1, output_chn=output_channel, kernel_size=1, stride=1, use_bias=True,
name=stage + 'pred')
soft_prob_v = tf.nn.softmax(pred, name='pred_soft_v')
pred_label_v = tf.argmax(soft_prob_v, axis=4, name='argmax_v')
return pred, pred_label_v
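# unet_resnet refines a coarse first-stage result: the softmaxed foreground
# channel gates the raw image before re-segmentation. Hedged wiring sketch
# (placeholder shapes are assumptions; the helpers below must be defined
# before this is actually called):
# img = tf.placeholder(tf.float32, [1, 96, 96, 96, 1], name='img_in')
# coarse = tf.placeholder(tf.float32, [1, 96, 96, 96, 2], name='coarse_in')
# refined, refined_label = unet_resnet(coarse, img, output_channel=2, stage='stage2_')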
def conv3d(
input,
output_chn,
kernel_size,
stride,
use_bias=False,
name='conv'):
return tf.layers.conv3d(
inputs=input,
filters=output_chn,
kernel_size=kernel_size,
strides=stride,
padding='same',
data_format='channels_last',
kernel_initializer=tf.truncated_normal_initializer(
0.0,
0.01),
kernel_regularizer=slim.l2_regularizer(0.0005),
use_bias=use_bias,
name=name)
def conv_bn_relu(
input,
output_chn,
kernel_size,
stride,
use_bias,
is_training,
name):
with tf.variable_scope(name):
conv = conv3d(
input,
output_chn,
kernel_size,
stride,
use_bias,
name='conv')
bn = tf.contrib.layers.batch_norm(
conv,
decay=0.9,
updates_collections=None,
epsilon=1e-5,
scale=True,
is_training=is_training,
scope="batch_norm")
relu = tf.nn.relu(bn, name='relu')
return relu
# deconvolution
def Deconv3d(input, output_chn, name):
batch, in_depth, in_height, in_width, in_channels = [
int(d) for d in input.get_shape()]
filter = tf.get_variable(
name + "/filter",
shape=[
4,
4,
4,
output_chn,
in_channels],
dtype=tf.float32,
initializer=tf.random_normal_initializer(
0,
0.01),
regularizer=slim.l2_regularizer(0.0005))
conv = tf.nn.conv3d_transpose(
value=input,
filter=filter,
output_shape=[
batch,
in_depth * 2,
in_height * 2,
in_width * 2,
output_chn],
strides=[
1,
2,
2,
2,
1],
padding="SAME",
name=name)
return conv
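# Deconv3d always doubles each spatial side (kernel 4, stride 2). Quick
# shape check under an assumed static input shape:
_x = tf.zeros([1, 12, 12, 12, 8])
_y = Deconv3d(_x, output_chn=4, name='deconv_shape_check')
print(_y.shape)  # expected (1, 24, 24, 24, 4)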
def Unsample(input, output_chn, name):
batch, in_depth, in_height, in_width, in_channels = [
int(d) for d in input.get_shape()]
base = input.shape[-2]
data = 96 // int(base)  # integer upscale factor: conv3d_transpose strides must be ints
print("upsample factor", data)
filter = tf.get_variable(
name + "/filter",
shape=[
4,
4,
4,
output_chn,
in_channels],
dtype=tf.float32,
initializer=tf.random_normal_initializer(
0,
0.01),
regularizer=slim.l2_regularizer(0.0005))
conv = tf.nn.conv3d_transpose(
value=input, filter=filter, output_shape=[
batch, 96, 96, 96, output_chn], strides=[
1, data, data, data, 1], padding="SAME", name=name)
return conv
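# Unsample maps a cubic feature of side 96 / 2**k back to 96**3 with one
# transpose convolution; the integer division above gives the stride. Sketch
# (input shape assumed):
_f = tf.zeros([1, 24, 24, 24, 32])
_up = Unsample(_f, output_chn=8, name='upsample_check')  # stride 96 // 24 == 4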
def deconv_bn_relu(input, output_chn, is_training, name):
with tf.variable_scope(name):
conv = Deconv3d(input, output_chn, name='deconv')
# with tf.device("/cpu:0"):
bn = tf.contrib.layers.batch_norm(
conv,
decay=0.9,
updates_collections=None,
epsilon=1e-5,
scale=True,
is_training=is_training,
scope="batch_norm")
relu = tf.nn.relu(bn, name='relu')
return relu
def conv_bn_relu_x3(
input,
output_chn,
kernel_size,
stride,
use_bias,
is_training,
name):
with tf.variable_scope(name):
z = conv_bn_relu(
input,
output_chn,
kernel_size,
stride,
use_bias,
is_training,
"dense1")
z_out = conv_bn_relu(
z,
output_chn,
kernel_size,
stride,
use_bias,
is_training,
"dense2")
z_out = conv_bn_relu(
z_out,
output_chn,
kernel_size,
stride,
use_bias,
is_training,
"dense3")
return z + z_out
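# conv_bn_relu_x3 is a residual block: three conv-BN-ReLU stages plus a
# shortcut from the first stage's output. Illustrative call (shape assumed):
_feat = tf.zeros([1, 24, 24, 24, 64])
_res = conv_bn_relu_x3(_feat, output_chn=64, kernel_size=3, stride=1,
                       use_bias=False, is_training=True, name='res_block_demo')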
| 34.819182
| 126
| 0.597065
|
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tflearn.layers.conv import global_avg_pool
conv1_relu = tf.nn.relu(conv1_bn, name='conv1_relu')
pool1_in = tf.layers.max_pooling3d(
inputs=conv1_relu, pool_size=2, strides=2, name='pool1')
pool1 = pool1_in
conv2_1 = conv3d(
input=pool1,
output_chn=128,
kernel_size=3,
stride=1,
use_bias=False,
name='conv2')
conv2_bn = tf.contrib.layers.batch_norm(
conv2_1,
decay=0.9,
updates_collections=None,
epsilon=1e-5,
scale=True,
is_training=phase_flag,
scope="conv2_batch_norm")
conv2_relu = tf.nn.relu(conv2_bn, name='conv2_relu')
pool2_in = tf.layers.max_pooling3d(
inputs=conv2_relu, pool_size=2, strides=2, name='pool2')
pool2 = pool2_in
conv3_1 = conv3d(
input=pool2,
output_chn=256,
kernel_size=3,
stride=1,
use_bias=False,
name='conv3a')
conv3_1_bn = tf.contrib.layers.batch_norm(
conv3_1,
decay=0.9,
updates_collections=None,
epsilon=1e-5,
scale=True,
is_training=phase_flag,
scope="conv3_1_batch_norm")
conv3_1_relu = tf.nn.relu(conv3_1_bn, name='conv3_1_relu')
conv3_2 = conv3d(
input=conv3_1_relu,
output_chn=256,
kernel_size=3,
stride=1,
use_bias=False,
name='conv3b')
conv3_2 = conv3_2 + conv3_1
conv3_2_bn = tf.contrib.layers.batch_norm(
conv3_2,
decay=0.9,
updates_collections=None,
epsilon=1e-5,
scale=True,
is_training=phase_flag,
scope="conv3_2_batch_norm")
conv3_2_relu = tf.nn.relu(conv3_2_bn, name='conv3_2_relu')
pool3_in = tf.layers.max_pooling3d(
inputs=conv3_2_relu, pool_size=2, strides=2, name='pool3')
pool3 = pool3_in
conv4_1 = conv3d(
input=pool3,
output_chn=512,
kernel_size=3,
stride=1,
use_bias=False,
name='conv4a')
conv4_1_bn = tf.contrib.layers.batch_norm(
conv4_1,
decay=0.9,
updates_collections=None,
epsilon=1e-5,
scale=True,
is_training=phase_flag,
scope="conv4_1_batch_norm")
conv4_1_relu = tf.nn.relu(conv4_1_bn, name='conv4_1_relu')
conv4_2 = conv3d(
input=conv4_1_relu,
output_chn=512,
kernel_size=3,
stride=1,
use_bias=False,
name='conv4b')
conv4_2 = conv4_2 + conv4_1
conv4_2_bn = tf.contrib.layers.batch_norm(
conv4_2,
decay=0.9,
updates_collections=None,
epsilon=1e-5,
scale=True,
is_training=phase_flag,
scope="conv4_2_batch_norm")
conv4_2_relu = tf.nn.relu(conv4_2_bn, name='conv4_2_relu')
pool4 = tf.layers.max_pooling3d(
inputs=conv4_2_relu,
pool_size=2,
strides=2,
name='pool4')
conv5_1 = conv_bn_relu(
input=pool4,
output_chn=512,
kernel_size=3,
stride=1,
use_bias=False,
is_training=phase_flag,
name='conv5_1')
conv5_2 = conv_bn_relu(
input=conv5_1,
output_chn=512,
kernel_size=3,
stride=1,
use_bias=False,
is_training=phase_flag,
name='conv5_2')
deconv1_1 = deconv_bn_relu(
input=conv5_2,
output_chn=512,
is_training=phase_flag,
name='deconv1_1')
concat_1 = tf.concat([deconv1_1, conv4_2],
axis=concat_dim, name='concat_1')
deconv1_2_in = conv_bn_relu(
input=concat_1,
output_chn=256,
kernel_size=3,
stride=1,
use_bias=False,
is_training=phase_flag,
name='deconv1_2')
deconv1_2 = deconv1_2_in
deconv2_1 = deconv_bn_relu(
input=deconv1_2,
output_chn=256,
is_training=phase_flag,
name='deconv2_1')
concat_2 = tf.concat([deconv2_1, conv3_2],
axis=concat_dim, name='concat_2')
deconv2_2_in = conv_bn_relu(
input=concat_2,
output_chn=128,
kernel_size=3,
stride=1,
use_bias=False,
is_training=phase_flag,
name='deconv2_2')
deconv2_2 = deconv2_2_in
deconv3_1 = deconv_bn_relu(
input=deconv2_2,
output_chn=128,
is_training=phase_flag,
name='deconv3_1')
concat_3 = tf.concat([deconv3_1, conv2_1],
axis=concat_dim, name='concat_3')
deconv3_2_in = conv_bn_relu(
input=concat_3,
output_chn=64,
kernel_size=3,
stride=1,
use_bias=False,
is_training=phase_flag,
name='deconv3_2')
deconv3_2 = deconv3_2_in
deconv4_1 = deconv_bn_relu(
input=deconv3_2,
output_chn=64,
is_training=phase_flag,
name='deconv4_1')
concat_4 = tf.concat([deconv4_1, conv1_1],
axis=concat_dim, name='concat_4')
deconv4_2 = conv_bn_relu(
input=concat_4,
output_chn=32,
kernel_size=3,
stride=1,
use_bias=False,
is_training=phase_flag,
name='deconv4_2')
pre_pro = conv3d(
input=deconv4_2,
output_chn=output_channel,
kernel_size=1,
stride=1,
use_bias=True,
name='pre_pro')
pred_prob = pre_pro
aux0_conv = conv3d(
input=deconv1_2,
output_chn=output_channel,
kernel_size=1,
stride=1,
use_bias=True,
name='aux0_conv')
aux0_deconv_1 = Deconv3d(
input=aux0_conv,
output_chn=output_channel,
name='aux0_deconv_1')
aux0_deconv_2 = Deconv3d(
input=aux0_deconv_1,
output_chn=output_channel,
name='aux0_deconv_2')
aux0_prob = Deconv3d(
input=aux0_deconv_2,
output_chn=output_channel,
name='aux0_prob')
aux1_conv = conv3d(
input=deconv2_2,
output_chn=output_channel,
kernel_size=1,
stride=1,
use_bias=True,
name='aux1_conv')
aux1_deconv_1 = Deconv3d(
input=aux1_conv,
output_chn=output_channel,
name='aux1_deconv_1')
aux1_prob = Deconv3d(
input=aux1_deconv_1,
output_chn=output_channel,
name='aux1_prob')
aux2_conv = conv3d(
input=deconv3_2,
output_chn=output_channel,
kernel_size=1,
stride=1,
use_bias=True,
name='aux2_conv')
aux2_prob = Deconv3d(
input=aux2_conv,
output_chn=output_channel,
name='aux2_prob')
soft_prob = tf.nn.softmax(pred_prob, name='pred_soft')
pred_label = tf.argmax(soft_prob, axis=4, name='argmax')
return pred_prob, pred_label, aux0_prob, aux1_prob, aux2_prob
def unet_resnet(input_pred, input_img, output_channel, stage):
input_shape = input_img.shape
input_channel = input_shape.dims[-1].value
input_pred_softmax = tf.nn.softmax(input_pred, name='softmax_ss' + stage)
forground_input_pred = tf.expand_dims(input_pred_softmax[:, :, :, :, 1], axis=-1)
input_concat = tf.concat([forground_input_pred, input_img], axis=-1)
input_attention = forground_input_pred * input_img
conv_bn_1_1 = conv_bn_relu(input=input_attention, output_chn=16, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block1_conv1')
input_cat = tf.concat([input_attention, input_attention, input_attention, input_attention,
input_attention, input_attention, input_attention, input_attention], axis=-1)
if input_channel % 2 == 0 or input_channel == 1:
input_tile = tf.tile(input=input_attention, multiples=[1, 1, 1, 1, int(16/input_channel)], name='tile' + stage)
else:
input_tile = tf.tile(input=input_attention, multiples=[1, 1, 1, 1, int(16/(input_channel-1))], name='tile' + stage)
input_tile = input_tile[:,:,:,:,0:16]
conv_bn_skip_1_1 = input_tile + conv_bn_1_1
pool1_1 = tf.layers.max_pooling3d(inputs=conv_bn_skip_1_1, pool_size=2, strides=2, name=stage + 'pool_1_1')
conv_bn_2_1 = conv_bn_relu(input=pool1_1, output_chn=32, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block2_conv1')
conv_bn_2_2 = conv_bn_relu(input=conv_bn_2_1, output_chn=32, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block2_conv2')
pool1_1_cat = tf.concat([pool1_1, pool1_1], axis=-1)
conv_bn_skip_2_1 = pool1_1_cat + conv_bn_2_2
pool_2_1 = tf.layers.max_pooling3d(inputs=conv_bn_skip_2_1, pool_size=2, strides=2, name=stage + 'pool2_2')
conv_bn_3_1 = conv_bn_relu(input=pool_2_1, output_chn=64, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block3_conv1')
conv_bn_3_2 = conv_bn_relu(input=conv_bn_3_1, output_chn=64, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block3_conv2')
conv_bn_3_3 = conv_bn_relu(input=conv_bn_3_2, output_chn=64, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block3_conv3')
pool_2_1_cat = tf.concat([pool_2_1, pool_2_1], axis=-1)
conv_bn_skip_3_1 = conv_bn_3_3 + pool_2_1_cat
pool3_1 = tf.layers.max_pooling3d(inputs=conv_bn_skip_3_1, pool_size=2, strides=2, name=stage + 'pool3_1')
conv_bn_4_1 = conv_bn_relu(input=pool3_1, output_chn=128, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block4_conv1')
conv_bn_4_2 = conv_bn_relu(input=conv_bn_4_1, output_chn=128, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block4_conv2')
conv_bn_4_3 = conv_bn_relu(input=conv_bn_4_2, output_chn=128, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block4_conv3')
pool3_1_cat = tf.concat([pool3_1, pool3_1], axis=-1)
conv_bn_skip_4_1 = conv_bn_4_3 + pool3_1_cat
pool4_1 = tf.layers.max_pooling3d(inputs=conv_bn_skip_4_1, pool_size=2, strides=2, name=stage + 'pool4_1')
conv_bn_5_1 = conv_bn_relu(input=pool4_1, output_chn=256, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block5_conv1')
conv_bn_5_2 = conv_bn_relu(input=conv_bn_5_1, output_chn=256, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block5_conv2')
conv_bn_5_3 = conv_bn_relu(input=conv_bn_5_2, output_chn=256, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block5_conv3')
pool4_1_cat = tf.concat([pool4_1, pool4_1], axis=-1)
conv_bn_skip_5_1 = conv_bn_5_3 + pool4_1_cat
deconv_bn_1_1 = deconv_bn_relu(input=conv_bn_skip_5_1, output_chn=128, is_training=True,
name=stage + 'deconv_1_1')
concat1 = tf.concat([deconv_bn_1_1, conv_bn_skip_4_1], axis=-1, name=stage + 'concat1')
conv_bn_6_1 = conv_bn_relu(input=concat1, output_chn=256, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block6_conv1')
conv_bn_6_2 = conv_bn_relu(input=conv_bn_6_1, output_chn=256, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block6_conv2')
conv_bn_6_3 = conv_bn_relu(input=conv_bn_6_2, output_chn=256, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block6_conv3')
deconv_bn_1_1_cat = tf.concat([deconv_bn_1_1, deconv_bn_1_1], axis=-1)
conv_bn_skip_6_1 = conv_bn_6_3 + deconv_bn_1_1_cat
deconv_bn_2_1 = deconv_bn_relu(input=conv_bn_skip_6_1, output_chn=64, is_training=True,
name=stage + 'deconv_2_1')
concat2 = tf.concat([deconv_bn_2_1, conv_bn_skip_3_1], axis=-1, name=stage + 'concat2')
conv_bn_7_1 = conv_bn_relu(input=concat2, output_chn=128, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block7_conv1')
conv_bn_7_2 = conv_bn_relu(input=conv_bn_7_1, output_chn=128, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block7_conv2')
conv_bn_7_3 = conv_bn_relu(input=conv_bn_7_2, output_chn=128, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block7_conv3')
deconv_bn_2_1_cat = tf.concat([deconv_bn_2_1, deconv_bn_2_1], axis=-1)
conv_bn_skip_7_1 = conv_bn_7_3 + deconv_bn_2_1_cat
deconv_bn_3_1 = deconv_bn_relu(input=conv_bn_skip_7_1, output_chn=32, is_training=True,
name=stage + 'deconv_3_1')
concat3 = tf.concat([deconv_bn_3_1, conv_bn_skip_2_1], axis=-1, name=stage + 'concat3')
conv_bn_8_1 = conv_bn_relu(input=concat3, output_chn=64, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block8_conv1')
conv_bn_8_2 = conv_bn_relu(input=conv_bn_8_1, output_chn=64, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block8_conv2')
deconv_bn_3_1_cat = tf.concat([deconv_bn_3_1, deconv_bn_3_1], axis=-1)
conv_bn_skip_8_1 = conv_bn_8_2 + deconv_bn_3_1_cat
deconv_bn_4_1 = deconv_bn_relu(input=conv_bn_skip_8_1, output_chn=16, is_training=True,
name=stage + 'deconv_4_1')
concat4 = tf.concat([deconv_bn_4_1, conv_bn_skip_1_1], axis=-1, name=stage + 'concat4')
conv_bn_9_1 = conv_bn_relu(input=concat4, output_chn=32, kernel_size=3, stride=1, use_bias=False,
is_training=True, name=stage + 'block9_conv1')
deconv_bn_4_1_cat = tf.concat([deconv_bn_4_1, deconv_bn_4_1], axis=-1)
conv_bn_skip_9_1 = conv_bn_9_1 + deconv_bn_4_1_cat
pred = conv3d(input=conv_bn_skip_9_1, output_chn=output_channel, kernel_size=1, stride=1, use_bias=True,
name=stage + 'pred')
soft_prob_v = tf.nn.softmax(pred, name='pred_soft_v')
pred_label_v = tf.argmax(soft_prob_v, axis=4, name='argmax_v')
return pred, pred_label_v
def conv3d(
input,
output_chn,
kernel_size,
stride,
use_bias=False,
name='conv'):
return tf.layers.conv3d(
inputs=input,
filters=output_chn,
kernel_size=kernel_size,
strides=stride,
padding='same',
data_format='channels_last',
kernel_initializer=tf.truncated_normal_initializer(
0.0,
0.01),
kernel_regularizer=slim.l2_regularizer(0.0005),
use_bias=use_bias,
name=name)
def conv_bn_relu(
input,
output_chn,
kernel_size,
stride,
use_bias,
is_training,
name):
with tf.variable_scope(name):
conv = conv3d(
input,
output_chn,
kernel_size,
stride,
use_bias,
name='conv')
bn = tf.contrib.layers.batch_norm(
conv,
decay=0.9,
updates_collections=None,
epsilon=1e-5,
scale=True,
is_training=is_training,
scope="batch_norm")
relu = tf.nn.relu(bn, name='relu')
return relu
def Deconv3d(input, output_chn, name):
batch, in_depth, in_height, in_width, in_channels = [
int(d) for d in input.get_shape()]
filter = tf.get_variable(
name + "/filter",
shape=[
4,
4,
4,
output_chn,
in_channels],
dtype=tf.float32,
initializer=tf.random_normal_initializer(
0,
0.01),
regularizer=slim.l2_regularizer(0.0005))
conv = tf.nn.conv3d_transpose(
value=input,
filter=filter,
output_shape=[
batch,
in_depth * 2,
in_height * 2,
in_width * 2,
output_chn],
strides=[
1,
2,
2,
2,
1],
padding="SAME",
name=name)
return conv
def Unsample(input, output_chn, name):
batch, in_depth, in_height, in_width, in_channels = [
int(d) for d in input.get_shape()]
base = input.shape[-2]
data = 96 // int(base)
print("upsample factor", data)
filter = tf.get_variable(
name + "/filter",
shape=[
4,
4,
4,
output_chn,
in_channels],
dtype=tf.float32,
initializer=tf.random_normal_initializer(
0,
0.01),
regularizer=slim.l2_regularizer(0.0005))
conv = tf.nn.conv3d_transpose(
value=input, filter=filter, output_shape=[
batch, 96, 96, 96, output_chn], strides=[
1, data, data, data, 1], padding="SAME", name=name)
return conv
def deconv_bn_relu(input, output_chn, is_training, name):
with tf.variable_scope(name):
conv = Deconv3d(input, output_chn, name='deconv')
bn = tf.contrib.layers.batch_norm(
conv,
decay=0.9,
updates_collections=None,
epsilon=1e-5,
scale=True,
is_training=is_training,
scope="batch_norm")
relu = tf.nn.relu(bn, name='relu')
return relu
def conv_bn_relu_x3(
input,
output_chn,
kernel_size,
stride,
use_bias,
is_training,
name):
with tf.variable_scope(name):
z = conv_bn_relu(
input,
output_chn,
kernel_size,
stride,
use_bias,
is_training,
"dense1")
z_out = conv_bn_relu(
z,
output_chn,
kernel_size,
stride,
use_bias,
is_training,
"dense2")
z_out = conv_bn_relu(
z_out,
output_chn,
kernel_size,
stride,
use_bias,
is_training,
"dense3")
return z + z_out
| true
| true
|
79042649ef5f7122a4ceac473e6d6051996faa2e
| 16,376
|
py
|
Python
|
rls/utils/build_networks.py
|
yisuoyanyudmj/RLs-1
|
a336b57e804507bca23cbadc3b5af1924c80d942
|
[
"Apache-2.0"
] | 1
|
2021-01-11T18:37:57.000Z
|
2021-01-11T18:37:57.000Z
|
rls/utils/build_networks.py
|
kiminh/RLs
|
a336b57e804507bca23cbadc3b5af1924c80d942
|
[
"Apache-2.0"
] | null | null | null |
rls/utils/build_networks.py
|
kiminh/RLs
|
a336b57e804507bca23cbadc3b5af1924c80d942
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import tensorflow as tf
from copy import deepcopy
from abc import ABC, abstractmethod
from tensorflow.keras import Model as M
from rls.utils.indexs import OutputNetworkType
from rls.nn.networks import get_visual_network_from_type
from rls.nn.models import get_output_network_from_type
from rls.nn.networks import (MultiVectorNetwork,
MultiVisualNetwork,
EncoderNetwork,
MemoryNetwork)
from rls.utils.logging_utils import get_logger
logger = get_logger(__name__)
class RepresentationNetwork(ABC):
def __init__(self, name: str = 'test'):
self.name = name
self.h_dim = None
@abstractmethod
def __call__(self):
pass
@property
@abstractmethod
def trainable_variables(self):
pass
@property
@abstractmethod
def weights(self):
pass
@property
@abstractmethod
def _policy_models(self):
pass
@property
@abstractmethod
def _all_models(self):
pass
class DefaultRepresentationNetwork(RepresentationNetwork):
'''
visual_s -> visual_net -> feat ↘
feat -> encoder_net -> feat ↘ ↗ feat
s -> vector_net -> feat ↗ -> memory_net ->
cell_state ↗ ↘ cell_state
'''
def __init__(self,
name: str = 'test',
vec_dims=[],
vis_dims=[],
vector_net_kwargs: dict = {},
visual_net_kwargs: dict = {},
encoder_net_kwargs: dict = {},
memory_net_kwargs: dict = {}):
super().__init__(name)
self.vector_net = MultiVectorNetwork(vec_dims, **vector_net_kwargs)
logger.debug('initialize vector network successfully.')
self.visual_net = MultiVisualNetwork(vis_dims, **visual_net_kwargs)
logger.debug('initialize visual network successfully.')
encoder_dim = self.vector_net.h_dim + self.visual_net.h_dim
self.encoder_net = EncoderNetwork(encoder_dim, **encoder_net_kwargs)
logger.debug('initialize encoder network successfully.')
memory_dim = self.encoder_net.h_dim
self.memory_net = MemoryNetwork(memory_dim, **memory_net_kwargs)
logger.debug('initialize memory network successfully.')
self.h_dim = self.memory_net.h_dim
def split(self, batch_size, data):
'''TODO: Annotation
params:
batch_size: int
data: [B, x]
'''
if self.memory_net.use_rnn:
data = tf.reshape(data, [batch_size, -1, tf.shape(data)[-1]])
d, d_ = data[:, :-1], data[:, 1:]
d, d_ = tf.reshape(d, [-1, tf.shape(d)[-1]]), tf.reshape(d_, [-1, tf.shape(d_)[-1]])
return d, d_
else:
return tf.split(data, num_or_size_splits=2, axis=0)
def __call__(self, s, visual_s, cell_state, *, need_split=False):
'''
params:
s: [B*T, x]
visual_s: [B*T, y]
cell_state: Tuple([B, z],)
return:
feat: [B, a]
cell_state: Tuple([B, z],)
'''
batch_size = tf.shape(s)[0]
if self.memory_net.use_rnn:
s = tf.reshape(s, [-1, tf.shape(s)[-1]]) # [B, T+1, N] => [B*(T+1), N]
if self.visual_net.use_visual:
visual_s = tf.reshape(visual_s, [-1, *tf.shape(visual_s)[2:]])
feat = self.get_encoder_feature(s, visual_s)
if self.memory_net.use_rnn:
# reshape feature from [B*T, x] to [B, T, x]
feat = tf.reshape(feat, (batch_size, -1, tf.shape(feat)[-1]))
feat, cell_state = self.memory_net(feat, *cell_state)
# reshape feature from [B, T, x] to [B*T, x]
feat = tf.reshape(feat, (-1, tf.shape(feat)[-1]))
if need_split:
feat = self.split(batch_size, feat)
return feat, cell_state
def get_vis_feature(self, visual_s):
'''
params:
visual_s: [B, N, H, W, C]
return:
feat: [B, x]
'''
# TODO
viss = [visual_s[:, i] for i in range(visual_s.shape[1])]
return self.visual_net(*viss)
def get_vec_feature(self, s):
'''
params:
s: [B, x]
return:
feat: [B, y]
'''
return self.vector_net(s)
def get_encoder_feature(self, s, visual_s):
'''
params:
s: [B, x]
visual_s: [B, y]
return:
feat: [B, z]
'''
if self.vector_net.use_vector and self.visual_net.use_visual:
feat = self.get_vec_feature(s)
vis_feat = self.get_vis_feature(visual_s)
feat = tf.concat([feat, vis_feat], axis=-1)
elif self.visual_net.use_visual:
vis_feat = self.get_vis_feature(visual_s)
feat = vis_feat
else:
feat = self.get_vec_feature(s)
encoder_feature = self.encoder_net(feat)
return encoder_feature
@property
def trainable_variables(self):
tv = []
tv += self.vector_net.trainable_variables
tv += self.visual_net.trainable_variables
tv += self.encoder_net.trainable_variables
tv += self.memory_net.trainable_variables
return tv
@property
def weights(self):
ws = []
ws += self.vector_net.weights
ws += self.visual_net.weights
ws += self.encoder_net.weights
ws += self.memory_net.weights
return ws
@property
def _policy_models(self):
models = {}
models.update({self.name + '/' + 'vector_net': self.vector_net})
models.update({self.name + '/' + 'visual_net': self.visual_net})
models.update({self.name + '/' + 'encoder_net': self.encoder_net})
models.update({self.name + '/' + 'memory_net': self.memory_net})
return models
@property
def _all_models(self):
models = {}
models.update({self.name + '/' + 'vector_net': self.vector_net})
models.update({self.name + '/' + 'visual_net': self.visual_net})
models.update({self.name + '/' + 'encoder_net': self.encoder_net})
models.update({self.name + '/' + 'memory_net': self.memory_net})
return models
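# Hedged construction sketch; the contents of the *_net_kwargs dicts depend
# on the sub-network constructors in rls.nn.networks and are assumptions:
# rep_net = DefaultRepresentationNetwork(
#     name='pi',
#     vec_dims=[8],       # one 8-dim vector observation
#     vis_dims=[],        # no visual observations
#     vector_net_kwargs={}, visual_net_kwargs={},
#     encoder_net_kwargs={}, memory_net_kwargs={})
# feat, cell_state = rep_net(s, visual_s, (None,))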
class ValueNetwork:
'''
feat -> value_net -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
assert value_net_type is not None, 'value_net_type must be specified'
super().__init__()
self.name = name
self.representation_net = representation_net
if self.representation_net is not None:
self.value_net = get_output_network_from_type(value_net_type)(
vector_dim=self.representation_net.h_dim, **value_net_kwargs)
else:
self.value_net = get_output_network_from_type(value_net_type)(
**value_net_kwargs)
def __call__(self, s, visual_s, *args, cell_state=(None,), **kwargs):
# feature [B, x]
assert self.representation_net is not None, 'a representation_net is required when calling with raw observations'
feat, cell_state = self.representation_net(s, visual_s, cell_state)
output = self.value_net(feat, *args, **kwargs)
return output, cell_state
def get_value(self, feat, *args, **kwargs):
output = self.value_net(feat, *args, **kwargs)
return output
@property
def trainable_variables(self):
tv = self.representation_net.trainable_variables if self.representation_net else []
tv += self.value_net.trainable_variables
return tv
@property
def weights(self):
ws = self.representation_net.weights if self.representation_net else []
ws += self.value_net.weights
return ws
@property
def _policy_models(self):
models = self.representation_net._policy_models if self.representation_net else {}
models.update({self.name + '/' + 'value_net': self.value_net})
return models
@property
def _all_models(self):
models = self.representation_net._all_models if self.representation_net else {}
models.update({self.name + '/' + 'value_net': self.value_net})
return models
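# Hypothetical wiring (the OutputNetworkType member and value_net_kwargs
# below are assumptions; see rls.nn.models for the real names):
# critic = ValueNetwork(name='critic',
#                       representation_net=rep_net,
#                       value_net_type=OutputNetworkType.CRITIC_VALUE,
#                       value_net_kwargs={'output_shape': 1})
# q, cell_state = critic(s, visual_s)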
class DoubleValueNetwork(ValueNetwork):
'''
↗ value_net1 -> outputs
feat
↘ value_net2 -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
super().__init__(name, representation_net, value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.value_net2 = get_output_network_from_type(value_net_type)(
vector_dim=self.representation_net.h_dim, **value_net_kwargs)
else:
self.value_net2 = get_output_network_from_type(value_net_type)(
**value_net_kwargs)
def __call__(self, s, visual_s, *args, cell_state=(None,), **kwargs):
# feature [B, x]
feat, cell_state = self.representation_net(s, visual_s, cell_state)
output = self.value_net(feat, *args, **kwargs)
output2 = self.value_net2(feat, *args, **kwargs)
return output, output2, cell_state
def get_value(self, feat, *args, **kwargs):
output = self.value_net(feat, *args, **kwargs)
output2 = self.value_net2(feat, *args, **kwargs)
return output, output2
def get_min(self, *args, **kwargs):
return tf.minimum(*self.get_value(*args, **kwargs))
def get_max(self, *args, **kwargs):
return tf.maximum(*self.get_value(*args, **kwargs))
@property
def trainable_variables(self):
return super().trainable_variables + self.value_net2.trainable_variables
@property
def weights(self):
return super().weights + self.value_net2.weights
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'value_net2': self.value_net2})
return models
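# get_min implements the clipped double-Q trick (as in TD3/SAC): TD targets
# use the smaller twin critic to damp overestimation. Sketch (names assumed):
# q_target = tf.stop_gradient(r + gamma * double_critic.get_min(feat_next, a_next))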
class ACNetwork(ValueNetwork):
'''
↗ policy_net -> outputs
feat
↘ value_net -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
policy_net_type: OutputNetworkType = None,
policy_net_kwargs: dict = {},
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
super().__init__(name, representation_net, value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.policy_net = get_output_network_from_type(policy_net_type)(
vector_dim=self.representation_net.h_dim, **policy_net_kwargs)
else:
self.policy_net = get_output_network_from_type(policy_net_type)(
**policy_net_kwargs)
def __call__(self, s, visual_s, *args, cell_state=(None,), **kwargs):
# feature [B, x]
feat, cell_state = self.representation_net(s, visual_s, cell_state)
output = self.policy_net(feat, *args, **kwargs)
return output, cell_state
@property
def actor_trainable_variables(self):
return self.policy_net.trainable_variables
@property
def critic_trainable_variables(self):
return super().trainable_variables
@property
def weights(self):
return super().weights + self.policy_net.weights
@property
def _policy_models(self):
'''Override.'''
models = super()._policy_models
models.update({self.name + '/' + 'policy_net': self.policy_net})
return models
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'policy_net': self.policy_net})
return models
class ACCNetwork(ACNetwork):
'''
Use for PD-DDPG
↗ policy_net -> outputs
feat -> value_net -> outputs
↘ value_net2 -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
policy_net_type: OutputNetworkType = None,
policy_net_kwargs: dict = {},
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {},
value_net2_type: OutputNetworkType = None,
value_net2_kwargs: dict = {}):
super().__init__(name, representation_net,
policy_net_type, policy_net_kwargs,
value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.value_net2 = get_output_network_from_type(value_net2_type)(
vector_dim=self.representation_net.h_dim, **value_net2_kwargs)
else:
self.value_net2 = get_output_network_from_type(value_net2_type)(
**value_net2_kwargs)
@property
def critic_trainable_variables(self):
return super().critic_trainable_variables + self.value_net2.trainable_variables
@property
def value_net_trainable_variables(self):
return super().critic_trainable_variables
@property
def value_net2_trainable_variables(self):
return self.value_net2.trainable_variables
@property
def weights(self):
return super().weights + self.value_net2.weights
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'value_net2': self.value_net2})
return models
class ADoubleCNetwork(ACNetwork):
'''
↗ policy_net -> outputs
feat -> value_net -> outputs
↘ value_net2 -> outputs
'''
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
policy_net_type: OutputNetworkType = None,
policy_net_kwargs: dict = {},
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
super().__init__(name, representation_net,
policy_net_type, policy_net_kwargs,
value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.value_net2 = get_output_network_from_type(value_net_type)(
vector_dim=self.representation_net.h_dim, **value_net_kwargs)
else:
self.value_net2 = get_output_network_from_type(value_net_type)(
**value_net_kwargs)
def get_value(self, feat, *args, **kwargs):
output = self.value_net(feat, *args, **kwargs)
output2 = self.value_net2(feat, *args, **kwargs)
return output, output2
def get_min(self, *args, **kwargs):
return tf.minimum(*self.get_value(*args, **kwargs))
def get_max(self, *args, **kwargs):
return tf.maximum(*self.get_value(*args, **kwargs))
@property
def critic_trainable_variables(self):
return super().trainable_variables + self.value_net2.trainable_variables
@property
def weights(self):
return super().weights + self.value_net2.weights
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'value_net2': self.value_net2})
return models
| 33.975104
| 97
| 0.578163
|
import numpy as np
import tensorflow as tf
from copy import deepcopy
from abc import ABC, abstractmethod
from tensorflow.keras import Model as M
from rls.utils.indexs import OutputNetworkType
from rls.nn.networks import get_visual_network_from_type
from rls.nn.models import get_output_network_from_type
from rls.nn.networks import (MultiVectorNetwork,
MultiVisualNetwork,
EncoderNetwork,
MemoryNetwork)
from rls.utils.logging_utils import get_logger
logger = get_logger(__name__)
class RepresentationNetwork(ABC):
def __init__(self, name: str = 'test'):
self.name = name
self.h_dim = None
@abstractmethod
def __call__(self):
pass
@property
@abstractmethod
def trainable_variables(self):
pass
@property
@abstractmethod
def weights(self):
pass
@property
@abstractmethod
def _policy_models(self):
pass
@property
@abstractmethod
def _all_models(self):
pass
class DefaultRepresentationNetwork(RepresentationNetwork):
def __init__(self,
name: str = 'test',
vec_dims=[],
vis_dims=[],
vector_net_kwargs: dict = {},
visual_net_kwargs: dict = {},
encoder_net_kwargs: dict = {},
memory_net_kwargs: dict = {}):
super().__init__(name)
self.vector_net = MultiVectorNetwork(vec_dims, **vector_net_kwargs)
logger.debug('initialize vector network successfully.')
self.visual_net = MultiVisualNetwork(vis_dims, **visual_net_kwargs)
logger.debug('initialize visual network successfully.')
encoder_dim = self.vector_net.h_dim + self.visual_net.h_dim
self.encoder_net = EncoderNetwork(encoder_dim, **encoder_net_kwargs)
logger.debug('initialize encoder network successfully.')
memory_dim = self.encoder_net.h_dim
self.memory_net = MemoryNetwork(memory_dim, **memory_net_kwargs)
logger.debug('initialize memory network successfully.')
self.h_dim = self.memory_net.h_dim
def split(self, batch_size, data):
if self.memory_net.use_rnn:
data = tf.reshape(data, [batch_size, -1, tf.shape(data)[-1]])
d, d_ = data[:, :-1], data[:, 1:]
d, d_ = tf.reshape(d, [-1, tf.shape(d)[-1]]), tf.reshape(d_, [-1, tf.shape(d_)[-1]])
return d, d_
else:
return tf.split(data, num_or_size_splits=2, axis=0)
def __call__(self, s, visual_s, cell_state, *, need_split=False):
batch_size = tf.shape(s)[0]
if self.memory_net.use_rnn:
s = tf.reshape(s, [-1, tf.shape(s)[-1]])
if self.visual_net.use_visual:
visual_s = tf.reshape(visual_s, [-1, *tf.shape(visual_s)[2:]])
feat = self.get_encoder_feature(s, visual_s)
if self.memory_net.use_rnn:
feat = tf.reshape(feat, (batch_size, -1, tf.shape(feat)[-1]))
feat, cell_state = self.memory_net(feat, *cell_state)
feat = tf.reshape(feat, (-1, tf.shape(feat)[-1]))
if need_split:
feat = self.split(batch_size, feat)
return feat, cell_state
def get_vis_feature(self, visual_s):
viss = [visual_s[:, i] for i in range(visual_s.shape[1])]
return self.visual_net(*viss)
def get_vec_feature(self, s):
return self.vector_net(s)
def get_encoder_feature(self, s, visual_s):
if self.vector_net.use_vector and self.visual_net.use_visual:
feat = self.get_vec_feature(s)
vis_feat = self.get_vis_feature(visual_s)
feat = tf.concat([feat, vis_feat], axis=-1)
elif self.visual_net.use_visual:
vis_feat = self.get_vis_feature(visual_s)
feat = vis_feat
else:
feat = self.get_vec_feature(s)
encoder_feature = self.encoder_net(feat)
return encoder_feature
@property
def trainable_variables(self):
tv = []
tv += self.vector_net.trainable_variables
tv += self.visual_net.trainable_variables
tv += self.encoder_net.trainable_variables
tv += self.memory_net.trainable_variables
return tv
@property
def weights(self):
ws = []
ws += self.vector_net.weights
ws += self.visual_net.weights
ws += self.encoder_net.weights
ws += self.memory_net.weights
return ws
@property
def _policy_models(self):
models = {}
models.update({self.name + '/' + 'vector_net': self.vector_net})
models.update({self.name + '/' + 'visual_net': self.visual_net})
models.update({self.name + '/' + 'encoder_net': self.encoder_net})
models.update({self.name + '/' + 'memory_net': self.memory_net})
return models
@property
def _all_models(self):
models = {}
models.update({self.name + '/' + 'vector_net': self.vector_net})
models.update({self.name + '/' + 'visual_net': self.visual_net})
models.update({self.name + '/' + 'encoder_net': self.encoder_net})
models.update({self.name + '/' + 'memory_net': self.memory_net})
return models
class ValueNetwork:
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
assert value_net_type is not None, 'value_net_type must be specified'
super().__init__()
self.name = name
self.representation_net = representation_net
if self.representation_net is not None:
self.value_net = get_output_network_from_type(value_net_type)(
vector_dim=self.representation_net.h_dim, **value_net_kwargs)
else:
self.value_net = get_output_network_from_type(value_net_type)(
**value_net_kwargs)
def __call__(self, s, visual_s, *args, cell_state=(None,), **kwargs):
assert self.representation_net is not None, 'a representation_net is required when calling with raw observations'
feat, cell_state = self.representation_net(s, visual_s, cell_state)
output = self.value_net(feat, *args, **kwargs)
return output, cell_state
def get_value(self, feat, *args, **kwargs):
output = self.value_net(feat, *args, **kwargs)
return output
@property
def trainable_variables(self):
tv = self.representation_net.trainable_variables if self.representation_net else []
tv += self.value_net.trainable_variables
return tv
@property
def weights(self):
ws = self.representation_net.weights if self.representation_net else []
ws += self.value_net.weights
return ws
@property
def _policy_models(self):
models = self.representation_net._policy_models if self.representation_net else {}
models.update({self.name + '/' + 'value_net': self.value_net})
return models
@property
def _all_models(self):
models = self.representation_net._all_models if self.representation_net else {}
models.update({self.name + '/' + 'value_net': self.value_net})
return models
class DoubleValueNetwork(ValueNetwork):
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
super().__init__(name, representation_net, value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.value_net2 = get_output_network_from_type(value_net_type)(
vector_dim=self.representation_net.h_dim, **value_net_kwargs)
else:
self.value_net2 = get_output_network_from_type(value_net_type)(
**value_net_kwargs)
def __call__(self, s, visual_s, *args, cell_state=(None,), **kwargs):
feat, cell_state = self.representation_net(s, visual_s, cell_state)
output = self.value_net(feat, *args, **kwargs)
output2 = self.value_net2(feat, *args, **kwargs)
return output, output2, cell_state
def get_value(self, feat, *args, **kwargs):
output = self.value_net(feat, *args, **kwargs)
output2 = self.value_net2(feat, *args, **kwargs)
return output, output2
def get_min(self, *args, **kwargs):
return tf.minimum(*self.get_value(*args, **kwargs))
def get_max(self, *args, **kwargs):
return tf.maximum(*self.get_value(*args, **kwargs))
@property
def trainable_variables(self):
return super().trainable_variables + self.value_net2.trainable_variables
@property
def weights(self):
return super().weights + self.value_net2.weights
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'value_net2': self.value_net2})
return models
class ACNetwork(ValueNetwork):
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
policy_net_type: OutputNetworkType = None,
policy_net_kwargs: dict = {},
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
super().__init__(name, representation_net, value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.policy_net = get_output_network_from_type(policy_net_type)(
vector_dim=self.representation_net.h_dim, **policy_net_kwargs)
else:
self.policy_net = get_output_network_from_type(policy_net_type)(
**policy_net_kwargs)
def __call__(self, s, visual_s, *args, cell_state=(None,), **kwargs):
feat, cell_state = self.representation_net(s, visual_s, cell_state)
output = self.policy_net(feat, *args, **kwargs)
return output, cell_state
@property
def actor_trainable_variables(self):
return self.policy_net.trainable_variables
@property
def critic_trainable_variables(self):
return super().trainable_variables
@property
def weights(self):
return super().weights + self.policy_net.weights
@property
def _policy_models(self):
models = super()._policy_models
models.update({self.name + '/' + 'policy_net': self.policy_net})
return models
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'policy_net': self.policy_net})
return models
class ACCNetwork(ACNetwork):
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
policy_net_type: OutputNetworkType = None,
policy_net_kwargs: dict = {},
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {},
value_net2_type: OutputNetworkType = None,
value_net2_kwargs: dict = {}):
super().__init__(name, representation_net,
policy_net_type, policy_net_kwargs,
value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.value_net2 = get_output_network_from_type(value_net2_type)(
vector_dim=self.representation_net.h_dim, **value_net2_kwargs)
else:
self.value_net2 = get_output_network_from_type(value_net2_type)(
**value_net2_kwargs)
@property
def critic_trainable_variables(self):
return super().critic_trainable_variables + self.value_net2.trainable_variables
@property
def value_net_trainable_variables(self):
return super().critic_trainable_variables
@property
def value_net2_trainable_variables(self):
return self.value_net2.trainable_variables
@property
def weights(self):
return super().weights + self.value_net2.weights
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'value_net2': self.value_net2})
return models
class ADoubleCNetwork(ACNetwork):
def __init__(self,
name: str = 'test',
representation_net: RepresentationNetwork = None,
policy_net_type: OutputNetworkType = None,
policy_net_kwargs: dict = {},
value_net_type: OutputNetworkType = None,
value_net_kwargs: dict = {}):
super().__init__(name, representation_net,
policy_net_type, policy_net_kwargs,
value_net_type, value_net_kwargs)
if self.representation_net is not None:
self.value_net2 = get_output_network_from_type(value_net_type)(
vector_dim=self.representation_net.h_dim, **value_net_kwargs)
else:
self.value_net2 = get_output_network_from_type(value_net_type)(
**value_net_kwargs)
def get_value(self, feat, *args, **kwargs):
output = self.value_net(feat, *args, **kwargs)
output2 = self.value_net2(feat, *args, **kwargs)
return output, output2
def get_min(self, *args, **kwargs):
return tf.minimum(*self.get_value(*args, **kwargs))
def get_max(self, *args, **kwargs):
return tf.maximum(*self.get_value(*args, **kwargs))
@property
def critic_trainable_variables(self):
return super().trainable_variables + self.value_net2.trainable_variables
@property
def weights(self):
return super().weights + self.value_net2.weights
@property
def _all_models(self):
models = super()._all_models
models.update({self.name + '/' + 'value_net2': self.value_net2})
return models
| true
| true
|
790426e1ad4f499c37e7bc3113074e4ba989eb0b
| 241
|
py
|
Python
|
app/twitter.py
|
janaSunrise/Spotify-Twitter-Banner
|
a3b5fc636ef4e79f3f96cc3dd5569eaff7d8a6c0
|
[
"MIT"
] | 20
|
2021-12-21T17:46:33.000Z
|
2022-01-22T15:23:24.000Z
|
app/twitter.py
|
janaSunrise/Spotify-Twitter-Banner
|
a3b5fc636ef4e79f3f96cc3dd5569eaff7d8a6c0
|
[
"MIT"
] | null | null | null |
app/twitter.py
|
janaSunrise/Spotify-Twitter-Banner
|
a3b5fc636ef4e79f3f96cc3dd5569eaff7d8a6c0
|
[
"MIT"
] | null | null | null |
import tweepy
from .config import Config
def update_twitter_banner(api: tweepy.API) -> None:
"""Update the twitter banner of the current profile using the image specified in config."""
api.update_profile_banner(Config.IMAGE_PATH)
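# Example wiring (assumes Config also holds the four OAuth credentials;
# the attribute names below are assumptions):
# auth = tweepy.OAuth1UserHandler(
#     Config.CONSUMER_KEY, Config.CONSUMER_SECRET,
#     Config.ACCESS_TOKEN, Config.ACCESS_TOKEN_SECRET)
# update_twitter_banner(tweepy.API(auth))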
| 26.777778
| 95
| 0.767635
|
import tweepy
from .config import Config
def update_twitter_banner(api: tweepy.API) -> None:
api.update_profile_banner(Config.IMAGE_PATH)
| true
| true
|
790427216325f05e43c495a60f6c3850397bbf84
| 264
|
py
|
Python
|
2021/Day 7 - The Treachery of Whales/2.py
|
Ashwin-op/Advent_of_Code
|
39937379c7cf1c728326af526a71fdd84706a2b8
|
[
"MIT"
] | 2
|
2020-12-05T06:36:27.000Z
|
2021-07-07T11:10:52.000Z
|
2021/Day 7 - The Treachery of Whales/2.py
|
Ashwin-op/Advent_of_Code
|
39937379c7cf1c728326af526a71fdd84706a2b8
|
[
"MIT"
] | null | null | null |
2021/Day 7 - The Treachery of Whales/2.py
|
Ashwin-op/Advent_of_Code
|
39937379c7cf1c728326af526a71fdd84706a2b8
|
[
"MIT"
] | null | null | null |
from statistics import mean
with open("input.txt") as f:
values = [int(i) for i in f.readline().split(",")]
m_values = int(mean(values))
print(min(
sum(sum(range(1, abs(pos - i) + 1)) for pos in values)
# the optimum lies within 0.5 of the mean, so both floor(mean) and
# ceil(mean) must be candidates
for i in range(m_values - 1, m_values + 2)
))
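# The inner sum is the n-th triangular number; the closed form avoids
# building a range per crab position (a sketch using the same inputs):
def fuel(n):
    return n * (n + 1) // 2
print(min(
    sum(fuel(abs(pos - i)) for pos in values)
    for i in range(m_values - 1, m_values + 2)
))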
| 22
| 58
| 0.625
|
from statistics import mean
with open("input.txt") as f:
values = [int(i) for i in f.readline().split(",")]
m_values = int(mean(values))
print(min(
sum(sum(range(1, abs(pos - i) + 1)) for pos in values)
for i in range(m_values - 1, m_values + 2)
))
| true
| true
|
7904272acf72176efbadbcd912cf75e5d8b57f85
| 10,900
|
py
|
Python
|
statement_to_tree.py
|
iolucas/mathview
|
ca576f74dc3aab0c1397048f6972dc1a1a309b84
|
[
"MIT"
] | 33
|
2016-09-23T15:05:24.000Z
|
2021-08-30T11:13:35.000Z
|
statement_to_tree.py
|
iolucas/mathview
|
ca576f74dc3aab0c1397048f6972dc1a1a309b84
|
[
"MIT"
] | 4
|
2016-12-14T03:41:55.000Z
|
2020-05-27T20:27:55.000Z
|
statement_to_tree.py
|
iolucas/mathview
|
ca576f74dc3aab0c1397048f6972dc1a1a309b84
|
[
"MIT"
] | 12
|
2016-08-20T10:40:21.000Z
|
2022-01-03T09:47:01.000Z
|
# Given a statement, find the parse tree that led to it.
from tree_parser import *
import pickle #pickle.dump(database.proof_datas, open(output_file,'wb'))
from copy import deepcopy
from tree import *
#output_file = 'tree_parse_data'
# class InitialStringSearcher:
# def __init__(self):
# self.nodes = {}
# self.return_value = None
#
# def add(self,string,value):
# return self.add()
#
# def add_with_index(self,string, value,index):
# if index==len(string):
# self.value=value
# char = string[index]
# if char not in self.nodes:
# self.nodes[char] = InitialStringSearcher()
# self.nodes[char].add_with_index(string,value,index+1)
class InitialStringSearcher:
def __init__(self):
self.known_values = {}
#self.known_lengths = {}
def add(self,string,value):
self.known_values[tuple(string)]=value
#print 'found new substring', value[0]
#self.known_lengths[string]=len(string)
def find_longest(self,string):
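# NOTE: returns the first stored match in dict iteration order, which is
# not necessarily the longest known prefix despite the name.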
#label=any(label for label in self.known_values if is_initial_string(label,string))
for l in self.known_values:
if is_initial_string(l,string): return self.known_values[l]
return False
def clear(self):
self.known_values = {}
class StatementParser:
def __init__(self,database):
# this is a really ugly hack.
self.wff_variables = set(['ph','ps','ch','th','et','ze','si','rh','mu','la','ka','ta'])
self.set_variables = set(['a', 'a"', "a'", 'a0', 'a0_', 'a1', 'a1_', 'b', 'b"', "b'", 'b0', 'b0_', 'b1', 'b1_', 'c', 'c"', "c'", 'c0_', 'c1_', 'd', 'd"', "d'", 'd0', 'd1', 'e', 'e"', "e'", 'e0', 'e1', 'f', 'f"', "f'", 'f0_', 'f1', 'g', 'g"', "g'", 'g0', 'g1', 'h', 'h"', "h'", 'h0', 'h1', 'i', 'i"', "i'", 'i0', 'i1', 'j', 'j"', "j'", 'j0', 'j1', 'k', 'k"', "k'", 'k0', 'k1', 'l', 'l"', "l'", 'l0', 'l1', 'm', 'm"', "m'", 'm0', 'm1', 'n', 'n"', "n'", 'n0_', 'n1', 'o', 'o"', 'o"_', "o'", "o'_", 'o0', 'o0_', 'o1', 'o1_', 'p', 'p"', "p'", 'p0', 'p1', 'q', 'q"', "q'", 'q0', 'q1', 'r', 'r"', "r'", 'r0', 'r1', 's', 's"', 's"_', "s'", "s'_", 's0', 's1', 't', 't"', "t'", 't0', 't1', 'u', 'u"', "u'", 'u0', 'u1', 'v', 'v"', 'v"_', "v'", "v'_", 'v0', 'v1', 'v2', 'w', 'w"', "w'", 'w0', 'w1', 'x', 'x"', "x'", 'x0', 'x1', 'y', 'y"', "y'", 'y0', 'y1', 'z', 'z"', "z'", 'z0', 'z1'])
self.search = InitialStringSearcher()
self.failed_parses = set()
self.propositions = database.non_entails_axioms
# self.propositions = {}
# for label in database.propositions:
# statement = database.propositions[label].statement
# if statement[0] != '|-': # used in the parse tree
# self.propositions[label] = database.propositions[label]#statement
# add all the variables we ever use to self.variables
# self.variables = set()
# for label in database.propositions:
# self.variables=self.variables.union(database.propositions[label].block.v)
# predefine self.variables because why not
self.variables = set(['.0.', "d'", 'd"', 'D"', 'c0_', 'd0', 'd1', 'q1', 'q0', 'x1', 'G2', 'G1', 'G0', "G'", '-t', '-w', 'q"', 'G"', 'th', 'ta', 'g1', 'g0', 'H', '+t', 'p0', '.0b', "g'", 'P', 'F3', 'g"', 'X', "W'", 'W"', 'h', "ch'", 'J0', 'J1', 't0', 't1', 'p', 'W2', 'W1', '.(x)', "t'", "J'", 't"', 'J"', "w'", "M'", 'j', 'w"', 'M"', 'ze', 'j0', 'j1', '.id', 'M1', 'M0', 'w1', 'M2', "j'", 'j"', 'mu', 'et0', 'et1', 'H1_', "Z'", 'Z"', 'et"', "et'", 'Z0', 'Z1', 'C', 'P1', 'K', "z'", 'z"', 'P"', 'S', "v'_", "P'", 'z0', 'z1', 'c', 'L1_', '0w', "s'_", 'k', './\\', 'r"', 's', 'ch', 'O1_', 'S2', 'S1', 'S0', 'S"', 'v"_', 'b0_', 'ps', "m'", 'E1', 'm"', 'C"', 'o0_', "C'", 'G1_', 'm1', 'm0', 'C2', 'ph', 'C0', 'S1_', "q'", 'F', 'c"', "c'", 'N', 'V', 'f0_', 'F0', 'F1', 'F2', 'p1', '0t', 'f', 'D1_', 'n', 'p"', 'ch1', 'F"', "p'", 'v', 'f1', 'ph"', "ph'", 'o"_', 'f"', "f'", 'ph1', 'ph0', "ze'", 'V"', '.dom', "O'", "V'", 'I1', 'I0', 's1', 's0', 's"_', 'th1', 'th0', 'V0', 'V1', 'V2', 'V3', "th'", 's"', 'I"', "s'", 'th"', "I'", 'A', "L'", 'v"', 'L"', 'ta1', "v'", 'I', 'i0', 'Q', 'v1', 'v2', "ta'", 'L2', 'L3', 'L0', 'L1', 'Y', 'i"', '/t', "i'", 'a', 'si1', 'si0', 'Y1', 'i', 'Y"', 'D2', "Y'", 'q', 'si"', 'si', 'ze1', "si'", 'y', 'Y0', 'I2', 'ps"', 'y"', "ps'", "y'", 'y1', 'y0', 'ps0', 'ps1', 'O2', 'O1', 'O0', "o'_", 'la', '.Morphism', 'n0_', 'k0', 'O"', 'ze"', 'R0', 'D', 'L', 'ze0', "R'", 'c1_', 'T', 'R"', '.X.', '.1.', 'a0_', "l'", "B'", 'l"', 'd', 'B"', 'l', 'B0', 't', 'l0', 'l1', "b'", 'b"', '.(+)', 'U1', 'U0', 'h"', 'b0', 'b1', 'ta0', "U'", 'U"', "o'", "S'", "E'", 'o"', 'E"', 'i1', '+w', 'F1_', '.xb', 'Ro1', 'Ro2', 'rh', 'E0', 'o1', 'o0', "F'", 'B2', 'G', 'R1_', "e'", 'W0', 'I1_', 'O', 'e"', '._|_', 'W', 'x', 'e1', 'e0', '1t', '<_b', 'v0', '<_a', 'r0', 'r1', 'g', 'H2', 'H0', 'H1', 'o', "r'", 'w', 'H"', '.x.', "H'", 'K"', "K'", 'ta"', 'h0', 'h1', "X'", 'K1', 'K0', "T'", 'ch"', "h'", 'M1_', '.*', '.+', '.,', '.-', './', 'X"', 'u1', 'u0', '.<', "u'", 'X1', 'u"', 'ch0_', 'B', 'x"', 'N"', 'J', "x'", "N'", 'R', '.^', 'N0', 'N1', 'x0', 'Z', '.cod', 'C1', 'b', 'o1_', 'X0', '.graph', 'r', '.~', 'B1_', 'z', '.t', '.<_', '.w', 'Q1', 'Q0', 'V1_', 'rh1', 'rh0', 'w0', '~t', '~w', 'Q"', "Q'", 'et', 'rh"', '.||', "rh'", 'k"', 'A"', "k'", "A'", 'P0', 'a1_', '.\\/', 'B1', 'A1', 'A0', 'k1', 'A2', 'C1_', '.+b', 'a"', 'E', "a'", 'A1_', 'M', 'T0', 'T1', 'a1', 'a0', 'U', 'b1_', 'T"', 'ka', 'e', "D'", 'n"', 'm', "n'", 'u', 'D0', 'n1', '.Object', '.+^', 'D1'])
def parse_new_statement(self,statement,context):
string, tree = self.parse_statement(statement,context)
self.search.clear()
self.failed_parses = set()
return string,tree
# This checks whether we're just parsing a variable and otherwise check whether any of the propositions
# describes it.
def parse_statement(self,statement,context):
# this should return a parse tree for a list of strings that form statement
# check whether we are doomed to failure
if (statement[0],len(statement)) in self.failed_parses:
return False,False
# attempt to search for the phrase
tuples = self.search.find_longest(statement)
if tuples: return tuples
# check if it's just a variable
# if statement[1] in self.variables and not (statement[0]=='wff' and statement[1] not in self.wff_variables) and not (statement[0]=='set' and statement[1] not in self.set_variables):
# #yep. It's just a variable. Skip the type checking. Variables can only have one type ever, right?
# length = 1; # exclude the wff term
# string = statement[:2]
# tree = Tree(value='VAR'+statement[1])
# return string,tree
if statement[1] in self.variables:
for f in context.f.values():
if f.variable == statement[1] and f.vclass == statement[0]:
return statement[:2], f.tree
#found_valid_parsing = False
for prop_label in self.propositions:
prop = self.propositions[prop_label]
string, tree = self.proposition_describes_statement(prop,statement,context)
if string == False:
continue
#print 'found',string
#if statement[0]=='wff'
self.search.add(string,(string,tree)) # add to the search tree
return string, tree
#print 'could not find expression for ',statement
self.failed_parses.add((statement[0],len(statement)))
return False, False
# this is a brutally inefficient way to go about this
# when it completes it returns a parse tree for the statement and the length of the tree
def proposition_describes_statement(self,proposition,s,context):
prop_s = proposition.statement
# the types of all the free variables
variable_types = {hyp.variable:hyp.vclass for hyp in proposition.f.values()}
# string definitions of all the free variables
variable_definitions = {hyp.variable:None for hyp in proposition.f.values()}
#tree defintions of all the free variables
variable_trees = {hyp.variable:None for hyp in proposition.f.values()}
index_into_s = 0
index_into_prop_s=0
while index_into_prop_s < len(prop_s):
if index_into_s>=len(s):
#print 'ran out of s'
return False,False
prop_value = prop_s[index_into_prop_s]
if prop_value in variable_types:
# it's a variable
# check if already defined
if variable_definitions[prop_value]==None:
#then we need to figure out the parsing of the substatement
#print 'testing ',[variable_types[prop_value]]+s[index_into_s:],' because of ',proposition.label,prop_s
string, tree = self.parse_statement([variable_types[prop_value]]+s[index_into_s:],context)
if string == False:
return False,False
length = len(string)-1 # skip the wff/set/class bit
index_into_s+=length
index_into_prop_s+=1
variable_definitions[prop_value] = string[1:]
variable_trees[prop_value] = tree
continue
else:
#we've already seen this expression before
if is_initial_string(variable_definitions[prop_value], s[index_into_s:]):
# Yes, yes, we get the point
index_into_s+=len(variable_definitions[prop_value])
index_into_prop_s+=1
continue
else:
return False,False # eh. Whatever.
else:
#it's not a variable
if prop_value == s[index_into_s]:
index_into_s+=1
index_into_prop_s+=1
continue
else:
#it's not a variable and it doesn't match
return False,False
#we have the entire parsing and it appears to work
leaves = [variable_trees[hyp.variable] for hyp in proposition.hyps if hyp.type == 'f']
tree = Tree(value=proposition.label,leaves = leaves)
#now construct the string.
out_string = s[:index_into_s]
return out_string, tree
def is_initial_string(initial_string,string):
#return (len(string)>=len(initial_string)) and string[:len(initial_string)]==initial_string
if len(initial_string)>len(string): return False
for i in range(len(initial_string)):
if initial_string[i]!=string[i]: return False
return True
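# Usage sketch for the two helpers above, on token lists as the parser uses:
searcher = InitialStringSearcher()
searcher.add(['wff', 'ph'], (['wff', 'ph'], 'tree_for_ph'))
string, tree = searcher.find_longest(['wff', 'ph', '->', 'ps'])
assert string == ['wff', 'ph']
assert is_initial_string(['(', 'ph'], ['(', 'ph', '->', 'ps', ')'])
assert not is_initial_string(['ph', '->'], ['ph'])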
| 57.671958
| 2,466
| 0.503761
|
from tree_parser import *
import pickle
from copy import deepcopy
from tree import *
class InitialStringSearcher:
def __init__(self):
self.known_values = {}
def add(self,string,value):
self.known_values[tuple(string)]=value
def find_longest(self,string):
for l in self.known_values:
if is_initial_string(l,string): return self.known_values[l]
return False
def clear(self):
self.known_values = {}
class StatementParser:
def __init__(self,database):
self.wff_variables = set(['ph','ps','ch','th','et','ze','si','rh','mu','la','ka','ta'])
self.set_variables = set(['a', 'a"', "a'", 'a0', 'a0_', 'a1', 'a1_', 'b', 'b"', "b'", 'b0', 'b0_', 'b1', 'b1_', 'c', 'c"', "c'", 'c0_', 'c1_', 'd', 'd"', "d'", 'd0', 'd1', 'e', 'e"', "e'", 'e0', 'e1', 'f', 'f"', "f'", 'f0_', 'f1', 'g', 'g"', "g'", 'g0', 'g1', 'h', 'h"', "h'", 'h0', 'h1', 'i', 'i"', "i'", 'i0', 'i1', 'j', 'j"', "j'", 'j0', 'j1', 'k', 'k"', "k'", 'k0', 'k1', 'l', 'l"', "l'", 'l0', 'l1', 'm', 'm"', "m'", 'm0', 'm1', 'n', 'n"', "n'", 'n0_', 'n1', 'o', 'o"', 'o"_', "o'", "o'_", 'o0', 'o0_', 'o1', 'o1_', 'p', 'p"', "p'", 'p0', 'p1', 'q', 'q"', "q'", 'q0', 'q1', 'r', 'r"', "r'", 'r0', 'r1', 's', 's"', 's"_', "s'", "s'_", 's0', 's1', 't', 't"', "t'", 't0', 't1', 'u', 'u"', "u'", 'u0', 'u1', 'v', 'v"', 'v"_', "v'", "v'_", 'v0', 'v1', 'v2', 'w', 'w"', "w'", 'w0', 'w1', 'x', 'x"', "x'", 'x0', 'x1', 'y', 'y"', "y'", 'y0', 'y1', 'z', 'z"', "z'", 'z0', 'z1'])
self.search = InitialStringSearcher()
self.failed_parses = set()
self.propositions = database.non_entails_axioms
# self.propositions = {}
# for label in database.propositions:
# statement = database.propositions[label].statement
# if statement[0] != '|-': # used in the parse tree
# self.propositions[label] = database.propositions[label]#statement
# add all the variables we ever use to self.variables
# self.variables = set()
# for label in database.propositions:
# self.variables=self.variables.union(database.propositions[label].block.v)
# predefine self.variables because why not
self.variables = set(['.0.', "d'", 'd"', 'D"', 'c0_', 'd0', 'd1', 'q1', 'q0', 'x1', 'G2', 'G1', 'G0', "G'", '-t', '-w', 'q"', 'G"', 'th', 'ta', 'g1', 'g0', 'H', '+t', 'p0', '.0b', "g'", 'P', 'F3', 'g"', 'X', "W'", 'W"', 'h', "ch'", 'J0', 'J1', 't0', 't1', 'p', 'W2', 'W1', '.(x)', "t'", "J'", 't"', 'J"', "w'", "M'", 'j', 'w"', 'M"', 'ze', 'j0', 'j1', '.id', 'M1', 'M0', 'w1', 'M2', "j'", 'j"', 'mu', 'et0', 'et1', 'H1_', "Z'", 'Z"', 'et"', "et'", 'Z0', 'Z1', 'C', 'P1', 'K', "z'", 'z"', 'P"', 'S', "v'_", "P'", 'z0', 'z1', 'c', 'L1_', '0w', "s'_", 'k', './\\', 'r"', 's', 'ch', 'O1_', 'S2', 'S1', 'S0', 'S"', 'v"_', 'b0_', 'ps', "m'", 'E1', 'm"', 'C"', 'o0_', "C'", 'G1_', 'm1', 'm0', 'C2', 'ph', 'C0', 'S1_', "q'", 'F', 'c"', "c'", 'N', 'V', 'f0_', 'F0', 'F1', 'F2', 'p1', '0t', 'f', 'D1_', 'n', 'p"', 'ch1', 'F"', "p'", 'v', 'f1', 'ph"', "ph'", 'o"_', 'f"', "f'", 'ph1', 'ph0', "ze'", 'V"', '.dom', "O'", "V'", 'I1', 'I0', 's1', 's0', 's"_', 'th1', 'th0', 'V0', 'V1', 'V2', 'V3', "th'", 's"', 'I"', "s'", 'th"', "I'", 'A', "L'", 'v"', 'L"', 'ta1', "v'", 'I', 'i0', 'Q', 'v1', 'v2', "ta'", 'L2', 'L3', 'L0', 'L1', 'Y', 'i"', '/t', "i'", 'a', 'si1', 'si0', 'Y1', 'i', 'Y"', 'D2', "Y'", 'q', 'si"', 'si', 'ze1', "si'", 'y', 'Y0', 'I2', 'ps"', 'y"', "ps'", "y'", 'y1', 'y0', 'ps0', 'ps1', 'O2', 'O1', 'O0', "o'_", 'la', '.Morphism', 'n0_', 'k0', 'O"', 'ze"', 'R0', 'D', 'L', 'ze0', "R'", 'c1_', 'T', 'R"', '.X.', '.1.', 'a0_', "l'", "B'", 'l"', 'd', 'B"', 'l', 'B0', 't', 'l0', 'l1', "b'", 'b"', '.(+)', 'U1', 'U0', 'h"', 'b0', 'b1', 'ta0', "U'", 'U"', "o'", "S'", "E'", 'o"', 'E"', 'i1', '+w', 'F1_', '.xb', 'Ro1', 'Ro2', 'rh', 'E0', 'o1', 'o0', "F'", 'B2', 'G', 'R1_', "e'", 'W0', 'I1_', 'O', 'e"', '._|_', 'W', 'x', 'e1', 'e0', '1t', '<_b', 'v0', '<_a', 'r0', 'r1', 'g', 'H2', 'H0', 'H1', 'o', "r'", 'w', 'H"', '.x.', "H'", 'K"', "K'", 'ta"', 'h0', 'h1', "X'", 'K1', 'K0', "T'", 'ch"', "h'", 'M1_', '.*', '.+', '.,', '.-', './', 'X"', 'u1', 'u0', '.<', "u'", 'X1', 'u"', 'ch0_', 'B', 'x"', 'N"', 'J', "x'", "N'", 'R', '.^', 'N0', 'N1', 'x0', 'Z', '.cod', 'C1', 'b', 'o1_', 'X0', '.graph', 'r', '.~', 'B1_', 'z', '.t', '.<_', '.w', 'Q1', 'Q0', 'V1_', 'rh1', 'rh0', 'w0', '~t', '~w', 'Q"', "Q'", 'et', 'rh"', '.||', "rh'", 'k"', 'A"', "k'", "A'", 'P0', 'a1_', '.\\/', 'B1', 'A1', 'A0', 'k1', 'A2', 'C1_', '.+b', 'a"', 'E', "a'", 'A1_', 'M', 'T0', 'T1', 'a1', 'a0', 'U', 'b1_', 'T"', 'ka', 'e', "D'", 'n"', 'm', "n'", 'u', 'D0', 'n1', '.Object', '.+^', 'D1'])
def parse_new_statement(self,statement,context):
string, tree = self.parse_statement(statement,context)
self.search.clear()
self.failed_parses = set()
return string,tree
    # This checks whether we're just parsing a variable and otherwise checks whether any of the propositions
# describes it.
def parse_statement(self,statement,context):
# this should return a parse tree for a list of strings that form statement
# check whether we are doomed to failure
if (statement[0],len(statement)) in self.failed_parses:
return False,False
# attempt to search for the phrase
tuples = self.search.find_longest(statement)
if tuples: return tuples
# check if it's just a variable
# if statement[1] in self.variables and not (statement[0]=='wff' and statement[1] not in self.wff_variables) and not (statement[0]=='set' and statement[1] not in self.set_variables):
# #yep. It's just a variable. Skip the type checking. Variables can only have one type ever, right?
# length = 1; # exclude the wff term
# string = statement[:2]
# tree = Tree(value='VAR'+statement[1])
# return string,tree
if statement[1] in self.variables:
for f in context.f.values():
if f.variable == statement[1] and f.vclass == statement[0]:
return statement[:2], f.tree
#found_valid_parsing = False
for prop_label in self.propositions:
prop = self.propositions[prop_label]
string, tree = self.proposition_describes_statement(prop,statement,context)
if string == False:
continue
#print 'found',string
#if statement[0]=='wff'
self.search.add(string,(string,tree)) # add to the search tree
return string, tree
#print 'could not find expression for ',statement
self.failed_parses.add((statement[0],len(statement)))
return False, False
# this is a brutally inefficient way to go about this
    # when it completes it returns the matched token string and a parse tree for the statement
def proposition_describes_statement(self,proposition,s,context):
prop_s = proposition.statement
# the types of all the free variables
variable_types = {hyp.variable:hyp.vclass for hyp in proposition.f.values()}
# string definitions of all the free variables
variable_definitions = {hyp.variable:None for hyp in proposition.f.values()}
        #tree definitions of all the free variables
variable_trees = {hyp.variable:None for hyp in proposition.f.values()}
index_into_s = 0
index_into_prop_s=0
while index_into_prop_s < len(prop_s):
if index_into_s>=len(s):
#print 'ran out of s'
return False,False
prop_value = prop_s[index_into_prop_s]
if prop_value in variable_types:
# it's a variable
# check if already defined
if variable_definitions[prop_value]==None:
#then we need to figure out the parsing of the substatement
#print 'testing ',[variable_types[prop_value]]+s[index_into_s:],' because of ',proposition.label,prop_s
string, tree = self.parse_statement([variable_types[prop_value]]+s[index_into_s:],context)
if string == False:
return False,False
length = len(string)-1 # skip the wff/set/class bit
index_into_s+=length
index_into_prop_s+=1
variable_definitions[prop_value] = string[1:]
variable_trees[prop_value] = tree
continue
else:
#we've already seen this expression before
                    if is_initial_string(variable_definitions[prop_value], s[index_into_s:]):
# Yes, yes, we get the point
index_into_s+=len(variable_definitions[prop_value])
index_into_prop_s+=1
continue
else:
return False,False # eh. Whatever.
else:
#it's not a variable
if prop_value == s[index_into_s]:
index_into_s+=1
index_into_prop_s+=1
continue
else:
#it's not a variable and it doesn't match
return False,False
#we have the entire parsing and it appears to work
leaves = [variable_trees[hyp.variable] for hyp in proposition.hyps if hyp.type == 'f']
tree = Tree(value=proposition.label,leaves = leaves)
#now construct the string.
out_string = s[:index_into_s]
return out_string, tree
def is_initial_string(initial_string,string):
#return (len(string)>=len(initial_string)) and string[:len(initial_string)]==initial_string
if len(initial_string)>len(string): return False
for i in range(len(initial_string)):
if initial_string[i]!=string[i]: return False
return True
| true
| true
|
79042801dc2e48249347def16368667b9df5fd6d
| 5,472
|
py
|
Python
|
backend/Gifts/views.py
|
exarus/GiftRecommenderSystem
|
a1f0bb44f365c917cf4d870bc93cf172e4d2af52
|
[
"Apache-2.0"
] | null | null | null |
backend/Gifts/views.py
|
exarus/GiftRecommenderSystem
|
a1f0bb44f365c917cf4d870bc93cf172e4d2af52
|
[
"Apache-2.0"
] | null | null | null |
backend/Gifts/views.py
|
exarus/GiftRecommenderSystem
|
a1f0bb44f365c917cf4d870bc93cf172e4d2af52
|
[
"Apache-2.0"
] | 1
|
2018-10-30T22:04:53.000Z
|
2018-10-30T22:04:53.000Z
|
from django.shortcuts import render
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
from Constants import *
from Gifts.getRecommendations.RS import Users, Recommendations
import json
# Create your views here.
def check_input(request, mandatory_fields, optional_fields=None):
if not optional_fields: optional_fields = []
for key in request.keys():
if key not in mandatory_fields and key not in optional_fields:
return {'result': 'Error', 'message': key + ' is not a valid field'}
for field in mandatory_fields:
if field not in request.keys():
            return {'result': 'Error', 'message': field + ' is not present'}
return {"result": "Success"}
def add_user(request):
if 'userProfile' not in request:
        return JsonResponse({'result': 'Error', 'message': 'userProfile is not present'})
result = check_input(request['userProfile'], ["sex", "age", "hobbies", "userType"],
["alreadyGifted", "lovedCategories"])
if result['result'] == "Error":
return JsonResponse(result)
if request['userProfile']['sex'] not in ['Female', 'Male']:
return JsonResponse({'result': 'Error', 'message': request['userProfile']['sex'] +
' is not a valid sex'})
if 'alreadyGifted' not in request['userProfile']:
request['userProfile']['alreadyGifted'] = []
if 'lovedCategories' not in request['userProfile']:
request['userProfile']['lovedCategories'] = []
try:
user_id = Users.add_user(request['userProfile'])
except Exception as e:
        print(e)
return JsonResponse({'result': 'Error', 'message': 'error while adding user'})
return JsonResponse({'result': 'Success', 'data': {'userId': user_id}})
def make_list(request):
result = check_input(request, ["userId"], ["filter"])
if result['result'] == "Error":
return JsonResponse(result)
if 'filter' in request:
result = check_input(request['filter'], [], ["minPrice", "maxPrice"])
if result['result'] == "Error":
return JsonResponse(result)
min_price = None
max_price = None
if 'filter' in request:
if 'minPrice' in request['filter']:
min_price = request['filter']['minPrice']
if 'maxPrice' in request['filter']:
max_price = request['filter']['maxPrice']
try:
Recommendations.generate_list(request['userId'], min_price, max_price)
number_of_pages = Recommendations.get_number_of_pages(request['userId'])
except Exception as e:
        print(e)
        return JsonResponse({'result': 'Error', 'message': 'error while making list'})
return JsonResponse({'result': 'Success', 'data': {'numberOfPages': number_of_pages}})
def get_suggestions(request):
result = check_input(request, ["page", "userId"])
if result['result'] == "Error":
return JsonResponse(result)
try:
items = Recommendations.get_page(request['userId'], request['page'])
number_of_pages = Recommendations.get_number_of_pages(request['userId'])
except Exception as e:
        print(e)
return JsonResponse({'result': 'Error', 'message': 'error during getting list'})
if items:
request = {'result': 'Success', 'data': {'items': items, "numberOfPages": number_of_pages}}
elif items == []:
request = {'result': 'Error', 'message': 'page out of range'}
else:
request = {'result': 'Error', 'message': 'error during getting list'}
return JsonResponse(request)
def rate_item(request):
result = check_input(request, ["userId", "itemId", "rating"])
if result['result'] == "Error":
return JsonResponse(result)
try:
Recommendations.rate_and_remove(request['userId'], request['itemId'], request['rating'])
number_of_pages = Recommendations.get_number_of_pages(request['userId'])
except Exception as e:
        print(e)
return JsonResponse({"result": "Error", "message": "error during rating item"})
return JsonResponse({"result": "Success", 'data': {'numberOfPages': number_of_pages}})
@csrf_exempt
def home(request):
if request.method == "POST":
try:
request_dict = json.loads(request.body)
print(request_dict)
if 'task' not in request_dict:
                return JsonResponse({'result': 'Error', 'message': 'task is not present'})
if 'data' not in request_dict:
                return JsonResponse({'result': 'Error', 'message': 'data is not present'})
if request_dict['task'] == 'addUser':
return add_user(request_dict['data'])
if request_dict['task'] == 'makeList':
return make_list(request_dict['data'])
if request_dict['task'] == 'getSuggestions':
return get_suggestions(request_dict['data'])
if request_dict['task'] == 'rateItem':
return rate_item(request_dict['data'])
return JsonResponse({'result': 'Error', 'message':
request_dict['task'] + " is not a valid task"})
except Exception as e:
            print(e)
return JsonResponse({'result': 'Error', 'message': "strange error"})
return HttpResponse('''
    <h1>Welcome to GRS</h1>
''')
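# Example request body accepted by home() (illustrative values, not part of
# the original file); the task name and fields match the handlers above:
#   {"task": "addUser",
#    "data": {"userProfile": {"sex": "Female", "age": 25,
#                             "hobbies": ["reading"], "userType": "friend"}}}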
| 39.085714
| 99
| 0.6197
|
from django.shortcuts import render
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
from Constants import *
from Gifts.getRecommendations.RS import Users, Recommendations
import json
def check_input(request, mandatory_fields, optional_fields=None):
if not optional_fields: optional_fields = []
for key in request.keys():
if key not in mandatory_fields and key not in optional_fields:
return {'result': 'Error', 'message': key + ' is not a valid field'}
for field in mandatory_fields:
if field not in request.keys():
            return {'result': 'Error', 'message': field + ' is not present'}
return {"result": "Success"}
def add_user(request):
if 'userProfile' not in request:
        return JsonResponse({'result': 'Error', 'message': 'userProfile is not present'})
result = check_input(request['userProfile'], ["sex", "age", "hobbies", "userType"],
["alreadyGifted", "lovedCategories"])
if result['result'] == "Error":
return JsonResponse(result)
if request['userProfile']['sex'] not in ['Female', 'Male']:
return JsonResponse({'result': 'Error', 'message': request['userProfile']['sex'] +
' is not a valid sex'})
if 'alreadyGifted' not in request['userProfile']:
request['userProfile']['alreadyGifted'] = []
if 'lovedCategories' not in request['userProfile']:
request['userProfile']['lovedCategories'] = []
try:
user_id = Users.add_user(request['userProfile'])
except Exception as e:
        print(e)
return JsonResponse({'result': 'Error', 'message': 'error while adding user'})
return JsonResponse({'result': 'Success', 'data': {'userId': user_id}})
def make_list(request):
result = check_input(request, ["userId"], ["filter"])
if result['result'] == "Error":
return JsonResponse(result)
if 'filter' in request:
result = check_input(request['filter'], [], ["minPrice", "maxPrice"])
if result['result'] == "Error":
return JsonResponse(result)
min_price = None
max_price = None
if 'filter' in request:
if 'minPrice' in request['filter']:
min_price = request['filter']['minPrice']
if 'maxPrice' in request['filter']:
max_price = request['filter']['maxPrice']
try:
Recommendations.generate_list(request['userId'], min_price, max_price)
number_of_pages = Recommendations.get_number_of_pages(request['userId'])
except Exception as e:
        print(e)
        return JsonResponse({'result': 'Error', 'message': 'error while making list'})
return JsonResponse({'result': 'Success', 'data': {'numberOfPages': number_of_pages}})
def get_suggestions(request):
result = check_input(request, ["page", "userId"])
if result['result'] == "Error":
return JsonResponse(result)
try:
items = Recommendations.get_page(request['userId'], request['page'])
number_of_pages = Recommendations.get_number_of_pages(request['userId'])
except Exception as e:
        print(e)
return JsonResponse({'result': 'Error', 'message': 'error during getting list'})
if items:
request = {'result': 'Success', 'data': {'items': items, "numberOfPages": number_of_pages}}
elif items == []:
request = {'result': 'Error', 'message': 'page out of range'}
else:
request = {'result': 'Error', 'message': 'error during getting list'}
return JsonResponse(request)
def rate_item(request):
result = check_input(request, ["userId", "itemId", "rating"])
if result['result'] == "Error":
return JsonResponse(result)
try:
Recommendations.rate_and_remove(request['userId'], request['itemId'], request['rating'])
number_of_pages = Recommendations.get_number_of_pages(request['userId'])
except Exception as e:
        print(e)
return JsonResponse({"result": "Error", "message": "error during rating item"})
return JsonResponse({"result": "Success", 'data': {'numberOfPages': number_of_pages}})
@csrf_exempt
def home(request):
if request.method == "POST":
try:
request_dict = json.loads(request.body)
print(request_dict)
if 'task' not in request_dict:
                return JsonResponse({'result': 'Error', 'message': 'task is not present'})
if 'data' not in request_dict:
                return JsonResponse({'result': 'Error', 'message': 'data is not present'})
if request_dict['task'] == 'addUser':
return add_user(request_dict['data'])
if request_dict['task'] == 'makeList':
return make_list(request_dict['data'])
if request_dict['task'] == 'getSuggestions':
return get_suggestions(request_dict['data'])
if request_dict['task'] == 'rateItem':
return rate_item(request_dict['data'])
return JsonResponse({'result': 'Error', 'message':
request_dict['task'] + " is not a valid task"})
except Exception as e:
            print(e)
return JsonResponse({'result': 'Error', 'message': "strange error"})
return HttpResponse('''
    <h1>Welcome to GRS</h1>
''')
| false
| true
|
7904281c09f285f9f58bfe640ba09dcc99178926
| 20,018
|
py
|
Python
|
numpy_indexed/grouping.py
|
EelcoHoogendoorn/Numpy_arraysetops_EP
|
84dc8114bf8a79c3acb3f7f59128247b9fc97243
|
[
"MIT"
] | 170
|
2016-04-02T07:29:12.000Z
|
2022-03-30T02:57:15.000Z
|
numpy_indexed/grouping.py
|
EelcoHoogendoorn/Numpy_arraysetops_EP
|
84dc8114bf8a79c3acb3f7f59128247b9fc97243
|
[
"MIT"
] | 13
|
2016-08-31T14:39:51.000Z
|
2022-01-10T16:29:00.000Z
|
numpy_indexed/grouping.py
|
EelcoHoogendoorn/Numpy_arraysetops_EP
|
84dc8114bf8a79c3acb3f7f59128247b9fc97243
|
[
"MIT"
] | 19
|
2016-07-20T18:49:36.000Z
|
2021-04-16T06:38:09.000Z
|
"""grouping module"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import itertools
import numpy as np
from numpy_indexed.index import as_index
import numpy_indexed as npi
__author__ = "Eelco Hoogendoorn"
__license__ = "LGPL"
__email__ = "hoogendoorn.eelco@gmail.com"
class GroupBy(object):
"""
GroupBy class
contains an index of keys, and extends the index functionality with grouping-specific functionality
"""
def __init__(self, keys, axis=0):
"""
Parameters
----------
keys : indexable object
sequence of keys to group by
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
self.index = as_index(keys, axis)
#forward interesting/'public' index properties
@property
def unique(self):
"""unique keys"""
return self.index.unique
@property
def count(self):
"""count of each unique key"""
return self.index.count
@property
def inverse(self):
"""mapping such that unique[inverse]==keys"""
return self.index.inverse
@property
def groups(self):
"""int, number of groups formed by the keys"""
return self.index.groups
#some different methods of chopping up a set of values by key
def split_iterable_as_iterable(self, values):
"""Group iterable into iterables, in the order of the keys
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
Memory consumption depends on the amount of sorting required
Worst case, if index.sorter[-1] = 0, we need to consume the entire value iterable,
before we can start yielding any output
But to the extent that the keys are already sorted, the grouping is lazy
"""
values = iter(enumerate(values))
cache = dict()
def get_value(ti):
try:
return cache.pop(ti)
            except KeyError:  # not yet cached; read ahead in the value iterator
while True:
i, v = next(values)
if i==ti:
return v
cache[i] = v
s = iter(self.index.sorter)
for c in self.count:
yield (get_value(i) for i in itertools.islice(s, int(c)))
def split_iterable_as_unordered_iterable(self, values):
"""Group iterable into iterables, without regard for the ordering of self.index.unique
key-group tuples are yielded as soon as they are complete
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
tuple of key, and a list of corresponding items in values
Notes
-----
This approach is lazy, insofar as grouped values are close in their iterable
"""
from collections import defaultdict
cache = defaultdict(list)
count = self.count
unique = self.unique
key = (lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple(c[i] for c in unique))
for i,v in zip(self.inverse, values):
cache[i].append(v)
if len(cache[i]) == count[i]:
yield key(i), cache.pop(i)
def split_sequence_as_iterable(self, values):
"""Group sequence into iterables
Parameters
----------
values : iterable of length equal to keys
iterable of values to be grouped
Yields
------
iterable of items in values
Notes
-----
        This is the preferred method if values has random access, but we don't want it completely in memory.
Like a big memory mapped file, for instance
"""
s = iter(self.index.sorter)
for c in self.count:
yield (values[i] for i in itertools.islice(s, int(c)))
def split_array_as_array(self, values):
"""Group ndarray into ndarray by means of reshaping
Parameters
----------
values : ndarray_like, [index.size, ...]
Returns
-------
ndarray, [groups, group_size, ...]
values grouped by key
Raises
------
        ValueError
This operation is only possible if index.uniform==True
"""
if not self.index.uniform:
raise ValueError("Array can only be split as array if all groups have the same size")
values = np.asarray(values)
values = values[self.index.sorter]
return values.reshape(self.groups, -1, *values.shape[1:])
def split_array_as_list(self, values):
"""Group values as a list of arrays, or a jagged-array
Parameters
----------
values : ndarray, [keys, ...]
Returns
-------
list of length self.groups of ndarray, [key_count, ...]
"""
values = np.asarray(values)
values = values[self.index.sorter]
return np.split(values, self.index.slices[1:-1], axis=0)
def split(self, values):
"""some sensible defaults"""
try:
return self.split_array_as_array(values)
        except ValueError:
# FIXME: change to iter in python 3?
return self.split_array_as_list(values)
def __call__(self, values):
"""not sure how i feel about this. explicit is better than implict?"""
return self.unique, self.split(values)
# ufunc based reduction methods. should they return unique keys by default?
def reduce(self, values, operator=np.add, axis=0, dtype=None):
"""Reduce the values over identical key groups, using the given ufunc
reduction is over the first axis, which should have elements corresponding to the keys
        all other axes are treated independently for the sake of this reduction
Parameters
----------
values : ndarray, [keys, ...]
values to perform reduction over
operator : numpy.ufunc
            a numpy ufunc, such as np.add or np.multiply
axis : int, optional
the axis to reduce over
dtype : output dtype
Returns
-------
ndarray, [groups, ...]
values reduced by operator over the key-groups
"""
values = np.take(values, self.index.sorter, axis=axis)
return operator.reduceat(values, self.index.start, axis=axis, dtype=dtype)
def sum(self, values, axis=0, dtype=None):
"""compute the sum over each group
Parameters
----------
values : array_like, [keys, ...]
values to sum per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype)
def prod(self, values, axis=0, dtype=None):
"""compute the product over each group
Parameters
----------
values : array_like, [keys, ...]
values to multiply per group
axis : int, optional
alternative reduction axis for values
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype, operator=np.multiply)
def mean(self, values, axis=0, weights=None, dtype=None):
"""compute the mean over each group
Parameters
----------
values : array_like, [keys, ...]
values to take average of per group
axis : int, optional
alternative reduction axis for values
weights : ndarray, [keys, ...], optional
weight to use for each value
dtype : output dtype
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
if weights is None:
result = self.reduce(values, axis=axis, dtype=dtype)
shape = [1] * values.ndim
shape[axis] = self.groups
weights = self.count.reshape(shape)
else:
weights = np.asarray(weights)
result = self.reduce(values * weights, axis=axis, dtype=dtype)
weights = self.reduce(weights, axis=axis, dtype=dtype)
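        # in both branches the return below divides group-reduced values by group
        # weights (the group counts when weights is None); e.g. keys=[0, 0],
        # values=[1., 3.], weights=[1., 3.] gives (1*1 + 3*3)/(1 + 3) = 2.5
        # (illustrative, not part of the original module)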
return self.unique, result / weights
def var(self, values, axis=0, weights=None, dtype=None):
"""compute the variance over each group
Parameters
----------
values : array_like, [keys, ...]
values to take variance of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
unique, mean = self.mean(values, axis, weights, dtype)
err = values - mean.take(self.inverse, axis)
if weights is None:
shape = [1] * values.ndim
shape[axis] = self.groups
group_weights = self.count.reshape(shape)
var = self.reduce(err ** 2, axis=axis, dtype=dtype)
else:
weights = np.asarray(weights)
group_weights = self.reduce(weights, axis=axis, dtype=dtype)
var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype)
return unique, var / group_weights
def std(self, values, axis=0, weights=None, dtype=None):
"""standard deviation over each group
Parameters
----------
values : array_like, [keys, ...]
values to take standard deviation of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
unique, var = self.var(values, axis, weights, dtype)
return unique, np.sqrt(var)
def median(self, values, axis=0, average=True):
"""compute the median value over each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the median of per group
axis : int, optional
alternative reduction axis for values
average : bool, optional
when average is true, the average of the two central values is taken for groups with an even key-count
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
mid_2 = self.index.start + self.index.stop
        hi = mid_2 // 2
lo = (mid_2 - 1) // 2
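        # e.g. a group occupying sorted slots 2..5 (start=2, stop=6) gives mid_2=8,
        # hi=4, lo=3 -- the two central slots averaged below (illustrative example)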
#need this indirection for lex-index compatibility
sorted_group_rank_per_key = self.index.sorted_group_rank_per_key
def median1d(slc):
#place values at correct keys; preconditions the upcoming lexsort
slc = slc[self.index.sorter]
#refine value sorting within each keygroup
sorter = np.lexsort((slc, sorted_group_rank_per_key))
slc = slc[sorter]
return (slc[lo]+slc[hi]) / 2 if average else slc[hi]
values = np.asarray(values)
if values.ndim>1: #is trying to skip apply_along_axis somewhat premature optimization?
values = np.apply_along_axis(median1d, axis, values)
else:
values = median1d(values)
return self.unique, values
def mode(self, values, weights=None):
"""compute the mode within each group.
Parameters
----------
values : array_like, [keys, ...]
values to compute the mode of per group
weights : array_like, [keys], float, optional
optional weight associated with each entry in values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
if weights is None:
unique, weights = npi.count((self.index.sorted_group_rank_per_key, values))
else:
unique, weights = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights)
x, bin = npi.group_by(unique[0]).argmax(weights)
return x, unique[1][bin]
def min(self, values, axis=0):
"""return the minimum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take minimum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.minimum, axis)
def max(self, values, axis=0):
"""return the maximum within each group
Parameters
----------
values : array_like, [keys, ...]
values to take maximum of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, np.maximum, axis)
def first(self, values, axis=0):
"""return values at first occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the first value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.start], axis)
def last(self, values, axis=0):
"""return values at last occurance of its associated key
Parameters
----------
values : array_like, [keys, ...]
values to pick the last value of per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...]
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.stop-1], axis)
def any(self, values, axis=0):
"""compute if any item evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
        if values.dtype != bool:
values = values != 0
return self.unique, self.reduce(values, axis=axis) > 0
def all(self, values, axis=0):
"""compute if all items evaluates to true in each group
Parameters
----------
values : array_like, [keys, ...]
values to take boolean predicate over per group
axis : int, optional
alternative reduction axis for values
Returns
-------
unique: ndarray, [groups]
unique keys
reduced : ndarray, [groups, ...], np.bool
value array, reduced over groups
"""
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, operator=np.multiply) != 0
def argmin(self, values):
"""return the index into values corresponding to the minimum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmin of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmin : ndarray, [groups]
index into value array, representing the argmin per group
"""
keys, minima = self.min(values)
minima = minima[self.inverse]
        # select the first occurrence of the minimum in each group
index = as_index((self.inverse, values == minima))
return keys, index.sorter[index.start[-self.groups:]]
def argmax(self, values):
"""return the index into values corresponding to the maximum value of the group
Parameters
----------
values : array_like, [keys]
values to pick the argmax of per group
Returns
-------
unique: ndarray, [groups]
unique keys
argmax : ndarray, [groups]
index into value array, representing the argmax per group
"""
keys, maxima = self.max(values)
maxima = maxima[self.inverse]
        # select the first occurrence of the maximum in each group
index = as_index((self.inverse, values == maxima))
return keys, index.sorter[index.start[-self.groups:]]
#implement iter interface? could simply do zip( group_by(keys)(values)), no?
def group_by(keys, values=None, reduction=None, axis=0):
"""construct a grouping object on the given keys, optionally performing the given reduction on the given values
Parameters
----------
keys : indexable object
keys to group by
values : array_like, optional
sequence of values, of the same length as keys
if a reduction function is provided, the given values are reduced by key
if no reduction is provided, the given values are grouped and split by key
reduction : lambda, optional
reduction function to apply to the values in each group
axis : int, optional
axis to regard as the key-sequence, in case keys is multi-dimensional
Returns
-------
iterable
if values is None, a GroupBy object of the given keys object
        if reduction is None, a tuple of a sequence of unique keys and a sequence of grouped values
else, a sequence of tuples of unique keys and reductions of values over that key-group
See Also
--------
numpy_indexed.as_index : for information regarding the casting rules to a valid Index object
"""
g = GroupBy(keys, axis)
if values is None:
return g
groups = g.split(values)
if reduction is None:
return g.unique, groups
return [(key, reduction(group)) for key, group in zip(g.unique, groups)]
__all__ = ['group_by']
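# Minimal usage sketch (not part of the original module), exercising the
# reduction and split paths documented above:
#
#   import numpy as np
#   from numpy_indexed import group_by
#
#   keys   = np.array([0, 0, 1, 1, 1])
#   values = np.array([1., 2., 3., 4., 5.])
#
#   group_by(keys).sum(values)           # (array([0, 1]), array([ 3., 12.]))
#   group_by(keys, values)               # unique keys, values split per group
#   group_by(keys, values, np.mean)      # [(0, 1.5), (1, 4.0)]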
| 32.655791
| 115
| 0.575082
|
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import itertools
import numpy as np
from numpy_indexed.index import as_index
import numpy_indexed as npi
__author__ = "Eelco Hoogendoorn"
__license__ = "LGPL"
__email__ = "hoogendoorn.eelco@gmail.com"
class GroupBy(object):
def __init__(self, keys, axis=0):
self.index = as_index(keys, axis)
@property
def unique(self):
return self.index.unique
@property
def count(self):
return self.index.count
@property
def inverse(self):
return self.index.inverse
@property
def groups(self):
return self.index.groups
def split_iterable_as_iterable(self, values):
values = iter(enumerate(values))
cache = dict()
def get_value(ti):
try:
return cache.pop(ti)
            except KeyError:
while True:
i, v = next(values)
if i==ti:
return v
cache[i] = v
s = iter(self.index.sorter)
for c in self.count:
yield (get_value(i) for i in itertools.islice(s, int(c)))
def split_iterable_as_unordered_iterable(self, values):
from collections import defaultdict
cache = defaultdict(list)
count = self.count
unique = self.unique
key = (lambda i: unique[i]) if isinstance(unique, np.ndarray) else (lambda i: tuple(c[i] for c in unique))
for i,v in zip(self.inverse, values):
cache[i].append(v)
if len(cache[i]) == count[i]:
yield key(i), cache.pop(i)
def split_sequence_as_iterable(self, values):
s = iter(self.index.sorter)
for c in self.count:
yield (values[i] for i in itertools.islice(s, int(c)))
def split_array_as_array(self, values):
if not self.index.uniform:
raise ValueError("Array can only be split as array if all groups have the same size")
values = np.asarray(values)
values = values[self.index.sorter]
return values.reshape(self.groups, -1, *values.shape[1:])
def split_array_as_list(self, values):
values = np.asarray(values)
values = values[self.index.sorter]
return np.split(values, self.index.slices[1:-1], axis=0)
def split(self, values):
try:
return self.split_array_as_array(values)
        except ValueError:
return self.split_array_as_list(values)
def __call__(self, values):
return self.unique, self.split(values)
def reduce(self, values, operator=np.add, axis=0, dtype=None):
values = np.take(values, self.index.sorter, axis=axis)
return operator.reduceat(values, self.index.start, axis=axis, dtype=dtype)
def sum(self, values, axis=0, dtype=None):
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype)
def prod(self, values, axis=0, dtype=None):
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, dtype=dtype, operator=np.multiply)
def mean(self, values, axis=0, weights=None, dtype=None):
values = np.asarray(values)
if weights is None:
result = self.reduce(values, axis=axis, dtype=dtype)
shape = [1] * values.ndim
shape[axis] = self.groups
weights = self.count.reshape(shape)
else:
weights = np.asarray(weights)
result = self.reduce(values * weights, axis=axis, dtype=dtype)
weights = self.reduce(weights, axis=axis, dtype=dtype)
return self.unique, result / weights
def var(self, values, axis=0, weights=None, dtype=None):
values = np.asarray(values)
unique, mean = self.mean(values, axis, weights, dtype)
err = values - mean.take(self.inverse, axis)
if weights is None:
shape = [1] * values.ndim
shape[axis] = self.groups
group_weights = self.count.reshape(shape)
var = self.reduce(err ** 2, axis=axis, dtype=dtype)
else:
weights = np.asarray(weights)
group_weights = self.reduce(weights, axis=axis, dtype=dtype)
var = self.reduce(weights * err ** 2, axis=axis, dtype=dtype)
return unique, var / group_weights
def std(self, values, axis=0, weights=None, dtype=None):
unique, var = self.var(values, axis, weights, dtype)
return unique, np.sqrt(var)
def median(self, values, axis=0, average=True):
mid_2 = self.index.start + self.index.stop
        hi = mid_2 // 2
lo = (mid_2 - 1) // 2
sorted_group_rank_per_key = self.index.sorted_group_rank_per_key
def median1d(slc):
slc = slc[self.index.sorter]
sorter = np.lexsort((slc, sorted_group_rank_per_key))
slc = slc[sorter]
return (slc[lo]+slc[hi]) / 2 if average else slc[hi]
values = np.asarray(values)
if values.ndim>1:
values = np.apply_along_axis(median1d, axis, values)
else:
values = median1d(values)
return self.unique, values
def mode(self, values, weights=None):
if weights is None:
unique, weights = npi.count((self.index.sorted_group_rank_per_key, values))
else:
unique, weights = npi.group_by((self.index.sorted_group_rank_per_key, values)).sum(weights)
x, bin = npi.group_by(unique[0]).argmax(weights)
return x, unique[1][bin]
def min(self, values, axis=0):
values = np.asarray(values)
return self.unique, self.reduce(values, np.minimum, axis)
def max(self, values, axis=0):
values = np.asarray(values)
return self.unique, self.reduce(values, np.maximum, axis)
def first(self, values, axis=0):
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.start], axis)
def last(self, values, axis=0):
values = np.asarray(values)
return self.unique, np.take(values, self.index.sorter[self.index.stop-1], axis)
def any(self, values, axis=0):
values = np.asarray(values)
        if values.dtype != bool:
values = values != 0
return self.unique, self.reduce(values, axis=axis) > 0
def all(self, values, axis=0):
values = np.asarray(values)
return self.unique, self.reduce(values, axis=axis, operator=np.multiply) != 0
def argmin(self, values):
keys, minima = self.min(values)
minima = minima[self.inverse]
index = as_index((self.inverse, values == minima))
return keys, index.sorter[index.start[-self.groups:]]
def argmax(self, values):
keys, maxima = self.max(values)
maxima = maxima[self.inverse]
index = as_index((self.inverse, values == maxima))
return keys, index.sorter[index.start[-self.groups:]]
def group_by(keys, values=None, reduction=None, axis=0):
g = GroupBy(keys, axis)
if values is None:
return g
groups = g.split(values)
if reduction is None:
return g.unique, groups
return [(key, reduction(group)) for key, group in zip(g.unique, groups)]
__all__ = ['group_by']
| true
| true
|
7904289e1b1ebfa4e3bf17c3f23e97055297fb93
| 287
|
py
|
Python
|
timezone_field/tests/models.py
|
ambitioninc/django-timezone-field
|
52ac4903b89700474bee18a8728e91974f246faa
|
[
"BSD-2-Clause"
] | null | null | null |
timezone_field/tests/models.py
|
ambitioninc/django-timezone-field
|
52ac4903b89700474bee18a8728e91974f246faa
|
[
"BSD-2-Clause"
] | 1
|
2018-12-11T15:49:22.000Z
|
2018-12-11T15:49:22.000Z
|
timezone_field/tests/models.py
|
ambitioninc/django-timezone-field
|
52ac4903b89700474bee18a8728e91974f246faa
|
[
"BSD-2-Clause"
] | 2
|
2017-09-19T19:27:55.000Z
|
2017-11-21T11:31:35.000Z
|
from __future__ import unicode_literals
from django.db import models
from timezone_field import TimeZoneField
class FakeModel(models.Model):
tz = TimeZoneField()
tz_opt = TimeZoneField(blank=True)
tz_opt_default = TimeZoneField(blank=True, default='America/Los_Angeles')
| 23.916667
| 77
| 0.787456
|
from __future__ import unicode_literals
from django.db import models
from timezone_field import TimeZoneField
class FakeModel(models.Model):
tz = TimeZoneField()
tz_opt = TimeZoneField(blank=True)
tz_opt_default = TimeZoneField(blank=True, default='America/Los_Angeles')
| true
| true
|
79042977dfb8c5d49992dddc323d6c55b868b944
| 475
|
py
|
Python
|
instagramHome/froms.py
|
Irene-nandy/Instagram
|
0032cc40cef86f37b602907b319f6b6e49695e44
|
[
"MIT"
] | null | null | null |
instagramHome/froms.py
|
Irene-nandy/Instagram
|
0032cc40cef86f37b602907b319f6b6e49695e44
|
[
"MIT"
] | null | null | null |
instagramHome/froms.py
|
Irene-nandy/Instagram
|
0032cc40cef86f37b602907b319f6b6e49695e44
|
[
"MIT"
] | null | null | null |
from django import forms
class PostForm(forms.Form):
image = forms.ImageField()
image_name = forms.CharField(widget=forms.TextInput(attrs={"class": "form-control","placeholder": "Image Name"}))
image_caption = forms.CharField(widget=forms.Textarea(attrs={"class": "form-control","placeholder": "Image Caption"}))
class CommentForm(forms.Form):
body = forms.CharField(widget=forms.Textarea(attrs={"class": "form-control","placeholder": "Leave a comment!"}))
| 47.5
| 122
| 0.726316
|
from django import forms
class PostForm(forms.Form):
image = forms.ImageField()
image_name = forms.CharField(widget=forms.TextInput(attrs={"class": "form-control","placeholder": "Image Name"}))
image_caption = forms.CharField(widget=forms.Textarea(attrs={"class": "form-control","placeholder": "Image Caption"}))
class CommentForm(forms.Form):
body = forms.CharField(widget=forms.Textarea(attrs={"class": "form-control","placeholder": "Leave a comment!"}))
| true
| true
|
790429bdadb16eceded5a4505fd1054e8b272e56
| 3,138
|
py
|
Python
|
app/requests.py
|
ClarisseU/newsHighlight
|
7e7cbc8ff6c6e512a6758359b4706a8a48093926
|
[
"MIT"
] | null | null | null |
app/requests.py
|
ClarisseU/newsHighlight
|
7e7cbc8ff6c6e512a6758359b4706a8a48093926
|
[
"MIT"
] | null | null | null |
app/requests.py
|
ClarisseU/newsHighlight
|
7e7cbc8ff6c6e512a6758359b4706a8a48093926
|
[
"MIT"
] | null | null | null |
import urllib.request,json
from .models import Sources, Articles
from datetime import datetime
#Getting api key
api_key = None
#Getting the news base url
# NEWS_API_KEY = None
# NEWS_API_BASE_URL = None
ARTICLE = None
def configure_request(app):
global api_key,NEWS_API_BASE_URL,NEWS_API_KEY,ARTICLE
api_key = app.config['NEWS_API_KEY']
ARTICLE = app.config['ARTICLE']
NEWS_API_BASE_URL = app.config['NEWS_API_BASE_URL']
NEWS_API_KEY = app.config['NEWS_API_KEY']
def get_source(category):
'''
function that gets the json response to our url request
'''
get_source_url = NEWS_API_BASE_URL.format(category,api_key)
print(get_source_url)
with urllib.request.urlopen(get_source_url) as url:
get_source_data = url.read()
get_source_response = json.loads(get_source_data)
sources_result = None
if get_source_response['sources']:
sources_results_list = get_source_response['sources']
sources_result = process_sources(sources_results_list)
print(sources_result)
return sources_result
def process_sources(sources_list):
'''
    Function that checks the news results and turns them into objects
Args:
sources_list: A list of dictionaries that contain sources details
'''
sources_result = []
for source_item in sources_list:
author = source_item.get('author')
title = source_item.get('title')
imageurl = source_item.get('urltoimage')
description = source_item.get('description')
url = source_item.get('url')
id = source_item.get('id')
sources_object = Sources(author, title,imageurl,description,url,id)
sources_result.append(sources_object)
return sources_result
def get_articles(id):
'''
Function that processes the articles and returns a list of articles objects
'''
get_articles_url = ARTICLE.format(id,api_key)
print(get_articles_url)
with urllib.request.urlopen(get_articles_url) as url:
article_data = url.read()
articles_response = json.loads(article_data)
articles_object = None
if articles_response['articles']:
response_list= articles_response['articles']
articles_object = process_articles(response_list)
return articles_object
def process_articles(articles_list):
'''
function that checks the articles and processes them into instances
'''
articles_object = []
for article_item in articles_list:
author = article_item.get('name')
title = article_item.get('title')
description = article_item.get('description')
url = article_item.get('url')
image = article_item.get('urlToImage')
date = article_item.get('publishedAt')
if image:
articles_result = Articles(author,title,description,url,image,date)
articles_object.append(articles_result)
return articles_object
| 30.466019
| 79
| 0.658062
|
import urllib.request,json
from .models import Sources, Articles
from datetime import datetime
api_key = None
ARTICLE = None
def configure_request(app):
global api_key,NEWS_API_BASE_URL,NEWS_API_KEY,ARTICLE
api_key = app.config['NEWS_API_KEY']
ARTICLE = app.config['ARTICLE']
NEWS_API_BASE_URL = app.config['NEWS_API_BASE_URL']
NEWS_API_KEY = app.config['NEWS_API_KEY']
def get_source(category):
get_source_url = NEWS_API_BASE_URL.format(category,api_key)
print(get_source_url)
with urllib.request.urlopen(get_source_url) as url:
get_source_data = url.read()
get_source_response = json.loads(get_source_data)
sources_result = None
if get_source_response['sources']:
sources_results_list = get_source_response['sources']
sources_result = process_sources(sources_results_list)
print(sources_result)
return sources_result
def process_sources(sources_list):
sources_result = []
for source_item in sources_list:
author = source_item.get('author')
title = source_item.get('title')
imageurl = source_item.get('urltoimage')
description = source_item.get('description')
url = source_item.get('url')
id = source_item.get('id')
sources_object = Sources(author, title,imageurl,description,url,id)
sources_result.append(sources_object)
return sources_result
def get_articles(id):
get_articles_url = ARTICLE.format(id,api_key)
print(get_articles_url)
with urllib.request.urlopen(get_articles_url) as url:
article_data = url.read()
articles_response = json.loads(article_data)
articles_object = None
if articles_response['articles']:
response_list= articles_response['articles']
articles_object = process_articles(response_list)
return articles_object
def process_articles(articles_list):
articles_object = []
for article_item in articles_list:
author = article_item.get('name')
title = article_item.get('title')
description = article_item.get('description')
url = article_item.get('url')
image = article_item.get('urlToImage')
date = article_item.get('publishedAt')
if image:
articles_result = Articles(author,title,description,url,image,date)
articles_object.append(articles_result)
return articles_object
| true
| true
|
790429ce7a101861daf7bce45c1b77c76fd27d8c
| 130
|
py
|
Python
|
release/scripts/presets/tracking_track_color/near_plane.py
|
rbabari/blender
|
6daa85f14b2974abfc3d0f654c5547f487bb3b74
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 365
|
2015-02-10T15:10:55.000Z
|
2022-03-03T15:50:51.000Z
|
release/scripts/presets/tracking_track_color/near_plane.py
|
rbabari/blender
|
6daa85f14b2974abfc3d0f654c5547f487bb3b74
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 45
|
2015-01-09T15:34:20.000Z
|
2021-10-05T14:44:23.000Z
|
release/scripts/presets/tracking_track_color/near_plane.py
|
rbabari/blender
|
6daa85f14b2974abfc3d0f654c5547f487bb3b74
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 172
|
2015-01-25T15:16:53.000Z
|
2022-01-31T08:25:36.000Z
|
import bpy
track = bpy.context.edit_movieclip.tracking.tracks.active
track.color = (0.0, 1.0, 0.0)
track.use_custom_color = True
| 21.666667
| 57
| 0.761538
|
import bpy
track = bpy.context.edit_movieclip.tracking.tracks.active
track.color = (0.0, 1.0, 0.0)
track.use_custom_color = True
| true
| true
|
79042b5b717aed371620bf2d3fd531e504b78b37
| 940
|
py
|
Python
|
PythonScripting/CSVFiles/examples3_csvmodule.py
|
Neo-sunny/pythonProgs
|
a9d2359d8a09d005d0ba6f94d7d256bf91499793
|
[
"MIT"
] | 622
|
2018-07-17T09:05:41.000Z
|
2022-03-29T02:57:02.000Z
|
Rice-Python-Data-Analysis/week3/examples3_csvmodule.py
|
thientvse/Courses-
|
263ff4680ed1dfd253be3652f7f13ad707af1a36
|
[
"Apache-2.0"
] | 21
|
2019-11-10T02:06:09.000Z
|
2022-01-22T23:54:11.000Z
|
Rice-Python-Data-Analysis/week3/examples3_csvmodule.py
|
thientvse/Courses-
|
263ff4680ed1dfd253be3652f7f13ad707af1a36
|
[
"Apache-2.0"
] | 906
|
2018-07-17T09:05:43.000Z
|
2022-03-31T12:55:49.000Z
|
"""
Using the csv module.
"""
import csv
def parse(csvfilename):
"""
Reads CSV file named csvfilename, parses
    its content and returns the data within
the file as a list of lists.
"""
table = []
with open(csvfilename, "r") as csvfile:
csvreader = csv.reader(csvfile,
skipinitialspace=True)
for row in csvreader:
table.append(row)
return table
def print_table(table):
"""
Print out table, which must be a list
of lists, in a nicely formatted way.
"""
for row in table:
# Header column left justified
print("{:<19}".format(row[0]), end='')
# Remaining columns right justified
for col in row[1:]:
print("{:>4}".format(col), end='')
print("", end='\n')
table = parse("hightemp.csv")
print_table(table)
print("")
print("")
table2 = parse("hightemp2.csv")
print_table(table2)
| 21.860465
| 53
| 0.578723
|
import csv
def parse(csvfilename):
table = []
with open(csvfilename, "r") as csvfile:
csvreader = csv.reader(csvfile,
skipinitialspace=True)
for row in csvreader:
table.append(row)
return table
def print_table(table):
for row in table:
print("{:<19}".format(row[0]), end='')
for col in row[1:]:
print("{:>4}".format(col), end='')
print("", end='\n')
table = parse("hightemp.csv")
print_table(table)
print("")
print("")
table2 = parse("hightemp2.csv")
print_table(table2)
| true
| true
|
79042b5fa45293fe7b76a7e810523136746912f9
| 3,028
|
py
|
Python
|
doc/extensions/empy_helpers/__init__.py
|
SirArep/ecal
|
9860efeb4ce0ef168630136d33947da02ecf0490
|
[
"Apache-2.0"
] | 493
|
2019-06-03T13:30:46.000Z
|
2022-03-26T16:18:57.000Z
|
doc/extensions/empy_helpers/__init__.py
|
SirArep/ecal
|
9860efeb4ce0ef168630136d33947da02ecf0490
|
[
"Apache-2.0"
] | 249
|
2019-06-04T09:01:24.000Z
|
2022-03-31T23:37:39.000Z
|
doc/extensions/empy_helpers/__init__.py
|
SirArep/ecal
|
9860efeb4ce0ef168630136d33947da02ecf0490
|
[
"Apache-2.0"
] | 114
|
2019-06-05T00:04:25.000Z
|
2022-03-22T10:22:04.000Z
|
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is borrowed from ros2/rosidl:
# https://github.com/ros2/rosidl/blob/master/rosidl_adapter/rosidl_adapter/resource/__init__.py
# Slight modifications were made, so proper paths to files are accepted.
from io import StringIO
import os
import sys
import em
def expand_template(template_name, data, output_file, encoding='utf-8'):
content = evaluate_template(template_name, data)
if output_file.exists():
existing_content = output_file.read_text(encoding=encoding)
if existing_content == content:
return
elif output_file.parent:
os.makedirs(str(output_file.parent), exist_ok=True)
output_file.write_text(content, encoding=encoding)
_interpreter = None
def evaluate_template(template_name, data):
global _interpreter
# create copy before manipulating
data = dict(data)
data['TEMPLATE'] = _evaluate_template
#template_path = os.path.join(os.path.dirname(__file__), template_name)
template_path = template_name
output = StringIO()
try:
_interpreter = em.Interpreter(
output=output,
options={
em.BUFFERED_OPT: True,
em.RAW_OPT: True,
})
with open(template_path, 'r') as h:
content = h.read()
_interpreter.invoke(
'beforeFile', name=template_name, file=h, locals=data)
_interpreter.string(content, template_path, locals=data)
_interpreter.invoke('afterFile')
return output.getvalue()
except Exception as e: # noqa: F841
print(
f"{e.__class__.__name__} processing template '{template_name}'",
file=sys.stderr)
raise
finally:
_interpreter.shutdown()
_interpreter = None
def _evaluate_template(template_name, **kwargs):
global _interpreter
#template_path = os.path.join(os.path.dirname(__file__), template_name)
template_path = template_name
with open(template_path, 'r') as h:
_interpreter.invoke(
'beforeInclude', name=template_path, file=h, locals=kwargs)
content = h.read()
try:
_interpreter.string(content, template_path, kwargs)
except Exception as e: # noqa: F841
print(
f"{e.__class__.__name__} processing template '{template_name}': "
f'{e}', file=sys.stderr)
sys.exit(1)
_interpreter.invoke('afterInclude')
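# Hypothetical usage (not part of the original file): output_file must provide
# the pathlib.Path interface (exists/read_text/parent/write_text) that
# expand_template relies on above; the template name here is made up.
#
#   from pathlib import Path
#   expand_template('page.rst.em', {'title': 'Home'}, Path('out/page.rst'))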
| 32.55914
| 95
| 0.679326
|
from io import StringIO
import os
import sys
import em
def expand_template(template_name, data, output_file, encoding='utf-8'):
content = evaluate_template(template_name, data)
if output_file.exists():
existing_content = output_file.read_text(encoding=encoding)
if existing_content == content:
return
elif output_file.parent:
os.makedirs(str(output_file.parent), exist_ok=True)
output_file.write_text(content, encoding=encoding)
_interpreter = None
def evaluate_template(template_name, data):
global _interpreter
data = dict(data)
data['TEMPLATE'] = _evaluate_template
template_path = template_name
output = StringIO()
try:
_interpreter = em.Interpreter(
output=output,
options={
em.BUFFERED_OPT: True,
em.RAW_OPT: True,
})
with open(template_path, 'r') as h:
content = h.read()
_interpreter.invoke(
'beforeFile', name=template_name, file=h, locals=data)
_interpreter.string(content, template_path, locals=data)
_interpreter.invoke('afterFile')
return output.getvalue()
except Exception as e:
print(
f"{e.__class__.__name__} processing template '{template_name}'",
file=sys.stderr)
raise
finally:
_interpreter.shutdown()
_interpreter = None
def _evaluate_template(template_name, **kwargs):
global _interpreter
template_path = template_name
with open(template_path, 'r') as h:
_interpreter.invoke(
'beforeInclude', name=template_path, file=h, locals=kwargs)
content = h.read()
try:
_interpreter.string(content, template_path, kwargs)
except Exception as e:
print(
f"{e.__class__.__name__} processing template '{template_name}': "
f'{e}', file=sys.stderr)
sys.exit(1)
_interpreter.invoke('afterInclude')
| true
| true
|
79042c25cc0b73c482b1669a91d9d0f5c9949028
| 775
|
py
|
Python
|
setup.py
|
jinnovation/metaflow
|
540f21133b08108f7129ce42b1c6a24fd9175b2f
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
jinnovation/metaflow
|
540f21133b08108f7129ce42b1c6a24fd9175b2f
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
jinnovation/metaflow
|
540f21133b08108f7129ce42b1c6a24fd9175b2f
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup, find_packages
version = "2.5.4"
setup(
include_package_data=True,
name="metaflow",
version=version,
description="Metaflow: More Data Science, Less Engineering",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
author="Machine Learning Infrastructure Team at Netflix",
author_email="help@metaflow.org",
license="Apache License 2.0",
packages=find_packages(exclude=["metaflow_test"]),
py_modules=[
"metaflow",
],
package_data={"metaflow": ["tutorials/*/*"]},
entry_points="""
[console_scripts]
metaflow=metaflow.main_cli:main
""",
install_requires=[
"requests",
"boto3",
"pylint",
],
)
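# A hedged aside on the entry_points stanza above (not part of setup.py):
# once the package is installed, the metaflow console script can also be
# resolved programmatically with the standard library. Python 3.10+ API
# shown; the target metaflow.main_cli:main comes from the stanza itself.
from importlib.metadata import entry_points

for ep in entry_points(group='console_scripts'):
    if ep.name == 'metaflow':
        main = ep.load()  # imports metaflow.main_cli and returns its main
        break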
| 25.833333
| 64
| 0.645161
|
from setuptools import setup, find_packages
version = "2.5.4"
setup(
include_package_data=True,
name="metaflow",
version=version,
description="Metaflow: More Data Science, Less Engineering",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
author="Machine Learning Infrastructure Team at Netflix",
author_email="help@metaflow.org",
license="Apache License 2.0",
packages=find_packages(exclude=["metaflow_test"]),
py_modules=[
"metaflow",
],
package_data={"metaflow": ["tutorials/*/*"]},
entry_points="""
[console_scripts]
metaflow=metaflow.main_cli:main
""",
install_requires=[
"requests",
"boto3",
"pylint",
],
)
| true
| true
|
79042e003a80fc0d1ea08474e610faa627656872
| 7,294
|
py
|
Python
|
myvenv/lib/python3.5/site-packages/babel/messages/mofile.py
|
tuvapp/tuvappcom
|
5ca2be19f4b0c86a1d4a9553711a4da9d3f32841
|
[
"MIT"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
desktop/core/ext-py/Babel-2.5.1/babel/messages/mofile.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
desktop/core/ext-py/Babel-2.5.1/babel/messages/mofile.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
# -*- coding: utf-8 -*-
"""
babel.messages.mofile
~~~~~~~~~~~~~~~~~~~~~
Writing of files in the ``gettext`` MO (machine object) format.
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
import array
import struct
from babel.messages.catalog import Catalog, Message
from babel._compat import range_type, array_tobytes
LE_MAGIC = 0x950412de
BE_MAGIC = 0xde120495
def read_mo(fileobj):
"""Read a binary MO file from the given file-like object and return a
corresponding `Catalog` object.
:param fileobj: the file-like object to read the MO file from
:note: The implementation of this function is heavily based on the
``GNUTranslations._parse`` method of the ``gettext`` module in the
standard library.
"""
catalog = Catalog()
headers = {}
filename = getattr(fileobj, 'name', '')
buf = fileobj.read()
buflen = len(buf)
unpack = struct.unpack
# Parse the .mo file header, which consists of 5 little endian 32
# bit words.
magic = unpack('<I', buf[:4])[0] # Are we big endian or little endian?
if magic == LE_MAGIC:
version, msgcount, origidx, transidx = unpack('<4I', buf[4:20])
ii = '<II'
elif magic == BE_MAGIC:
version, msgcount, origidx, transidx = unpack('>4I', buf[4:20])
ii = '>II'
else:
raise IOError(0, 'Bad magic number', filename)
# Now put all messages from the .mo file buffer into the catalog
# dictionary
for i in range_type(0, msgcount):
mlen, moff = unpack(ii, buf[origidx:origidx + 8])
mend = moff + mlen
tlen, toff = unpack(ii, buf[transidx:transidx + 8])
tend = toff + tlen
if mend < buflen and tend < buflen:
msg = buf[moff:mend]
tmsg = buf[toff:tend]
else:
raise IOError(0, 'File is corrupt', filename)
# See if we're looking at GNU .mo conventions for metadata
if mlen == 0:
# Catalog description
lastkey = key = None
for item in tmsg.splitlines():
item = item.strip()
if not item:
continue
if b':' in item:
key, value = item.split(b':', 1)
lastkey = key = key.strip().lower()
headers[key] = value.strip()
elif lastkey:
headers[lastkey] += b'\n' + item
if b'\x04' in msg: # context
ctxt, msg = msg.split(b'\x04')
else:
ctxt = None
if b'\x00' in msg: # plural forms
msg = msg.split(b'\x00')
tmsg = tmsg.split(b'\x00')
if catalog.charset:
msg = [x.decode(catalog.charset) for x in msg]
tmsg = [x.decode(catalog.charset) for x in tmsg]
else:
if catalog.charset:
msg = msg.decode(catalog.charset)
tmsg = tmsg.decode(catalog.charset)
catalog[msg] = Message(msg, tmsg, context=ctxt)
# advance to next entry in the seek tables
origidx += 8
transidx += 8
catalog.mime_headers = headers.items()
return catalog
def write_mo(fileobj, catalog, use_fuzzy=False):
"""Write a catalog to the specified file-like object using the GNU MO file
format.
>>> import sys
>>> from babel.messages import Catalog
>>> from gettext import GNUTranslations
>>> from babel._compat import BytesIO
>>> catalog = Catalog(locale='en_US')
>>> catalog.add('foo', 'Voh')
<Message ...>
>>> catalog.add((u'bar', u'baz'), (u'Bahr', u'Batz'))
<Message ...>
>>> catalog.add('fuz', 'Futz', flags=['fuzzy'])
<Message ...>
>>> catalog.add('Fizz', '')
<Message ...>
>>> catalog.add(('Fuzz', 'Fuzzes'), ('', ''))
<Message ...>
>>> buf = BytesIO()
>>> write_mo(buf, catalog)
>>> x = buf.seek(0)
>>> translations = GNUTranslations(fp=buf)
>>> if sys.version_info[0] >= 3:
... translations.ugettext = translations.gettext
... translations.ungettext = translations.ngettext
>>> translations.ugettext('foo')
u'Voh'
>>> translations.ungettext('bar', 'baz', 1)
u'Bahr'
>>> translations.ungettext('bar', 'baz', 2)
u'Batz'
>>> translations.ugettext('fuz')
u'fuz'
>>> translations.ugettext('Fizz')
u'Fizz'
>>> translations.ugettext('Fuzz')
u'Fuzz'
>>> translations.ugettext('Fuzzes')
u'Fuzzes'
:param fileobj: the file-like object to write to
:param catalog: the `Catalog` instance
:param use_fuzzy: whether translations marked as "fuzzy" should be included
in the output
"""
messages = list(catalog)
if not use_fuzzy:
messages[1:] = [m for m in messages[1:] if not m.fuzzy]
messages.sort()
ids = strs = b''
offsets = []
for message in messages:
# For each string, we need size and file offset. Each string is NUL
# terminated; the NUL does not count into the size.
if message.pluralizable:
msgid = b'\x00'.join([
msgid.encode(catalog.charset) for msgid in message.id
])
msgstrs = []
for idx, string in enumerate(message.string):
if not string:
msgstrs.append(message.id[min(int(idx), 1)])
else:
msgstrs.append(string)
msgstr = b'\x00'.join([
msgstr.encode(catalog.charset) for msgstr in msgstrs
])
else:
msgid = message.id.encode(catalog.charset)
if not message.string:
msgstr = message.id.encode(catalog.charset)
else:
msgstr = message.string.encode(catalog.charset)
if message.context:
msgid = b'\x04'.join([message.context.encode(catalog.charset),
msgid])
offsets.append((len(ids), len(msgid), len(strs), len(msgstr)))
ids += msgid + b'\x00'
strs += msgstr + b'\x00'
# The header is 7 32-bit unsigned integers. We don't use hash tables, so
# the keys start right after the index tables.
keystart = 7 * 4 + 16 * len(messages)
valuestart = keystart + len(ids)
# The string table first has the list of keys, then the list of values.
# Each entry has first the size of the string, then the file offset.
koffsets = []
voffsets = []
for o1, l1, o2, l2 in offsets:
koffsets += [l1, o1 + keystart]
voffsets += [l2, o2 + valuestart]
offsets = koffsets + voffsets
fileobj.write(struct.pack('Iiiiiii',
LE_MAGIC, # magic
0, # version
len(messages), # number of entries
7 * 4, # start of key index
7 * 4 + len(messages) * 8, # start of value index
0, 0 # size and offset of hash table
) + array_tobytes(array.array("i", offsets)) + ids + strs)
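# A round-trip sketch distilled from the write_mo doctest above; it assumes
# Babel is installed and that read_mo/write_mo are the functions defined in
# this module.
from io import BytesIO
from babel.messages import Catalog

catalog = Catalog(locale='en_US')
catalog.add('foo', 'Voh')

buf = BytesIO()
write_mo(buf, catalog)
buf.seek(0)

restored = read_mo(buf)
assert restored.get('foo').string == 'Voh'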
| 34.084112
| 89
| 0.541815
|
import array
import struct
from babel.messages.catalog import Catalog, Message
from babel._compat import range_type, array_tobytes
LE_MAGIC = 0x950412de
BE_MAGIC = 0xde120495
def read_mo(fileobj):
catalog = Catalog()
headers = {}
filename = getattr(fileobj, 'name', '')
buf = fileobj.read()
buflen = len(buf)
unpack = struct.unpack
magic = unpack('<I', buf[:4])[0]
if magic == LE_MAGIC:
version, msgcount, origidx, transidx = unpack('<4I', buf[4:20])
ii = '<II'
elif magic == BE_MAGIC:
version, msgcount, origidx, transidx = unpack('>4I', buf[4:20])
ii = '>II'
else:
raise IOError(0, 'Bad magic number', filename)
for i in range_type(0, msgcount):
mlen, moff = unpack(ii, buf[origidx:origidx + 8])
mend = moff + mlen
tlen, toff = unpack(ii, buf[transidx:transidx + 8])
tend = toff + tlen
if mend < buflen and tend < buflen:
msg = buf[moff:mend]
tmsg = buf[toff:tend]
else:
raise IOError(0, 'File is corrupt', filename)
if mlen == 0:
# Catalog description
lastkey = key = None
for item in tmsg.splitlines():
item = item.strip()
if not item:
continue
if b':' in item:
key, value = item.split(b':', 1)
lastkey = key = key.strip().lower()
headers[key] = value.strip()
elif lastkey:
headers[lastkey] += b'\n' + item
if b'\x04' in msg: # context
ctxt, msg = msg.split(b'\x04')
else:
ctxt = None
if b'\x00' in msg: # plural forms
msg = msg.split(b'\x00')
tmsg = tmsg.split(b'\x00')
if catalog.charset:
msg = [x.decode(catalog.charset) for x in msg]
tmsg = [x.decode(catalog.charset) for x in tmsg]
else:
if catalog.charset:
msg = msg.decode(catalog.charset)
tmsg = tmsg.decode(catalog.charset)
catalog[msg] = Message(msg, tmsg, context=ctxt)
# advance to next entry in the seek tables
origidx += 8
transidx += 8
catalog.mime_headers = headers.items()
return catalog
def write_mo(fileobj, catalog, use_fuzzy=False):
messages = list(catalog)
if not use_fuzzy:
messages[1:] = [m for m in messages[1:] if not m.fuzzy]
messages.sort()
ids = strs = b''
offsets = []
for message in messages:
# For each string, we need size and file offset. Each string is NUL
# terminated; the NUL does not count into the size.
if message.pluralizable:
msgid = b'\x00'.join([
msgid.encode(catalog.charset) for msgid in message.id
])
msgstrs = []
for idx, string in enumerate(message.string):
if not string:
msgstrs.append(message.id[min(int(idx), 1)])
else:
msgstrs.append(string)
msgstr = b'\x00'.join([
msgstr.encode(catalog.charset) for msgstr in msgstrs
])
else:
msgid = message.id.encode(catalog.charset)
if not message.string:
msgstr = message.id.encode(catalog.charset)
else:
msgstr = message.string.encode(catalog.charset)
if message.context:
msgid = b'\x04'.join([message.context.encode(catalog.charset),
msgid])
offsets.append((len(ids), len(msgid), len(strs), len(msgstr)))
ids += msgid + b'\x00'
strs += msgstr + b'\x00'
# The header is 7 32-bit unsigned integers. We don't use hash tables, so
keystart = 7 * 4 + 16 * len(messages)
valuestart = keystart + len(ids)
koffsets = []
voffsets = []
for o1, l1, o2, l2 in offsets:
koffsets += [l1, o1 + keystart]
voffsets += [l2, o2 + valuestart]
offsets = koffsets + voffsets
fileobj.write(struct.pack('Iiiiiii',
LE_MAGIC,
0,
len(messages),
7 * 4,
7 * 4 + len(messages) * 8,
0, 0
) + array_tobytes(array.array("i", offsets)) + ids + strs)
| true
| true
|
79042e70f30245770d9db6b182ee23c020d301ec
| 7,960
|
py
|
Python
|
utils/lib_classifier.py
|
eddylamhw/trAIner24
|
ac7cf1b95a2ecdfc44d11451984b016524ed7657
|
[
"MIT"
] | 1
|
2021-11-25T16:32:51.000Z
|
2021-11-25T16:32:51.000Z
|
utils/lib_classifier.py
|
eddylamhw/trAIner24
|
ac7cf1b95a2ecdfc44d11451984b016524ed7657
|
[
"MIT"
] | null | null | null |
utils/lib_classifier.py
|
eddylamhw/trAIner24
|
ac7cf1b95a2ecdfc44d11451984b016524ed7657
|
[
"MIT"
] | null | null | null |
'''
This script includes:
1. ClassifierOfflineTrain
This is for offline training. The input data are the processed features.
2. class ClassifierOnlineTest(object)
This is for online testing. The input data are the raw skeletons.
It uses FeatureGenerator to extract features,
and then uses ClassifierOfflineTrain to recognize the action.
Note: this model only recognizes the action of one person.
TODO: Add more comments to this script.
'''
import numpy as np
import sys
import os
import pickle
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from collections import deque
import cv2
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.decomposition import PCA
if True:
import sys
import os
ROOT = os.path.dirname(os.path.abspath(__file__))+"/../"
sys.path.append(ROOT)
from utils.lib_feature_proc import FeatureGenerator
# -- Settings
NUM_FEATURES_FROM_PCA = 50
# -- Classes
class ClassifierOfflineTrain(object):
    ''' The classifier for offline training.
The input features to this classifier are already
processed by `class FeatureGenerator`.
'''
def __init__(self):
self._init_all_models()
# self.clf = self._choose_model("Nearest Neighbors")
# self.clf = self._choose_model("Linear SVM")
# self.clf = self._choose_model("RBF SVM")
# self.clf = self._choose_model("Gaussian Process")
# self.clf = self._choose_model("Decision Tree")
# self.clf = self._choose_model("Random Forest")
self.clf = self._choose_model("Neural Net")
def predict(self, X):
''' Predict the class index of the feature X '''
Y_predict = self.clf.predict(self.pca.transform(X))
return Y_predict
def predict_and_evaluate(self, te_X, te_Y):
''' Test model on test set and obtain accuracy '''
te_Y_predict = self.predict(te_X)
N = len(te_Y)
n = sum(te_Y_predict == te_Y)
accu = n / N
return accu, te_Y_predict
def train(self, X, Y):
''' Train model. The result is saved into self.clf '''
n_components = min(NUM_FEATURES_FROM_PCA, X.shape[1])
self.pca = PCA(n_components=n_components, whiten=True)
self.pca.fit(X)
# print("Sum eig values:", np.sum(self.pca.singular_values_))
print("Sum eig values:", np.sum(self.pca.explained_variance_ratio_))
X_new = self.pca.transform(X)
print("After PCA, X.shape = ", X_new.shape)
self.clf.fit(X_new, Y)
def _choose_model(self, name):
self.model_name = name
idx = self.names.index(name)
return self.classifiers[idx]
def _init_all_models(self):
self.names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Gaussian Process",
"Decision Tree", "Random Forest", "Neural Net", "AdaBoost",
"Naive Bayes", "QDA"]
self.model_name = None
self.classifiers = [
KNeighborsClassifier(5),
SVC(kernel="linear", C=10.0),
SVC(gamma=0.01, C=1.0, verbose=True),
GaussianProcessClassifier(1.0 * RBF(1.0)),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(
max_depth=30, n_estimators=100, max_features="auto"),
MLPClassifier((20, 30, 40)), # Neural Net
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis()]
def _predict_proba(self, X):
''' Predict the probability of feature X belonging to each of the class Y[i] '''
Y_probs = self.clf.predict_proba(self.pca.transform(X))
return Y_probs # np.array with a length of len(classes)
class ClassifierOnlineTest(object):
''' Classifier for online inference.
        The input data to this classifier is the raw skeleton data, so it
        is processed by `class FeatureGenerator` before being sent to the
        self.model trained by `class ClassifierOfflineTrain`.
'''
def __init__(self, model_path, action_labels, window_size, human_id=0):
# -- Settings
self.human_id = human_id
with open(model_path, 'rb') as f:
self.model = pickle.load(f)
if self.model is None:
print("my Error: failed to load model")
assert False
self.action_labels = action_labels
self.THRESHOLD_SCORE_FOR_DISP = 0.5
        # -- Time series storage
self.feature_generator = FeatureGenerator(window_size)
self.reset()
def reset(self):
self.feature_generator.reset()
self.scores_hist = deque()
self.scores = None
def predict(self, skeleton):
''' Predict the class (string) of the input raw skeleton '''
LABEL_UNKNOWN = ""
is_features_good, features = self.feature_generator.add_cur_skeleton(
skeleton)
if is_features_good:
# convert to 2d array
features = features.reshape(-1, features.shape[0])
curr_scores = self.model._predict_proba(features)[0]
self.scores = self.smooth_scores(curr_scores)
            if self.scores.max() < self.THRESHOLD_SCORE_FOR_DISP:  # below threshold: prediction unreliable
                predicted_label = LABEL_UNKNOWN
            else:
                predicted_idx = self.scores.argmax()
                predicted_label = self.action_labels[predicted_idx]
        else:
            predicted_label = LABEL_UNKNOWN
        return predicted_label
def smooth_scores(self, curr_scores):
''' Smooth the current prediction score
by taking the average with previous scores
'''
self.scores_hist.append(curr_scores)
DEQUE_MAX_SIZE = 2
if len(self.scores_hist) > DEQUE_MAX_SIZE:
self.scores_hist.popleft()
if 1: # Use sum
score_sums = np.zeros((len(self.action_labels),))
for score in self.scores_hist:
score_sums += score
score_sums /= len(self.scores_hist)
print("\nMean score:\n", score_sums)
return score_sums
else: # Use multiply
score_mul = np.ones((len(self.action_labels),))
for score in self.scores_hist:
score_mul *= score
return score_mul
def draw_scores_onto_image(self, img_disp):
if self.scores is None:
return
for i in range(len(self.action_labels)):
FONT_SIZE = 0.6
TXT_X = 20
TXT_Y = 150 + i*30
COLOR_INTENSITY = 255
            if i != -1:
label = self.action_labels[i]
s = "{:<5}: {:.2f}".format(label, self.scores[i])
COLOR_INTENSITY *= (0.0 + 1.0 * self.scores[i])**0.5
cv2.putText(img_disp, text=s, org=(TXT_X, TXT_Y),
fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=FONT_SIZE,
color=(0, 0, int(COLOR_INTENSITY)), thickness=2)
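# A minimal training sketch for ClassifierOfflineTrain on synthetic data
# (an assumption for illustration; real features come from FeatureGenerator).
# make_classification is already imported at the top of this module.
from sklearn.datasets import make_classification

X, Y = make_classification(n_samples=200, n_features=60, n_classes=3,
                           n_informative=10, random_state=0)
clf = ClassifierOfflineTrain()
clf.train(X, Y)                         # fits PCA (50 components) + the MLP
accu, _ = clf.predict_and_evaluate(X, Y)
print("train accuracy:", accu)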
| 35.855856
| 97
| 0.629899
|
import numpy as np
import sys
import os
import pickle
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from collections import deque
import cv2
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.decomposition import PCA
if True:
import sys
import os
ROOT = os.path.dirname(os.path.abspath(__file__))+"/../"
sys.path.append(ROOT)
from utils.lib_feature_proc import FeatureGenerator
NUM_FEATURES_FROM_PCA = 50
class ClassifierOfflineTrain(object):
def __init__(self):
self._init_all_models()
self.clf = self._choose_model("Neural Net")
def predict(self, X):
Y_predict = self.clf.predict(self.pca.transform(X))
return Y_predict
def predict_and_evaluate(self, te_X, te_Y):
te_Y_predict = self.predict(te_X)
N = len(te_Y)
n = sum(te_Y_predict == te_Y)
accu = n / N
return accu, te_Y_predict
def train(self, X, Y):
n_components = min(NUM_FEATURES_FROM_PCA, X.shape[1])
self.pca = PCA(n_components=n_components, whiten=True)
self.pca.fit(X)
print("Sum eig values:", np.sum(self.pca.explained_variance_ratio_))
X_new = self.pca.transform(X)
print("After PCA, X.shape = ", X_new.shape)
self.clf.fit(X_new, Y)
def _choose_model(self, name):
self.model_name = name
idx = self.names.index(name)
return self.classifiers[idx]
def _init_all_models(self):
self.names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Gaussian Process",
"Decision Tree", "Random Forest", "Neural Net", "AdaBoost",
"Naive Bayes", "QDA"]
self.model_name = None
self.classifiers = [
KNeighborsClassifier(5),
SVC(kernel="linear", C=10.0),
SVC(gamma=0.01, C=1.0, verbose=True),
GaussianProcessClassifier(1.0 * RBF(1.0)),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(
max_depth=30, n_estimators=100, max_features="auto"),
MLPClassifier((20, 30, 40)),
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis()]
def _predict_proba(self, X):
Y_probs = self.clf.predict_proba(self.pca.transform(X))
return Y_probs
class ClassifierOnlineTest(object):
def __init__(self, model_path, action_labels, window_size, human_id=0):
self.human_id = human_id
with open(model_path, 'rb') as f:
self.model = pickle.load(f)
if self.model is None:
print("my Error: failed to load model")
assert False
self.action_labels = action_labels
self.THRESHOLD_SCORE_FOR_DISP = 0.5
self.feature_generator = FeatureGenerator(window_size)
self.reset()
def reset(self):
self.feature_generator.reset()
self.scores_hist = deque()
self.scores = None
def predict(self, skeleton):
LABEL_UNKNOWN = ""
is_features_good, features = self.feature_generator.add_cur_skeleton(
skeleton)
if is_features_good:
features = features.reshape(-1, features.shape[0])
curr_scores = self.model._predict_proba(features)[0]
self.scores = self.smooth_scores(curr_scores)
if self.scores.max() < self.THRESHOLD_SCORE_FOR_DISP:
                predicted_label = LABEL_UNKNOWN
            else:
                predicted_idx = self.scores.argmax()
                predicted_label = self.action_labels[predicted_idx]
        else:
            predicted_label = LABEL_UNKNOWN
        return predicted_label
def smooth_scores(self, curr_scores):
self.scores_hist.append(curr_scores)
DEQUE_MAX_SIZE = 2
if len(self.scores_hist) > DEQUE_MAX_SIZE:
self.scores_hist.popleft()
if 1:
score_sums = np.zeros((len(self.action_labels),))
for score in self.scores_hist:
score_sums += score
score_sums /= len(self.scores_hist)
print("\nMean score:\n", score_sums)
return score_sums
else:
score_mul = np.ones((len(self.action_labels),))
for score in self.scores_hist:
score_mul *= score
return score_mul
def draw_scores_onto_image(self, img_disp):
if self.scores is None:
return
for i in range(len(self.action_labels)):
FONT_SIZE = 0.6
TXT_X = 20
TXT_Y = 150 + i*30
COLOR_INTENSITY = 255
            if i != -1:
label = self.action_labels[i]
s = "{:<5}: {:.2f}".format(label, self.scores[i])
COLOR_INTENSITY *= (0.0 + 1.0 * self.scores[i])**0.5
cv2.putText(img_disp, text=s, org=(TXT_X, TXT_Y),
fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=FONT_SIZE,
color=(0, 0, int(COLOR_INTENSITY)), thickness=2)
| true
| true
|
79042f677f4accd9f4c0858e4c01d2f06341605e
| 1,359
|
py
|
Python
|
neural_compressor/ux/web/service/optimization.py
|
intel/neural-compressor
|
16a4a12045fcb468da4d33769aff2c1a5e2ba6ba
|
[
"Apache-2.0"
] | 172
|
2021-09-14T18:34:17.000Z
|
2022-03-30T06:49:53.000Z
|
neural_compressor/ux/web/service/optimization.py
|
intel/lp-opt-tool
|
130eefa3586b38df6c0ff78cc8807ae273f6a63f
|
[
"Apache-2.0"
] | 40
|
2021-09-14T02:26:12.000Z
|
2022-03-29T08:34:04.000Z
|
neural_compressor/ux/web/service/optimization.py
|
intel/neural-compressor
|
16a4a12045fcb468da4d33769aff2c1a5e2ba6ba
|
[
"Apache-2.0"
] | 33
|
2021-09-15T07:27:25.000Z
|
2022-03-25T08:30:57.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Optimization service."""
from neural_compressor.ux.components.db_manager.db_operations import OptimizationAPIInterface
from neural_compressor.ux.web.service.request_data_processor import RequestDataProcessor
from neural_compressor.ux.web.service.workload import WorkloadService
class OptimizationService(WorkloadService):
"""Optimization related services."""
@staticmethod
def _get_workload_data(data: dict) -> dict:
"""Return data for requested Workload."""
optimization_id = RequestDataProcessor.get_string_value(data, "id")
optimization_data = OptimizationAPIInterface.get_optimization_details(
{
"id": optimization_id,
},
)
return optimization_data
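# A hypothetical call site, sketched only from the method above; in the real
# service the request dict arrives through the web layer, and the id value
# here is made up.
details = OptimizationService._get_workload_data({"id": "1"})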
| 38.828571
| 93
| 0.740986
|
from neural_compressor.ux.components.db_manager.db_operations import OptimizationAPIInterface
from neural_compressor.ux.web.service.request_data_processor import RequestDataProcessor
from neural_compressor.ux.web.service.workload import WorkloadService
class OptimizationService(WorkloadService):
@staticmethod
def _get_workload_data(data: dict) -> dict:
optimization_id = RequestDataProcessor.get_string_value(data, "id")
optimization_data = OptimizationAPIInterface.get_optimization_details(
{
"id": optimization_id,
},
)
return optimization_data
| true
| true
|
79043116cb4ef86e78c4d61d4d638ab85ee33161
| 19,551
|
py
|
Python
|
lib/dataset/pascal_voc.py
|
alphadadajuju/Deep-Feature-Flow-mod
|
1df87d923c50722f508897710f32974b8a0c510f
|
[
"MIT"
] | null | null | null |
lib/dataset/pascal_voc.py
|
alphadadajuju/Deep-Feature-Flow-mod
|
1df87d923c50722f508897710f32974b8a0c510f
|
[
"MIT"
] | null | null | null |
lib/dataset/pascal_voc.py
|
alphadadajuju/Deep-Feature-Flow-mod
|
1df87d923c50722f508897710f32974b8a0c510f
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by Haozhi Qi, from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# --------------------------------------------------------
"""
Pascal VOC database
This class loads ground truth annotations from the standard Pascal VOC XML data format
and transforms them into IMDB format. Selective search is used for proposals; see the roidb
function. Results are written in the Pascal VOC format. Evaluation is based on the mAP
criterion.
"""
import cPickle
import cv2
import os
import numpy as np
import PIL
from imdb import IMDB
from pascal_voc_eval import voc_eval
from ds_utils import unique_boxes, filter_small_boxes
class PascalVOC(IMDB):
def __init__(self, image_set, root_path, devkit_path, result_path=None, mask_size=-1, binary_thresh=None):
"""
fill basic information to initialize imdb
:param image_set: 2007_trainval, 2007_test, etc
:param root_path: 'selective_search_data' and 'cache'
:param devkit_path: data and results
:return: imdb object
"""
year = image_set.split('_')[0]
image_set = image_set[len(year) + 1 : len(image_set)]
super(PascalVOC, self).__init__('voc_' + year, image_set, root_path, devkit_path, result_path) # set self.name
self.year = year
self.root_path = root_path
self.devkit_path = devkit_path
self.data_path = os.path.join(devkit_path, 'VOC' + year)
self.classes = ['__background__', # always index 0
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor']
self.num_classes = len(self.classes)
self.image_set_index = self.load_image_set_index()
self.num_images = len(self.image_set_index)
print 'num_images', self.num_images
self.mask_size = mask_size
self.binary_thresh = binary_thresh
self.config = {'comp_id': 'comp4',
'use_diff': False,
'min_size': 2}
def load_image_set_index(self):
"""
find out which indexes correspond to given image set (train or val)
:return:
"""
image_set_index_file = os.path.join(self.data_path, 'ImageSets', 'Main', self.image_set + '.txt')
assert os.path.exists(image_set_index_file), 'Path does not exist: {}'.format(image_set_index_file)
with open(image_set_index_file) as f:
image_set_index = [x.strip() for x in f.readlines()]
return image_set_index
def image_path_from_index(self, index):
"""
given image index, find out full path
:param index: index of a specific image
:return: full path of this image
"""
image_file = os.path.join(self.data_path, 'JPEGImages', index + '.jpg')
assert os.path.exists(image_file), 'Path does not exist: {}'.format(image_file)
return image_file
def segmentation_path_from_index(self, index):
"""
given image index, find out the full path of segmentation class
:param index: index of a specific image
:return: full path of segmentation class
"""
seg_class_file = os.path.join(self.data_path, 'SegmentationClass', index + '.png')
assert os.path.exists(seg_class_file), 'Path does not exist: {}'.format(seg_class_file)
return seg_class_file
def gt_roidb(self):
"""
return ground truth image regions database
:return: imdb[image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} gt roidb loaded from {}'.format(self.name, cache_file)
return roidb
gt_roidb = [self.load_pascal_annotation(index) for index in self.image_set_index]
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote gt roidb to {}'.format(cache_file)
return gt_roidb
def gt_segdb(self):
"""
return ground truth image regions database
:return: imdb[image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_segdb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
segdb = cPickle.load(fid)
print '{} gt segdb loaded from {}'.format(self.name, cache_file)
return segdb
gt_segdb = [self.load_pascal_segmentation_annotation(index) for index in self.image_set_index]
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_segdb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote gt segdb to {}'.format(cache_file)
return gt_segdb
def load_pascal_annotation(self, index):
"""
for a given index, load image and bounding boxes info from XML file
:param index: index of a specific image
:return: record['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
"""
import xml.etree.ElementTree as ET
roi_rec = dict()
roi_rec['image'] = self.image_path_from_index(index)
filename = os.path.join(self.data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
size = tree.find('size')
roi_rec['height'] = float(size.find('height').text)
roi_rec['width'] = float(size.find('width').text)
#im_size = cv2.imread(roi_rec['image'], cv2.IMREAD_COLOR|cv2.IMREAD_IGNORE_ORIENTATION).shape
#assert im_size[0] == roi_rec['height'] and im_size[1] == roi_rec['width']
objs = tree.findall('object')
if not self.config['use_diff']:
non_diff_objs = [obj for obj in objs if int(obj.find('difficult').text) == 0]
objs = non_diff_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
class_to_index = dict(zip(self.classes, range(self.num_classes)))
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
cls = class_to_index[obj.find('name').text.lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
roi_rec.update({'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'max_classes': overlaps.argmax(axis=1),
'max_overlaps': overlaps.max(axis=1),
'flipped': False})
return roi_rec
def load_selective_search_roidb(self, gt_roidb):
"""
turn selective search proposals into selective search roidb
:param gt_roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
:return: roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
"""
import scipy.io
matfile = os.path.join(self.root_path, 'selective_search_data', self.name + '.mat')
assert os.path.exists(matfile), 'selective search data does not exist: {}'.format(matfile)
raw_data = scipy.io.loadmat(matfile)['boxes'].ravel() # original was dict ['images', 'boxes']
box_list = []
for i in range(raw_data.shape[0]):
boxes = raw_data[i][:, (1, 0, 3, 2)] - 1 # pascal voc dataset starts from 1.
keep = unique_boxes(boxes)
boxes = boxes[keep, :]
keep = filter_small_boxes(boxes, self.config['min_size'])
boxes = boxes[keep, :]
box_list.append(boxes)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def selective_search_roidb(self, gt_roidb, append_gt=False):
"""
get selective search roidb and ground truth roidb
:param gt_roidb: ground truth roidb
:param append_gt: append ground truth
:return: roidb of selective search
"""
cache_file = os.path.join(self.cache_path, self.name + '_ss_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} ss roidb loaded from {}'.format(self.name, cache_file)
return roidb
if append_gt:
print 'appending ground truth annotations'
ss_roidb = self.load_selective_search_roidb(gt_roidb)
roidb = IMDB.merge_roidbs(gt_roidb, ss_roidb)
else:
roidb = self.load_selective_search_roidb(gt_roidb)
with open(cache_file, 'wb') as fid:
cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote ss roidb to {}'.format(cache_file)
return roidb
def load_pascal_segmentation_annotation(self, index):
"""
        for a given index, load the image size and segmentation mask path
:param index: index of a specific image
:return: record['seg_cls_path', 'flipped']
"""
seg_rec = dict()
seg_rec['image'] = self.image_path_from_index(index)
size = cv2.imread(seg_rec['image']).shape
seg_rec['height'] = size[0]
seg_rec['width'] = size[1]
seg_rec['seg_cls_path'] = self.segmentation_path_from_index(index)
seg_rec['flipped'] = False
return seg_rec
def evaluate_detections(self, detections):
"""
top level evaluations
:param detections: result matrix, [bbox, confidence]
        :return: the evaluation info string
"""
# make all these folders for results
result_dir = os.path.join(self.result_path, 'results')
if not os.path.exists(result_dir):
os.mkdir(result_dir)
year_folder = os.path.join(self.result_path, 'results', 'VOC' + self.year)
if not os.path.exists(year_folder):
os.mkdir(year_folder)
res_file_folder = os.path.join(self.result_path, 'results', 'VOC' + self.year, 'Main')
if not os.path.exists(res_file_folder):
os.mkdir(res_file_folder)
self.write_pascal_results(detections)
info = self.do_python_eval()
return info
def evaluate_segmentations(self, pred_segmentations=None):
"""
top level evaluations
:param pred_segmentations: the pred segmentation result
:return: the evaluation results
"""
# make all these folders for results
        if pred_segmentations is not None:
self.write_pascal_segmentation_result(pred_segmentations)
info = self._py_evaluate_segmentation()
return info
def write_pascal_segmentation_result(self, pred_segmentations):
"""
Write pred segmentation to res_file_folder
:param pred_segmentations: the pred segmentation results
:param res_file_folder: the saving folder
:return: [None]
"""
result_dir = os.path.join(self.result_path, 'results')
if not os.path.exists(result_dir):
os.mkdir(result_dir)
year_folder = os.path.join(self.result_path, 'results', 'VOC' + self.year)
if not os.path.exists(year_folder):
os.mkdir(year_folder)
res_file_folder = os.path.join(self.result_path, 'results', 'VOC' + self.year, 'Segmentation')
if not os.path.exists(res_file_folder):
os.mkdir(res_file_folder)
result_dir = os.path.join(self.result_path, 'results', 'VOC' + self.year, 'Segmentation')
if not os.path.exists(result_dir):
os.mkdir(result_dir)
pallete = self.get_pallete(256)
for i, index in enumerate(self.image_set_index):
segmentation_result = np.uint8(np.squeeze(np.copy(pred_segmentations[i])))
segmentation_result = PIL.Image.fromarray(segmentation_result)
segmentation_result.putpalette(pallete)
segmentation_result.save(os.path.join(result_dir, '%s.png'%(index)))
def get_pallete(self, num_cls):
"""
        get the colormap (palette) for visualizing the segmentation mask
        :param num_cls: the number of visualized classes
        :return: the palette
"""
n = num_cls
pallete = [0]*(n*3)
for j in xrange(0,n):
lab = j
pallete[j*3+0] = 0
pallete[j*3+1] = 0
pallete[j*3+2] = 0
i = 0
while (lab > 0):
pallete[j*3+0] |= (((lab >> 0) & 1) << (7-i))
pallete[j*3+1] |= (((lab >> 1) & 1) << (7-i))
pallete[j*3+2] |= (((lab >> 2) & 1) << (7-i))
i = i + 1
lab >>= 3
return pallete
def get_confusion_matrix(self, gt_label, pred_label, class_num):
"""
        Calculate the confusion matrix from the given ground truth and predicted labels
        :param gt_label: the ground truth label
        :param pred_label: the predicted label
        :param class_num: the number of classes
:return: the confusion matrix
"""
index = (gt_label * class_num + pred_label).astype('int32')
label_count = np.bincount(index)
confusion_matrix = np.zeros((class_num, class_num))
for i_label in range(class_num):
for i_pred_label in range(class_num):
cur_index = i_label * class_num + i_pred_label
if cur_index < len(label_count):
confusion_matrix[i_label, i_pred_label] = label_count[cur_index]
return confusion_matrix
def _py_evaluate_segmentation(self):
"""
        This function is a wrapper to calculate the metrics for the given pred_segmentation results
:param pred_segmentations: the pred segmentation result
:return: the evaluation metrics
"""
confusion_matrix = np.zeros((self.num_classes,self.num_classes))
result_dir = os.path.join(self.result_path, 'results', 'VOC' + self.year, 'Segmentation')
for i, index in enumerate(self.image_set_index):
seg_gt_info = self.load_pascal_segmentation_annotation(index)
seg_gt_path = seg_gt_info['seg_cls_path']
seg_gt = np.array(PIL.Image.open(seg_gt_path)).astype('float32')
seg_pred_path = os.path.join(result_dir, '%s.png'%(index))
seg_pred = np.array(PIL.Image.open(seg_pred_path)).astype('float32')
seg_gt = cv2.resize(seg_gt, (seg_pred.shape[1], seg_pred.shape[0]), interpolation=cv2.INTER_NEAREST)
ignore_index = seg_gt != 255
seg_gt = seg_gt[ignore_index]
seg_pred = seg_pred[ignore_index]
confusion_matrix += self.get_confusion_matrix(seg_gt, seg_pred, self.num_classes)
pos = confusion_matrix.sum(1)
res = confusion_matrix.sum(0)
tp = np.diag(confusion_matrix)
IU_array = (tp / np.maximum(1.0, pos + res - tp))
mean_IU = IU_array.mean()
return {'meanIU':mean_IU, 'IU_array':IU_array}
def get_result_file_template(self):
"""
this is a template
VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
:return: a string template
"""
res_file_folder = os.path.join(self.result_path, 'results', 'VOC' + self.year, 'Main')
comp_id = self.config['comp_id']
filename = comp_id + '_det_' + self.image_set + '_{:s}.txt'
path = os.path.join(res_file_folder, filename)
return path
def write_pascal_results(self, all_boxes):
"""
write results files in pascal devkit path
:param all_boxes: boxes to be processed [bbox, confidence]
:return: None
"""
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
print 'Writing {} VOC results file'.format(cls)
filename = self.get_result_file_template().format(cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.image_set_index):
dets = all_boxes[cls_ind][im_ind]
if len(dets) == 0:
continue
# the VOCdevkit expects 1-based indices
for k in range(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1, dets[k, 2] + 1, dets[k, 3] + 1))
def do_python_eval(self):
"""
python evaluation wrapper
:return: info_str
"""
info_str = ''
annopath = os.path.join(self.data_path, 'Annotations', '{0!s}.xml')
imageset_file = os.path.join(self.data_path, 'ImageSets', 'Main', self.image_set + '.txt')
annocache = os.path.join(self.cache_path, self.name + '_annotations.pkl')
aps = []
# The PASCAL VOC metric changed in 2010
use_07_metric = True if self.year == 'SDS' or int(self.year) < 2010 else False
        print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
        info_str += 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
info_str += '\n'
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
filename = self.get_result_file_template().format(cls)
rec, prec, ap = voc_eval(filename, annopath, imageset_file, cls, annocache,
ovthresh=0.5, use_07_metric=use_07_metric)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
info_str += 'AP for {} = {:.4f}\n'.format(cls, ap)
print('Mean AP@0.5 = {:.4f}'.format(np.mean(aps)))
info_str += 'Mean AP@0.5 = {:.4f}\n\n'.format(np.mean(aps))
# @0.7
aps = []
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
filename = self.get_result_file_template().format(cls)
rec, prec, ap = voc_eval(filename, annopath, imageset_file, cls, annocache,
ovthresh=0.7, use_07_metric=use_07_metric)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
info_str += 'AP for {} = {:.4f}\n'.format(cls, ap)
print('Mean AP@0.7 = {:.4f}'.format(np.mean(aps)))
info_str += 'Mean AP@0.7 = {:.4f}'.format(np.mean(aps))
return info_str
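# A tiny worked example (plain NumPy, no VOC data needed) of the
# confusion-matrix bookkeeping used by get_confusion_matrix and
# _py_evaluate_segmentation above, with three classes and made-up labels.
import numpy as np

gt = np.array([0, 0, 1, 2, 2, 2], dtype='float32')
pred = np.array([0, 1, 1, 2, 2, 0], dtype='float32')
num_classes = 3

index = (gt * num_classes + pred).astype('int32')
label_count = np.bincount(index, minlength=num_classes * num_classes)
cm = label_count.reshape(num_classes, num_classes).astype(float)

pos, res, tp = cm.sum(1), cm.sum(0), np.diag(cm)
IU_array = tp / np.maximum(1.0, pos + res - tp)
print(cm)               # rows: ground truth class, cols: predicted class
print(IU_array.mean())  # mean IoU; 0.5 for this toy example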
| 42.781182
| 119
| 0.591428
|
"""
Pascal VOC database
This class loads ground truth annotations from the standard Pascal VOC XML data format
and transforms them into IMDB format. Selective search is used for proposals; see the roidb
function. Results are written in the Pascal VOC format. Evaluation is based on the mAP
criterion.
"""
import cPickle
import cv2
import os
import numpy as np
import PIL
from imdb import IMDB
from pascal_voc_eval import voc_eval
from ds_utils import unique_boxes, filter_small_boxes
class PascalVOC(IMDB):
def __init__(self, image_set, root_path, devkit_path, result_path=None, mask_size=-1, binary_thresh=None):
"""
fill basic information to initialize imdb
:param image_set: 2007_trainval, 2007_test, etc
:param root_path: 'selective_search_data' and 'cache'
:param devkit_path: data and results
:return: imdb object
"""
year = image_set.split('_')[0]
image_set = image_set[len(year) + 1 : len(image_set)]
super(PascalVOC, self).__init__('voc_' + year, image_set, root_path, devkit_path, result_path)
self.year = year
self.root_path = root_path
self.devkit_path = devkit_path
self.data_path = os.path.join(devkit_path, 'VOC' + year)
self.classes = ['__background__',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor']
self.num_classes = len(self.classes)
self.image_set_index = self.load_image_set_index()
self.num_images = len(self.image_set_index)
print 'num_images', self.num_images
self.mask_size = mask_size
self.binary_thresh = binary_thresh
self.config = {'comp_id': 'comp4',
'use_diff': False,
'min_size': 2}
def load_image_set_index(self):
"""
find out which indexes correspond to given image set (train or val)
:return:
"""
image_set_index_file = os.path.join(self.data_path, 'ImageSets', 'Main', self.image_set + '.txt')
assert os.path.exists(image_set_index_file), 'Path does not exist: {}'.format(image_set_index_file)
with open(image_set_index_file) as f:
image_set_index = [x.strip() for x in f.readlines()]
return image_set_index
def image_path_from_index(self, index):
"""
given image index, find out full path
:param index: index of a specific image
:return: full path of this image
"""
image_file = os.path.join(self.data_path, 'JPEGImages', index + '.jpg')
assert os.path.exists(image_file), 'Path does not exist: {}'.format(image_file)
return image_file
def segmentation_path_from_index(self, index):
"""
given image index, find out the full path of segmentation class
:param index: index of a specific image
:return: full path of segmentation class
"""
seg_class_file = os.path.join(self.data_path, 'SegmentationClass', index + '.png')
assert os.path.exists(seg_class_file), 'Path does not exist: {}'.format(seg_class_file)
return seg_class_file
def gt_roidb(self):
"""
return ground truth image regions database
:return: imdb[image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} gt roidb loaded from {}'.format(self.name, cache_file)
return roidb
gt_roidb = [self.load_pascal_annotation(index) for index in self.image_set_index]
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote gt roidb to {}'.format(cache_file)
return gt_roidb
def gt_segdb(self):
"""
return ground truth image regions database
:return: imdb[image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_segdb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
segdb = cPickle.load(fid)
print '{} gt segdb loaded from {}'.format(self.name, cache_file)
return segdb
gt_segdb = [self.load_pascal_segmentation_annotation(index) for index in self.image_set_index]
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_segdb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote gt segdb to {}'.format(cache_file)
return gt_segdb
def load_pascal_annotation(self, index):
"""
for a given index, load image and bounding boxes info from XML file
:param index: index of a specific image
:return: record['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
"""
import xml.etree.ElementTree as ET
roi_rec = dict()
roi_rec['image'] = self.image_path_from_index(index)
filename = os.path.join(self.data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
size = tree.find('size')
roi_rec['height'] = float(size.find('height').text)
roi_rec['width'] = float(size.find('width').text)
objs = tree.findall('object')
if not self.config['use_diff']:
non_diff_objs = [obj for obj in objs if int(obj.find('difficult').text) == 0]
objs = non_diff_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
class_to_index = dict(zip(self.classes, range(self.num_classes)))
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
cls = class_to_index[obj.find('name').text.lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
roi_rec.update({'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'max_classes': overlaps.argmax(axis=1),
'max_overlaps': overlaps.max(axis=1),
'flipped': False})
return roi_rec
def load_selective_search_roidb(self, gt_roidb):
"""
turn selective search proposals into selective search roidb
:param gt_roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
:return: roidb: [image_index]['boxes', 'gt_classes', 'gt_overlaps', 'flipped']
"""
import scipy.io
matfile = os.path.join(self.root_path, 'selective_search_data', self.name + '.mat')
assert os.path.exists(matfile), 'selective search data does not exist: {}'.format(matfile)
raw_data = scipy.io.loadmat(matfile)['boxes'].ravel()
box_list = []
for i in range(raw_data.shape[0]):
boxes = raw_data[i][:, (1, 0, 3, 2)] - 1
keep = unique_boxes(boxes)
boxes = boxes[keep, :]
keep = filter_small_boxes(boxes, self.config['min_size'])
boxes = boxes[keep, :]
box_list.append(boxes)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def selective_search_roidb(self, gt_roidb, append_gt=False):
"""
get selective search roidb and ground truth roidb
:param gt_roidb: ground truth roidb
:param append_gt: append ground truth
:return: roidb of selective search
"""
cache_file = os.path.join(self.cache_path, self.name + '_ss_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} ss roidb loaded from {}'.format(self.name, cache_file)
return roidb
if append_gt:
print 'appending ground truth annotations'
ss_roidb = self.load_selective_search_roidb(gt_roidb)
roidb = IMDB.merge_roidbs(gt_roidb, ss_roidb)
else:
roidb = self.load_selective_search_roidb(gt_roidb)
with open(cache_file, 'wb') as fid:
cPickle.dump(roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote ss roidb to {}'.format(cache_file)
return roidb
def load_pascal_segmentation_annotation(self, index):
"""
        for a given index, load the image size and segmentation mask path
:param index: index of a specific image
:return: record['seg_cls_path', 'flipped']
"""
seg_rec = dict()
seg_rec['image'] = self.image_path_from_index(index)
size = cv2.imread(seg_rec['image']).shape
seg_rec['height'] = size[0]
seg_rec['width'] = size[1]
seg_rec['seg_cls_path'] = self.segmentation_path_from_index(index)
seg_rec['flipped'] = False
return seg_rec
def evaluate_detections(self, detections):
"""
top level evaluations
:param detections: result matrix, [bbox, confidence]
        :return: the evaluation info string
"""
result_dir = os.path.join(self.result_path, 'results')
if not os.path.exists(result_dir):
os.mkdir(result_dir)
year_folder = os.path.join(self.result_path, 'results', 'VOC' + self.year)
if not os.path.exists(year_folder):
os.mkdir(year_folder)
res_file_folder = os.path.join(self.result_path, 'results', 'VOC' + self.year, 'Main')
if not os.path.exists(res_file_folder):
os.mkdir(res_file_folder)
self.write_pascal_results(detections)
info = self.do_python_eval()
return info
def evaluate_segmentations(self, pred_segmentations=None):
"""
top level evaluations
:param pred_segmentations: the pred segmentation result
:return: the evaluation results
"""
        if pred_segmentations is not None:
self.write_pascal_segmentation_result(pred_segmentations)
info = self._py_evaluate_segmentation()
return info
def write_pascal_segmentation_result(self, pred_segmentations):
"""
Write pred segmentation to res_file_folder
:param pred_segmentations: the pred segmentation results
:param res_file_folder: the saving folder
:return: [None]
"""
result_dir = os.path.join(self.result_path, 'results')
if not os.path.exists(result_dir):
os.mkdir(result_dir)
year_folder = os.path.join(self.result_path, 'results', 'VOC' + self.year)
if not os.path.exists(year_folder):
os.mkdir(year_folder)
res_file_folder = os.path.join(self.result_path, 'results', 'VOC' + self.year, 'Segmentation')
if not os.path.exists(res_file_folder):
os.mkdir(res_file_folder)
result_dir = os.path.join(self.result_path, 'results', 'VOC' + self.year, 'Segmentation')
if not os.path.exists(result_dir):
os.mkdir(result_dir)
pallete = self.get_pallete(256)
for i, index in enumerate(self.image_set_index):
segmentation_result = np.uint8(np.squeeze(np.copy(pred_segmentations[i])))
segmentation_result = PIL.Image.fromarray(segmentation_result)
segmentation_result.putpalette(pallete)
segmentation_result.save(os.path.join(result_dir, '%s.png'%(index)))
def get_pallete(self, num_cls):
"""
        get the colormap (palette) for visualizing the segmentation mask
        :param num_cls: the number of visualized classes
        :return: the palette
"""
n = num_cls
pallete = [0]*(n*3)
for j in xrange(0,n):
lab = j
pallete[j*3+0] = 0
pallete[j*3+1] = 0
pallete[j*3+2] = 0
i = 0
while (lab > 0):
pallete[j*3+0] |= (((lab >> 0) & 1) << (7-i))
pallete[j*3+1] |= (((lab >> 1) & 1) << (7-i))
pallete[j*3+2] |= (((lab >> 2) & 1) << (7-i))
i = i + 1
lab >>= 3
return pallete
def get_confusion_matrix(self, gt_label, pred_label, class_num):
"""
        Calculate the confusion matrix from the given ground truth and predicted labels
        :param gt_label: the ground truth label
        :param pred_label: the predicted label
        :param class_num: the number of classes
:return: the confusion matrix
"""
index = (gt_label * class_num + pred_label).astype('int32')
label_count = np.bincount(index)
confusion_matrix = np.zeros((class_num, class_num))
for i_label in range(class_num):
for i_pred_label in range(class_num):
cur_index = i_label * class_num + i_pred_label
if cur_index < len(label_count):
confusion_matrix[i_label, i_pred_label] = label_count[cur_index]
return confusion_matrix
def _py_evaluate_segmentation(self):
"""
        This function is a wrapper to calculate the metrics for the given pred_segmentation results
:param pred_segmentations: the pred segmentation result
:return: the evaluation metrics
"""
confusion_matrix = np.zeros((self.num_classes,self.num_classes))
result_dir = os.path.join(self.result_path, 'results', 'VOC' + self.year, 'Segmentation')
for i, index in enumerate(self.image_set_index):
seg_gt_info = self.load_pascal_segmentation_annotation(index)
seg_gt_path = seg_gt_info['seg_cls_path']
seg_gt = np.array(PIL.Image.open(seg_gt_path)).astype('float32')
seg_pred_path = os.path.join(result_dir, '%s.png'%(index))
seg_pred = np.array(PIL.Image.open(seg_pred_path)).astype('float32')
seg_gt = cv2.resize(seg_gt, (seg_pred.shape[1], seg_pred.shape[0]), interpolation=cv2.INTER_NEAREST)
ignore_index = seg_gt != 255
seg_gt = seg_gt[ignore_index]
seg_pred = seg_pred[ignore_index]
confusion_matrix += self.get_confusion_matrix(seg_gt, seg_pred, self.num_classes)
pos = confusion_matrix.sum(1)
res = confusion_matrix.sum(0)
tp = np.diag(confusion_matrix)
IU_array = (tp / np.maximum(1.0, pos + res - tp))
mean_IU = IU_array.mean()
return {'meanIU':mean_IU, 'IU_array':IU_array}
def get_result_file_template(self):
"""
this is a template
VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
:return: a string template
"""
res_file_folder = os.path.join(self.result_path, 'results', 'VOC' + self.year, 'Main')
comp_id = self.config['comp_id']
filename = comp_id + '_det_' + self.image_set + '_{:s}.txt'
path = os.path.join(res_file_folder, filename)
return path
def write_pascal_results(self, all_boxes):
"""
write results files in pascal devkit path
:param all_boxes: boxes to be processed [bbox, confidence]
:return: None
"""
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
print 'Writing {} VOC results file'.format(cls)
filename = self.get_result_file_template().format(cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.image_set_index):
dets = all_boxes[cls_ind][im_ind]
if len(dets) == 0:
continue
for k in range(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1, dets[k, 2] + 1, dets[k, 3] + 1))
def do_python_eval(self):
"""
python evaluation wrapper
:return: info_str
"""
info_str = ''
annopath = os.path.join(self.data_path, 'Annotations', '{0!s}.xml')
imageset_file = os.path.join(self.data_path, 'ImageSets', 'Main', self.image_set + '.txt')
annocache = os.path.join(self.cache_path, self.name + '_annotations.pkl')
aps = []
use_07_metric = True if self.year == 'SDS' or int(self.year) < 2010 else False
        print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
        info_str += 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
info_str += '\n'
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
filename = self.get_result_file_template().format(cls)
rec, prec, ap = voc_eval(filename, annopath, imageset_file, cls, annocache,
ovthresh=0.5, use_07_metric=use_07_metric)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
info_str += 'AP for {} = {:.4f}\n'.format(cls, ap)
print('Mean AP@0.5 = {:.4f}'.format(np.mean(aps)))
info_str += 'Mean AP@0.5 = {:.4f}\n\n'.format(np.mean(aps))
aps = []
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
filename = self.get_result_file_template().format(cls)
rec, prec, ap = voc_eval(filename, annopath, imageset_file, cls, annocache,
ovthresh=0.7, use_07_metric=use_07_metric)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
info_str += 'AP for {} = {:.4f}\n'.format(cls, ap)
print('Mean AP@0.7 = {:.4f}'.format(np.mean(aps)))
info_str += 'Mean AP@0.7 = {:.4f}'.format(np.mean(aps))
return info_str
| false
| true
|
790432f1781660ece2b74af0b67f2530d8e41341
| 7,150
|
py
|
Python
|
display/epd4in2b.py
|
Richard-Kirby/sema_clock
|
f192c8724ede8eb7f0330e295655c21367451cbc
|
[
"MIT"
] | null | null | null |
display/epd4in2b.py
|
Richard-Kirby/sema_clock
|
f192c8724ede8eb7f0330e295655c21367451cbc
|
[
"MIT"
] | null | null | null |
display/epd4in2b.py
|
Richard-Kirby/sema_clock
|
f192c8724ede8eb7f0330e295655c21367451cbc
|
[
"MIT"
] | null | null | null |
##
# @filename : epd4in2b.py
# @brief : Implements for Dual-color e-paper library
# @author : Yehui from Waveshare
#
# Copyright (C) Waveshare August 15 2017
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from . import epdif
from PIL import Image
import RPi.GPIO as GPIO
# Display resolution
EPD_WIDTH = 400
EPD_HEIGHT = 300
# EPD4IN2B commands
PANEL_SETTING = 0x00
POWER_SETTING = 0x01
POWER_OFF = 0x02
POWER_OFF_SEQUENCE_SETTING = 0x03
POWER_ON = 0x04
POWER_ON_MEASURE = 0x05
BOOSTER_SOFT_START = 0x06
DEEP_SLEEP = 0x07
DATA_START_TRANSMISSION_1 = 0x10
DATA_STOP = 0x11
DISPLAY_REFRESH = 0x12
DATA_START_TRANSMISSION_2 = 0x13
VCOM_LUT = 0x20
W2W_LUT = 0x21
B2W_LUT = 0x22
W2B_LUT = 0x23
B2B_LUT = 0x24
PLL_CONTROL = 0x30
TEMPERATURE_SENSOR_CALIBRATION = 0x40
TEMPERATURE_SENSOR_SELECTION = 0x41
TEMPERATURE_SENSOR_WRITE = 0x42
TEMPERATURE_SENSOR_READ = 0x43
VCOM_AND_DATA_INTERVAL_SETTING = 0x50
LOW_POWER_DETECTION = 0x51
TCON_SETTING = 0x60
RESOLUTION_SETTING = 0x61
GSST_SETTING = 0x65
GET_STATUS = 0x71
AUTO_MEASURE_VCOM = 0x80
VCOM_VALUE = 0x81
VCM_DC_SETTING = 0x82
PARTIAL_WINDOW = 0x90
PARTIAL_IN = 0x91
PARTIAL_OUT = 0x92
PROGRAM_MODE = 0xA0
ACTIVE_PROGRAM = 0xA1
READ_OTP_DATA = 0xA2
POWER_SAVING = 0xE3
class EPD:
def __init__(self):
self.reset_pin = epdif.RST_PIN
self.dc_pin = epdif.DC_PIN
self.busy_pin = epdif.BUSY_PIN
self.width = EPD_WIDTH
self.height = EPD_HEIGHT
def digital_write(self, pin, value):
epdif.epd_digital_write(pin, value)
def digital_read(self, pin):
return epdif.epd_digital_read(pin)
def delay_ms(self, delaytime):
epdif.epd_delay_ms(delaytime)
def send_command(self, command):
self.digital_write(self.dc_pin, GPIO.LOW)
# the parameter type is list but not int
# so use [command] instead of command
epdif.spi_transfer([command])
def send_data(self, data):
self.digital_write(self.dc_pin, GPIO.HIGH)
# the parameter type is list but not int
# so use [data] instead of data
epdif.spi_transfer([data])
def init(self):
if (epdif.epd_init() != 0):
return -1
self.reset()
self.send_command(BOOSTER_SOFT_START)
self.send_data (0x17)
self.send_data (0x17)
self.send_data (0x17) # 07 0f 17 1f 27 2F 37 2f
self.send_command(POWER_ON)
self.wait_until_idle()
self.send_command(PANEL_SETTING)
self.send_data(0x0F) # LUT from OTP
def wait_until_idle(self):
while(self.digital_read(self.busy_pin) == 0): # 0: busy, 1: idle
self.delay_ms(100)
def reset(self):
self.digital_write(self.reset_pin, GPIO.LOW) # module reset
self.delay_ms(200)
self.digital_write(self.reset_pin, GPIO.HIGH)
self.delay_ms(200)
def get_frame_buffer(self, image):
buf = [0xFF] * int(self.width * self.height / 8)
# Set buffer to value of Python Imaging Library image.
# Image must be in mode 1.
image_monocolor = image.convert('1')
imwidth, imheight = image_monocolor.size
if imwidth != self.width or imheight != self.height:
            raise ValueError('Image must be same dimensions as display '
                             '({0}x{1}).'.format(self.width, self.height))
pixels = image_monocolor.load()
for y in range(self.height):
for x in range(self.width):
# Set the bits for the column of pixels at the current position.
if pixels[x, y] == 0:
buf[int((x + y * self.width) / 8)] &= ~(0x80 >> (x % 8))
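                    # 8 pixels per byte, MSB first: a black (0) pixel clears its bit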
return buf
def display_frame(self, frame_buffer_black, frame_buffer_red):
if (frame_buffer_black != None):
self.send_command(DATA_START_TRANSMISSION_1)
self.delay_ms(2)
for i in range(0, int(self.width * self.height / 8)):
self.send_data(frame_buffer_black[i])
self.delay_ms(2)
if (frame_buffer_red != None):
self.send_command(DATA_START_TRANSMISSION_2)
self.delay_ms(2)
for i in range(0, int(self.width * self.height / 8)):
self.send_data(frame_buffer_red[i])
self.delay_ms(2)
self.send_command(DISPLAY_REFRESH)
self.wait_until_idle()
# after this, call epd.init() to awaken the module
def sleep(self):
self.send_command(VCOM_AND_DATA_INTERVAL_SETTING)
self.send_data(0xF7) # border floating
self.send_command(POWER_OFF)
self.wait_until_idle()
self.send_command(DEEP_SLEEP)
self.send_data(0xA5) # check code
### END OF FILE ###
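### Example usage (hedged sketch, not part of the driver; assumes the epdif
### wiring above and 400x300 mode-'1' PIL images):
# epd = EPD()
# if epd.init() != -1:
#     black = Image.new('1', (EPD_WIDTH, EPD_HEIGHT), 255)  # all white
#     red = Image.new('1', (EPD_WIDTH, EPD_HEIGHT), 255)
#     epd.display_frame(epd.get_frame_buffer(black), epd.get_frame_buffer(red))
#     epd.sleep()  # deep sleep; call epd.init() again to wake the panel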
| 35.75
| 81
| 0.543357
|
from . import epdif
from PIL import Image
import RPi.GPIO as GPIO
EPD_WIDTH = 400
EPD_HEIGHT = 300
PANEL_SETTING = 0x00
POWER_SETTING = 0x01
POWER_OFF = 0x02
POWER_OFF_SEQUENCE_SETTING = 0x03
POWER_ON = 0x04
POWER_ON_MEASURE = 0x05
BOOSTER_SOFT_START = 0x06
DEEP_SLEEP = 0x07
DATA_START_TRANSMISSION_1 = 0x10
DATA_STOP = 0x11
DISPLAY_REFRESH = 0x12
DATA_START_TRANSMISSION_2 = 0x13
VCOM_LUT = 0x20
W2W_LUT = 0x21
B2W_LUT = 0x22
W2B_LUT = 0x23
B2B_LUT = 0x24
PLL_CONTROL = 0x30
TEMPERATURE_SENSOR_CALIBRATION = 0x40
TEMPERATURE_SENSOR_SELECTION = 0x41
TEMPERATURE_SENSOR_WRITE = 0x42
TEMPERATURE_SENSOR_READ = 0x43
VCOM_AND_DATA_INTERVAL_SETTING = 0x50
LOW_POWER_DETECTION = 0x51
TCON_SETTING = 0x60
RESOLUTION_SETTING = 0x61
GSST_SETTING = 0x65
GET_STATUS = 0x71
AUTO_MEASURE_VCOM = 0x80
VCOM_VALUE = 0x81
VCM_DC_SETTING = 0x82
PARTIAL_WINDOW = 0x90
PARTIAL_IN = 0x91
PARTIAL_OUT = 0x92
PROGRAM_MODE = 0xA0
ACTIVE_PROGRAM = 0xA1
READ_OTP_DATA = 0xA2
POWER_SAVING = 0xE3
class EPD:
def __init__(self):
self.reset_pin = epdif.RST_PIN
self.dc_pin = epdif.DC_PIN
self.busy_pin = epdif.BUSY_PIN
self.width = EPD_WIDTH
self.height = EPD_HEIGHT
def digital_write(self, pin, value):
epdif.epd_digital_write(pin, value)
def digital_read(self, pin):
return epdif.epd_digital_read(pin)
def delay_ms(self, delaytime):
epdif.epd_delay_ms(delaytime)
def send_command(self, command):
self.digital_write(self.dc_pin, GPIO.LOW)
epdif.spi_transfer([command])
def send_data(self, data):
self.digital_write(self.dc_pin, GPIO.HIGH)
epdif.spi_transfer([data])
def init(self):
if (epdif.epd_init() != 0):
return -1
self.reset()
self.send_command(BOOSTER_SOFT_START)
self.send_data (0x17)
self.send_data (0x17)
self.send_data (0x17)
self.send_command(POWER_ON)
self.wait_until_idle()
self.send_command(PANEL_SETTING)
self.send_data(0x0F)
def wait_until_idle(self):
while(self.digital_read(self.busy_pin) == 0):
self.delay_ms(100)
def reset(self):
self.digital_write(self.reset_pin, GPIO.LOW)
self.delay_ms(200)
self.digital_write(self.reset_pin, GPIO.HIGH)
self.delay_ms(200)
def get_frame_buffer(self, image):
buf = [0xFF] * int(self.width * self.height / 8)
image_monocolor = image.convert('1')
imwidth, imheight = image_monocolor.size
if imwidth != self.width or imheight != self.height:
raise ValueError('Image must be same dimensions as display \
({0}x{1}).' .format(self.width, self.height))
pixels = image_monocolor.load()
for y in range(self.height):
for x in range(self.width):
if pixels[x, y] == 0:
buf[int((x + y * self.width) / 8)] &= ~(0x80 >> (x % 8))
return buf
def display_frame(self, frame_buffer_black, frame_buffer_red):
if (frame_buffer_black != None):
self.send_command(DATA_START_TRANSMISSION_1)
self.delay_ms(2)
for i in range(0, int(self.width * self.height / 8)):
self.send_data(frame_buffer_black[i])
self.delay_ms(2)
if (frame_buffer_red != None):
self.send_command(DATA_START_TRANSMISSION_2)
self.delay_ms(2)
for i in range(0, int(self.width * self.height / 8)):
self.send_data(frame_buffer_red[i])
self.delay_ms(2)
self.send_command(DISPLAY_REFRESH)
self.wait_until_idle()
def sleep(self):
self.send_command(VCOM_AND_DATA_INTERVAL_SETTING)
self.send_data(0xF7)
self.send_command(POWER_OFF)
self.wait_until_idle()
self.send_command(DEEP_SLEEP)
self.send_data(0xA5)
| true
| true
|
7904347fd1cde67ceb16182e873d46ccf0f507bd
| 7,568
|
py
|
Python
|
pylib/Tools/Executor/combinatorial.py
|
emallove/mtt
|
7ea3046ada6d7cc3db7129c4644acad854101adc
|
[
"BSD-3-Clause-Open-MPI"
] | null | null | null |
pylib/Tools/Executor/combinatorial.py
|
emallove/mtt
|
7ea3046ada6d7cc3db7129c4644acad854101adc
|
[
"BSD-3-Clause-Open-MPI"
] | null | null | null |
pylib/Tools/Executor/combinatorial.py
|
emallove/mtt
|
7ea3046ada6d7cc3db7129c4644acad854101adc
|
[
"BSD-3-Clause-Open-MPI"
] | null | null | null |
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: f; python-indent: 4 -*-
#
# Copyright (c) 2015-2018 Intel, Inc. All rights reserved.
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
import os
import sys
import configparser
import importlib
import logging
import imp
import datetime
import tempfile
import shutil
from yapsy.PluginManager import PluginManager
from ExecutorMTTTool import *
## @addtogroup Tools
# @{
# @addtogroup Executor
# @section CombinatorialEx
# Combinatorial execution executor
# @}
class CombinatorialEx(ExecutorMTTTool):
def __init__(self):
# initialise parent class
ExecutorMTTTool.__init__(self)
self.options = {}
self.parser = configparser.ConfigParser()
self.parser.optionxform = str
# Create temp directory to hold .ini files
self.tempDir = tempfile.mkdtemp()
self.baseIniFile = None
self.runLog = {}
self.iniLog = {}
def activate(self):
# use the automatic procedure from IPlugin
IPlugin.activate(self)
return
def deactivate(self):
IPlugin.deactivate(self)
return
def print_name(self):
return "Combinatorial executor"
def print_options(self, testDef, prefix):
lines = testDef.printOptions(self.options)
for line in lines:
print(prefix + line)
return
# Create .ini files for each combination to be run
# BaseIniFile created by TestDef ConfigTest()
def createIniLog(self, testDef):
self.baseIniFile = testDef.config
tempSpecialSection = {}
# configParser object to write individual options to files
writeOption = configparser.ConfigParser()
writeOption.optionxform = str
# Sort base .ini sections and write to temp files
for section in self.baseIniFile.sections():
if section == "ENV":
continue
if section.startswith("SKIP") or section.startswith("skip"):
# users often want to temporarily ignore a section
# of their test definition file, but don't want to
# remove it lest they forget what it did. So let
# them just mark the section as "skip" to be ignored
continue
self.parser.add_section(section)
for option in self.baseIniFile.options(section):
self.parser.set(section, option, self.baseIniFile.get(section, option))
# TODO: FIX Getting temp file in tmp dir that is not being removed
fd, fileName = tempfile.mkstemp(suffix=".ini", dir = self.tempDir)
with open(fileName, 'w') as configfile:
self.parser.write(configfile)
# Clear out parser for next section
self.parser.remove_section(section)
if "MiddlewareGet" in section:
self.runLog[section] = fileName
elif "TestRun" in section:
tempSpecialSection[section] = fileName
else:
self.iniLog[section] = fileName
# Combine TestRun and MiddlewareGet files
tempList = {}
for section in self.runLog:
self.parser.read(self.runLog[section])
for id in tempSpecialSection:
self.parser.read(tempSpecialSection[id])
fd, fileName = tempfile.mkstemp(suffix = ".ini", dir = self.tempDir)
with open(fileName, 'w') as configfile:
self.parser.write(configfile)
self.parser.remove_section(id)
tempList[fd] = fileName
self.parser.remove_section(section)
self.runLog.clear()
self.runLog = tempList
# Sort sections for comma separated values to be parsed
optionsCSV = {}
for section in self.iniLog:
writeOption.read(self.iniLog[section])
for option in writeOption.options(section):
if ',' in writeOption.get(section, option):
                    optionsCSV.setdefault(section, []).append(option)
else:
# write option to base run files
for fd in self.runLog:
# set up parser to write to each file
self.parser.read(self.runLog[fd])
if not self.parser.has_section(section):
self.parser.add_section(section)
self.parser.set(section, option, writeOption.get(section, option))
# Want to overwrite file with new parser contents
with open(self.runLog[fd], 'w') as configfile:
self.parser.write(configfile)
# clear parser for next file
for sect in self.parser.sections():
self.parser.remove_section(sect)
writeOption.remove_section(section)
# Process CSV options
for section in optionsCSV:
self.parser.read(self.iniLog[section])
for option in optionsCSV[section]:
# Get clean list of CSV's
rawList = self.parser.get(section, option)
splitList = rawList.split(',')
optionList = []
for item in splitList:
optionList.append(item.strip())
newList = {}
for fd in self.runLog:
writeOption.read(self.runLog[fd])
for nextOpt in optionList:
                    if not writeOption.has_section(section):
                        writeOption.add_section(section)
writeOption.set(section, option, nextOpt)
fd, fileName = tempfile.mkstemp(suffix=".ini", dir = self.tempDir)
with open(fileName, 'w') as configfile:
writeOption.write(configfile)
newList[fd] = fileName
for sect in writeOption.sections():
writeOption.remove_section(sect)
# Update runLog for next pass
self.runLog.clear()
self.runLog = newList
self.parser.remove_section(section)
def execute(self, testDef):
testDef.logger.verbose_print("ExecuteCombinatorial")
status = 0
self.createIniLog(testDef)
try:
if not self.runLog:
print("Error, empty run log, combinatorial executor failed")
sys.exit(1)
for nextFile in self.runLog:
if not os.path.isfile(self.runLog[nextFile]):
print("Test .ini file not found!: " + nextFile)
sys.exit(1)
testDef.configNewTest(self.runLog[nextFile])
sequential_status = testDef.executeTest()
if sequential_status != 0:
status = 1
# clean up temporary files
finally:
shutil.rmtree(self.tempDir)
return status
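# Minimal sketch of the expansion rule createIniLog implements (standalone and
# illustrative - the section/option names below are hypothetical, not MTT API):
# every comma-separated option value multiplies the set of generated .ini files,
# so {np: "2,4"} x {btl: "tcp,vader"} yields 4 run files.
#
# import itertools
# csv_options = {("mpi", "np"): ["2", "4"], ("mpi", "btl"): ["tcp", "vader"]}
# for combo in itertools.product(*csv_options.values()):
#     print(dict(zip(csv_options.keys(), combo)))   # one dict per generated file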
| 39.010309
| 90
| 0.556818
|
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
import os
import sys
import configparser
import importlib
import logging
import imp
import datetime
import tempfile
import shutil
from yapsy.PluginManager import PluginManager
from ExecutorMTTTool import *
class CombinatorialEx(ExecutorMTTTool):
def __init__(self):
ExecutorMTTTool.__init__(self)
self.options = {}
self.parser = configparser.ConfigParser()
self.parser.optionxform = str
self.tempDir = tempfile.mkdtemp()
self.baseIniFile = None
self.runLog = {}
self.iniLog = {}
def activate(self):
IPlugin.activate(self)
return
def deactivate(self):
IPlugin.deactivate(self)
return
def print_name(self):
return "Combinatorial executor"
def print_options(self, testDef, prefix):
lines = testDef.printOptions(self.options)
for line in lines:
print(prefix + line)
return
def createIniLog(self, testDef):
self.baseIniFile = testDef.config
tempSpecialSection = {}
writeOption = configparser.ConfigParser()
writeOption.optionxform = str
for section in self.baseIniFile.sections():
if section == "ENV":
continue
if section.startswith("SKIP") or section.startswith("skip"):
# remove it lest they forget what it did. So let
# them just mark the section as "skip" to be ignored
continue
self.parser.add_section(section)
for option in self.baseIniFile.options(section):
self.parser.set(section, option, self.baseIniFile.get(section, option))
# TODO: FIX Getting temp file in tmp dir that is not being removed
fd, fileName = tempfile.mkstemp(suffix=".ini", dir = self.tempDir)
with open(fileName, 'w') as configfile:
self.parser.write(configfile)
# Clear out parser for next section
self.parser.remove_section(section)
if "MiddlewareGet" in section:
self.runLog[section] = fileName
elif "TestRun" in section:
tempSpecialSection[section] = fileName
else:
self.iniLog[section] = fileName
# Combine TestRun and MiddlewareGet files
tempList = {}
for section in self.runLog:
self.parser.read(self.runLog[section])
for id in tempSpecialSection:
self.parser.read(tempSpecialSection[id])
fd, fileName = tempfile.mkstemp(suffix = ".ini", dir = self.tempDir)
with open(fileName, 'w') as configfile:
self.parser.write(configfile)
self.parser.remove_section(id)
tempList[fd] = fileName
self.parser.remove_section(section)
self.runLog.clear()
self.runLog = tempList
# Sort sections for comma separated values to be parsed
optionsCSV = {}
for section in self.iniLog:
writeOption.read(self.iniLog[section])
for option in writeOption.options(section):
if ',' in writeOption.get(section, option):
                    optionsCSV.setdefault(section, []).append(option)
else:
# write option to base run files
for fd in self.runLog:
# set up parser to write to each file
self.parser.read(self.runLog[fd])
if not self.parser.has_section(section):
self.parser.add_section(section)
self.parser.set(section, option, writeOption.get(section, option))
# Want to overwrite file with new parser contents
with open(self.runLog[fd], 'w') as configfile:
self.parser.write(configfile)
# clear parser for next file
for sect in self.parser.sections():
self.parser.remove_section(sect)
writeOption.remove_section(section)
# Process CSV options
for section in optionsCSV:
self.parser.read(self.iniLog[section])
for option in optionsCSV[section]:
# Get clean list of CSV's
rawList = self.parser.get(section, option)
splitList = rawList.split(',')
optionList = []
for item in splitList:
optionList.append(item.strip())
newList = {}
for fd in self.runLog:
writeOption.read(self.runLog[fd])
for nextOpt in optionList:
                    if not writeOption.has_section(section):
                        writeOption.add_section(section)
writeOption.set(section, option, nextOpt)
fd, fileName = tempfile.mkstemp(suffix=".ini", dir = self.tempDir)
with open(fileName, 'w') as configfile:
writeOption.write(configfile)
newList[fd] = fileName
for sect in writeOption.sections():
writeOption.remove_section(sect)
self.runLog.clear()
self.runLog = newList
self.parser.remove_section(section)
def execute(self, testDef):
testDef.logger.verbose_print("ExecuteCombinatorial")
status = 0
self.createIniLog(testDef)
try:
if not self.runLog:
print("Error, empty run log, combinatorial executor failed")
sys.exit(1)
for nextFile in self.runLog:
if not os.path.isfile(self.runLog[nextFile]):
print("Test .ini file not found!: " + nextFile)
sys.exit(1)
testDef.configNewTest(self.runLog[nextFile])
sequential_status = testDef.executeTest()
if sequential_status != 0:
status = 1
finally:
shutil.rmtree(self.tempDir)
return status
| true
| true
|
7904350e03509818825fefc1f297fd144a1fef4c
| 14,232
|
py
|
Python
|
pyfr/writers/paraview.py
|
tjcorona/PyFR
|
a72b41580043bb001e5a9e6bb79a0e305d48e052
|
[
"BSD-3-Clause"
] | null | null | null |
pyfr/writers/paraview.py
|
tjcorona/PyFR
|
a72b41580043bb001e5a9e6bb79a0e305d48e052
|
[
"BSD-3-Clause"
] | null | null | null |
pyfr/writers/paraview.py
|
tjcorona/PyFR
|
a72b41580043bb001e5a9e6bb79a0e305d48e052
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Converts .pyfr[m, s] files to a Paraview VTK UnstructuredGrid File"""
from collections import defaultdict
import os
import numpy as np
from pyfr.shapes import BaseShape
from pyfr.util import subclass_where
from pyfr.writers import BaseWriter
class ParaviewWriter(BaseWriter):
# Supported file types and extensions
name = 'paraview'
extn = ['.vtu', '.pvtu']
def __init__(self, args):
super().__init__(args)
self.dtype = np.dtype(args.precision).type
self.divisor = args.divisor or self.cfg.getint('solver', 'order')
def _get_npts_ncells_nnodes(self, mk):
m_inf = self.mesh_inf[mk]
# Get the shape and sub division classes
shapecls = subclass_where(BaseShape, name=m_inf[0])
subdvcls = subclass_where(BaseShapeSubDiv, name=m_inf[0])
# Number of vis points
npts = shapecls.nspts_from_order(self.divisor + 1)*m_inf[1][1]
# Number of sub cells and nodes
ncells = len(subdvcls.subcells(self.divisor))*m_inf[1][1]
nnodes = len(subdvcls.subnodes(self.divisor))*m_inf[1][1]
return npts, ncells, nnodes
def _get_array_attrs(self, mk=None):
dtype = 'Float32' if self.dtype == np.float32 else 'Float64'
dsize = np.dtype(self.dtype).itemsize
ndims = self.ndims
vvars = self.elementscls.visvarmap[ndims]
names = ['', 'connectivity', 'offsets', 'types']
types = [dtype, 'Int32', 'Int32', 'UInt8']
comps = ['3', '', '', '']
for fname, varnames in vvars.items():
names.append(fname.capitalize())
types.append(dtype)
comps.append(str(len(varnames)))
        # If a mesh has been given then compute the sizes
if mk:
npts, ncells, nnodes = self._get_npts_ncells_nnodes(mk)
nb = npts*dsize
sizes = [3*nb, 4*nnodes, 4*ncells, ncells]
sizes.extend(len(varnames)*nb for varnames in vvars.values())
return names, types, comps, sizes
else:
return names, types, comps
def write_out(self):
name, extn = os.path.splitext(self.outf)
parallel = extn == '.pvtu'
parts = defaultdict(list)
for mk, sk in zip(self.mesh_inf, self.soln_inf):
prt = mk.split('_')[-1]
pfn = '{0}_{1}.vtu'.format(name, prt) if parallel else self.outf
parts[pfn].append((mk, sk))
write_s_to_fh = lambda s: fh.write(s.encode('utf-8'))
for pfn, misil in parts.items():
with open(pfn, 'wb') as fh:
write_s_to_fh('<?xml version="1.0" ?>\n<VTKFile '
'byte_order="LittleEndian" '
'type="UnstructuredGrid" '
'version="0.1">\n<UnstructuredGrid>\n')
# Running byte-offset for appended data
off = 0
# Header
for mk, sk in misil:
off = self._write_serial_header(fh, mk, off)
write_s_to_fh('</UnstructuredGrid>\n'
'<AppendedData encoding="raw">\n_')
# Data
for mk, sk in misil:
self._write_data(fh, mk, sk)
write_s_to_fh('\n</AppendedData>\n</VTKFile>')
if parallel:
with open(self.outf, 'wb') as fh:
write_s_to_fh('<?xml version="1.0" ?>\n<VTKFile '
'byte_order="LittleEndian" '
'type="PUnstructuredGrid" '
'version="0.1">\n<PUnstructuredGrid>\n')
# Header
self._write_parallel_header(fh)
                # Constituent pieces
for pfn in parts:
write_s_to_fh('<Piece Source="{0}"/>\n'
.format(os.path.basename(pfn)))
write_s_to_fh('</PUnstructuredGrid>\n</VTKFile>\n')
def _write_darray(self, array, vtuf, dtype):
array = array.astype(dtype)
np.uint32(array.nbytes).tofile(vtuf)
array.tofile(vtuf)
def _write_serial_header(self, vtuf, mk, off):
names, types, comps, sizes = self._get_array_attrs(mk)
npts, ncells = self._get_npts_ncells_nnodes(mk)[:2]
write_s = lambda s: vtuf.write(s.encode('utf-8'))
write_s('<Piece NumberOfPoints="{0}" NumberOfCells="{1}">\n'
.format(npts, ncells))
write_s('<Points>\n')
        # Write vtk DataArray headers
for i, (n, t, c, s) in enumerate(zip(names, types, comps, sizes)):
write_s('<DataArray Name="{0}" type="{1}" '
'NumberOfComponents="{2}" '
'format="appended" offset="{3}"/>\n'
.format(n, t, c, off))
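            # advance past the 4-byte UInt32 length prefix written by _write_darray,
            # plus the payload size itself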
off += 4 + s
# Write ends/starts of vtk file objects
if i == 0:
write_s('</Points>\n<Cells>\n')
elif i == 3:
write_s('</Cells>\n<PointData>\n')
# Write end of vtk element data
write_s('</PointData>\n</Piece>\n')
# Return the current offset
return off
def _write_parallel_header(self, vtuf):
names, types, comps = self._get_array_attrs()
write_s = lambda s: vtuf.write(s.encode('utf-8'))
write_s('<PPoints>\n')
        # Write vtk DataArray headers
for i, (n, t, s) in enumerate(zip(names, types, comps)):
write_s('<PDataArray Name="{0}" type="{1}" '
'NumberOfComponents="{2}"/>\n'.format(n, t, s))
if i == 0:
write_s('</PPoints>\n<PCells>\n')
elif i == 3:
write_s('</PCells>\n<PPointData>\n')
write_s('</PPointData>\n')
def _write_data(self, vtuf, mk, sk):
name = self.mesh_inf[mk][0]
mesh = self.mesh[mk]
soln = self.soln[sk]
# Get the shape and sub division classes
shapecls = subclass_where(BaseShape, name=name)
subdvcls = subclass_where(BaseShapeSubDiv, name=name)
# Dimensions
nspts, neles = mesh.shape[:2]
        # Sub division points inside of a standard element
svpts = shapecls.std_ele(self.divisor)
nsvpts = len(svpts)
# Shape
soln_b = shapecls(nspts, self.cfg)
# Generate the operator matrices
mesh_vtu_op = soln_b.sbasis.nodal_basis_at(svpts)
soln_vtu_op = soln_b.ubasis.nodal_basis_at(svpts)
# Calculate node locations of vtu elements
vpts = np.dot(mesh_vtu_op, mesh.reshape(nspts, -1))
vpts = vpts.reshape(nsvpts, -1, self.ndims)
# Calculate solution at node locations of vtu elements
vsol = np.dot(soln_vtu_op, soln.reshape(-1, self.nvars*neles))
vsol = vsol.reshape(nsvpts, self.nvars, -1).swapaxes(0, 1)
# Append dummy z dimension for points in 2D
if self.ndims == 2:
vpts = np.pad(vpts, [(0, 0), (0, 0), (0, 1)], 'constant')
# Write element node locations to file
self._write_darray(vpts.swapaxes(0, 1), vtuf, self.dtype)
# Perform the sub division
nodes = subdvcls.subnodes(self.divisor)
# Prepare vtu cell arrays
vtu_con = np.tile(nodes, (neles, 1))
vtu_con += (np.arange(neles)*nsvpts)[:, None]
# Generate offset into the connectivity array
vtu_off = np.tile(subdvcls.subcelloffs(self.divisor), (neles, 1))
vtu_off += (np.arange(neles)*len(nodes))[:, None]
# Tile vtu cell type numbers
vtu_typ = np.tile(subdvcls.subcelltypes(self.divisor), neles)
# Write vtu node connectivity, connectivity offsets and cell types
self._write_darray(vtu_con, vtuf, np.int32)
self._write_darray(vtu_off, vtuf, np.int32)
self._write_darray(vtu_typ, vtuf, np.uint8)
# Primitive and visualisation variable maps
privarmap = self.elementscls.privarmap[self.ndims]
visvarmap = self.elementscls.visvarmap[self.ndims]
# Convert from conservative to primitive variables
vsol = np.array(self.elementscls.conv_to_pri(vsol, self.cfg))
# Write out the various fields
for vnames in visvarmap.values():
ix = [privarmap.index(vn) for vn in vnames]
self._write_darray(vsol[ix].T, vtuf, self.dtype)
class BaseShapeSubDiv(object):
vtk_types = dict(tri=5, quad=9, tet=10, pyr=14, pri=13, hex=12)
vtk_nodes = dict(tri=3, quad=4, tet=4, pyr=5, pri=6, hex=8)
@classmethod
def subcells(cls, n):
pass
@classmethod
def subcelloffs(cls, n):
return np.cumsum([cls.vtk_nodes[t] for t in cls.subcells(n)])
@classmethod
def subcelltypes(cls, n):
return np.array([cls.vtk_types[t] for t in cls.subcells(n)])
@classmethod
def subnodes(cls, n):
pass
class TensorProdShapeSubDiv(BaseShapeSubDiv):
@classmethod
def subnodes(cls, n):
conbase = np.array([0, 1, n + 2, n + 1])
# Extend quad mapping to hex mapping
if cls.ndim == 3:
conbase = np.hstack((conbase, conbase + (1 + n)**2))
# Calculate offset of each subdivided element's nodes
nodeoff = np.zeros((n,)*cls.ndim)
for dim, off in enumerate(np.ix_(*(range(n),)*cls.ndim)):
nodeoff += off*(n + 1)**dim
# Tile standard element node ordering mapping, then apply offsets
internal_con = np.tile(conbase, (n**cls.ndim, 1))
internal_con += nodeoff.T.flatten()[:, None]
return np.hstack(internal_con)
class QuadShapeSubDiv(TensorProdShapeSubDiv):
name = 'quad'
ndim = 2
@classmethod
def subcells(cls, n):
return ['quad']*(n**2)
class HexShapeSubDiv(TensorProdShapeSubDiv):
name = 'hex'
ndim = 3
@classmethod
def subcells(cls, n):
return ['hex']*(n**3)
class TriShapeSubDiv(BaseShapeSubDiv):
name = 'tri'
@classmethod
def subcells(cls, n):
return ['tri']*(n**2)
@classmethod
def subnodes(cls, n):
conlst = []
for row in range(n, 0, -1):
# Lower and upper indices
l = (n - row)*(n + row + 3) // 2
u = l + row + 1
# Base offsets
off = [l, l + 1, u, u + 1, l + 1, u]
# Generate current row
subin = np.ravel(np.arange(row - 1)[..., None] + off)
subex = [ix + row - 1 for ix in off[:3]]
            # Extend the connectivity list
conlst.extend([subin, subex])
return np.hstack(conlst)
class TetShapeSubDiv(BaseShapeSubDiv):
name = 'tet'
@classmethod
def subcells(cls, nsubdiv):
return ['tet']*(nsubdiv**3)
@classmethod
def subnodes(cls, nsubdiv):
conlst = []
jump = 0
for n in range(nsubdiv, 0, -1):
for row in range(n, 0, -1):
# Lower and upper indices
l = (n - row)*(n + row + 3) // 2 + jump
u = l + row + 1
# Lower and upper for one row up
ln = (n + 1)*(n + 2) // 2 + l - n + row
un = ln + row
rowm1 = np.arange(row - 1)[..., None]
# Base offsets
offs = [(l, l + 1, u, ln), (l + 1, u, ln, ln + 1),
(u, u + 1, ln + 1, un), (u, ln, ln + 1, un),
(l + 1, u, u+1, ln + 1), (u + 1, ln + 1, un, un + 1)]
# Current row
conlst.extend(rowm1 + off for off in offs[:-1])
conlst.append(rowm1[:-1] + offs[-1])
conlst.append([ix + row - 1 for ix in offs[0]])
jump += (n + 1)*(n + 2) // 2
return np.hstack(np.ravel(c) for c in conlst)
class PriShapeSubDiv(BaseShapeSubDiv):
name = 'pri'
@classmethod
def subcells(cls, n):
return ['pri']*(n**3)
@classmethod
def subnodes(cls, n):
# Triangle connectivity
tcon = TriShapeSubDiv.subnodes(n).reshape(-1, 3)
# Layer these rows of triangles to define prisms
loff = (n + 1)*(n + 2) // 2
lcon = [[tcon + i*loff, tcon + (i + 1)*loff] for i in range(n)]
return np.hstack(np.hstack(l).flat for l in lcon)
class PyrShapeSubDiv(BaseShapeSubDiv):
name = 'pyr'
@classmethod
def subcells(cls, n):
cells = []
for i in range(n, 0, -1):
cells += ['pyr']*(i**2 + (i - 1)**2)
cells += ['tet']*(2*i*(i - 1))
return cells
@classmethod
def subnodes(cls, nsubdiv):
lcon = []
# Quad connectivity
qcon = [QuadShapeSubDiv.subnodes(n + 1).reshape(-1, 4)
for n in range(nsubdiv)]
# Simple functions
def _row_in_quad(n, a=0, b=0):
return np.array([(n*i + j, n*i + j + 1)
for i in range(a, n + b)
for j in range(n - 1)])
def _col_in_quad(n, a=0, b=0):
return np.array([(n*i + j, n*(i + 1) + j)
for i in range(n - 1)
for j in range(a, n + b)])
u = 0
for n in range(nsubdiv, 0, -1):
l = u
u += (n + 1)**2
lower_quad = qcon[n - 1] + l
upper_pts = np.arange(n**2) + u
# First set of pyramids
lcon.append([lower_quad, upper_pts])
if n > 1:
upper_quad = qcon[n - 2] + u
lower_pts = np.hstack(range(k*(n + 1)+1, (k + 1)*n + k)
for k in range(1, n)) + l
# Second set of pyramids
lcon.append([upper_quad[:, ::-1], lower_pts])
lower_row = _row_in_quad(n + 1, 1, -1) + l
lower_col = _col_in_quad(n + 1, 1, -1) + l
upper_row = _row_in_quad(n) + u
upper_col = _col_in_quad(n) + u
# Tetrahedra
lcon.append([lower_col, upper_row])
lcon.append([lower_row[:, ::-1], upper_col])
return np.hstack(np.column_stack(l).flat for l in lcon)
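# Quick check of the sub-division helpers above (hedged, illustrative): a quad
# divided with n=2 becomes four VTK quads over a 3x3 node lattice.
# >>> QuadShapeSubDiv.subcells(2)
# ['quad', 'quad', 'quad', 'quad']
# >>> QuadShapeSubDiv.subcelloffs(2)
# array([ 4,  8, 12, 16])
# >>> QuadShapeSubDiv.subnodes(2).reshape(-1, 4)
# array([[0, 1, 4, 3],
#        [1, 2, 5, 4],
#        [3, 4, 7, 6],
#        [4, 5, 8, 7]])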
| 31.074236
| 77
| 0.531057
|
from collections import defaultdict
import os
import numpy as np
from pyfr.shapes import BaseShape
from pyfr.util import subclass_where
from pyfr.writers import BaseWriter
class ParaviewWriter(BaseWriter):
name = 'paraview'
extn = ['.vtu', '.pvtu']
def __init__(self, args):
super().__init__(args)
self.dtype = np.dtype(args.precision).type
self.divisor = args.divisor or self.cfg.getint('solver', 'order')
def _get_npts_ncells_nnodes(self, mk):
m_inf = self.mesh_inf[mk]
shapecls = subclass_where(BaseShape, name=m_inf[0])
subdvcls = subclass_where(BaseShapeSubDiv, name=m_inf[0])
npts = shapecls.nspts_from_order(self.divisor + 1)*m_inf[1][1]
ncells = len(subdvcls.subcells(self.divisor))*m_inf[1][1]
nnodes = len(subdvcls.subnodes(self.divisor))*m_inf[1][1]
return npts, ncells, nnodes
def _get_array_attrs(self, mk=None):
dtype = 'Float32' if self.dtype == np.float32 else 'Float64'
dsize = np.dtype(self.dtype).itemsize
ndims = self.ndims
vvars = self.elementscls.visvarmap[ndims]
names = ['', 'connectivity', 'offsets', 'types']
types = [dtype, 'Int32', 'Int32', 'UInt8']
comps = ['3', '', '', '']
for fname, varnames in vvars.items():
names.append(fname.capitalize())
types.append(dtype)
comps.append(str(len(varnames)))
if mk:
npts, ncells, nnodes = self._get_npts_ncells_nnodes(mk)
nb = npts*dsize
sizes = [3*nb, 4*nnodes, 4*ncells, ncells]
sizes.extend(len(varnames)*nb for varnames in vvars.values())
return names, types, comps, sizes
else:
return names, types, comps
def write_out(self):
name, extn = os.path.splitext(self.outf)
parallel = extn == '.pvtu'
parts = defaultdict(list)
for mk, sk in zip(self.mesh_inf, self.soln_inf):
prt = mk.split('_')[-1]
pfn = '{0}_{1}.vtu'.format(name, prt) if parallel else self.outf
parts[pfn].append((mk, sk))
write_s_to_fh = lambda s: fh.write(s.encode('utf-8'))
for pfn, misil in parts.items():
with open(pfn, 'wb') as fh:
write_s_to_fh('<?xml version="1.0" ?>\n<VTKFile '
'byte_order="LittleEndian" '
'type="UnstructuredGrid" '
'version="0.1">\n<UnstructuredGrid>\n')
off = 0
for mk, sk in misil:
off = self._write_serial_header(fh, mk, off)
write_s_to_fh('</UnstructuredGrid>\n'
'<AppendedData encoding="raw">\n_')
for mk, sk in misil:
self._write_data(fh, mk, sk)
write_s_to_fh('\n</AppendedData>\n</VTKFile>')
if parallel:
with open(self.outf, 'wb') as fh:
write_s_to_fh('<?xml version="1.0" ?>\n<VTKFile '
'byte_order="LittleEndian" '
'type="PUnstructuredGrid" '
'version="0.1">\n<PUnstructuredGrid>\n')
self._write_parallel_header(fh)
for pfn in parts:
write_s_to_fh('<Piece Source="{0}"/>\n'
.format(os.path.basename(pfn)))
write_s_to_fh('</PUnstructuredGrid>\n</VTKFile>\n')
def _write_darray(self, array, vtuf, dtype):
array = array.astype(dtype)
np.uint32(array.nbytes).tofile(vtuf)
array.tofile(vtuf)
def _write_serial_header(self, vtuf, mk, off):
names, types, comps, sizes = self._get_array_attrs(mk)
npts, ncells = self._get_npts_ncells_nnodes(mk)[:2]
write_s = lambda s: vtuf.write(s.encode('utf-8'))
write_s('<Piece NumberOfPoints="{0}" NumberOfCells="{1}">\n'
.format(npts, ncells))
write_s('<Points>\n')
for i, (n, t, c, s) in enumerate(zip(names, types, comps, sizes)):
write_s('<DataArray Name="{0}" type="{1}" '
'NumberOfComponents="{2}" '
'format="appended" offset="{3}"/>\n'
.format(n, t, c, off))
off += 4 + s
if i == 0:
write_s('</Points>\n<Cells>\n')
elif i == 3:
write_s('</Cells>\n<PointData>\n')
write_s('</PointData>\n</Piece>\n')
return off
def _write_parallel_header(self, vtuf):
names, types, comps = self._get_array_attrs()
write_s = lambda s: vtuf.write(s.encode('utf-8'))
write_s('<PPoints>\n')
for i, (n, t, s) in enumerate(zip(names, types, comps)):
write_s('<PDataArray Name="{0}" type="{1}" '
'NumberOfComponents="{2}"/>\n'.format(n, t, s))
if i == 0:
write_s('</PPoints>\n<PCells>\n')
elif i == 3:
write_s('</PCells>\n<PPointData>\n')
write_s('</PPointData>\n')
def _write_data(self, vtuf, mk, sk):
name = self.mesh_inf[mk][0]
mesh = self.mesh[mk]
soln = self.soln[sk]
shapecls = subclass_where(BaseShape, name=name)
subdvcls = subclass_where(BaseShapeSubDiv, name=name)
nspts, neles = mesh.shape[:2]
svpts = shapecls.std_ele(self.divisor)
nsvpts = len(svpts)
soln_b = shapecls(nspts, self.cfg)
mesh_vtu_op = soln_b.sbasis.nodal_basis_at(svpts)
soln_vtu_op = soln_b.ubasis.nodal_basis_at(svpts)
vpts = np.dot(mesh_vtu_op, mesh.reshape(nspts, -1))
vpts = vpts.reshape(nsvpts, -1, self.ndims)
vsol = np.dot(soln_vtu_op, soln.reshape(-1, self.nvars*neles))
vsol = vsol.reshape(nsvpts, self.nvars, -1).swapaxes(0, 1)
if self.ndims == 2:
vpts = np.pad(vpts, [(0, 0), (0, 0), (0, 1)], 'constant')
self._write_darray(vpts.swapaxes(0, 1), vtuf, self.dtype)
nodes = subdvcls.subnodes(self.divisor)
vtu_con = np.tile(nodes, (neles, 1))
vtu_con += (np.arange(neles)*nsvpts)[:, None]
vtu_off = np.tile(subdvcls.subcelloffs(self.divisor), (neles, 1))
vtu_off += (np.arange(neles)*len(nodes))[:, None]
vtu_typ = np.tile(subdvcls.subcelltypes(self.divisor), neles)
self._write_darray(vtu_con, vtuf, np.int32)
self._write_darray(vtu_off, vtuf, np.int32)
self._write_darray(vtu_typ, vtuf, np.uint8)
privarmap = self.elementscls.privarmap[self.ndims]
visvarmap = self.elementscls.visvarmap[self.ndims]
vsol = np.array(self.elementscls.conv_to_pri(vsol, self.cfg))
for vnames in visvarmap.values():
ix = [privarmap.index(vn) for vn in vnames]
self._write_darray(vsol[ix].T, vtuf, self.dtype)
class BaseShapeSubDiv(object):
vtk_types = dict(tri=5, quad=9, tet=10, pyr=14, pri=13, hex=12)
vtk_nodes = dict(tri=3, quad=4, tet=4, pyr=5, pri=6, hex=8)
@classmethod
def subcells(cls, n):
pass
@classmethod
def subcelloffs(cls, n):
return np.cumsum([cls.vtk_nodes[t] for t in cls.subcells(n)])
@classmethod
def subcelltypes(cls, n):
return np.array([cls.vtk_types[t] for t in cls.subcells(n)])
@classmethod
def subnodes(cls, n):
pass
class TensorProdShapeSubDiv(BaseShapeSubDiv):
@classmethod
def subnodes(cls, n):
conbase = np.array([0, 1, n + 2, n + 1])
if cls.ndim == 3:
conbase = np.hstack((conbase, conbase + (1 + n)**2))
nodeoff = np.zeros((n,)*cls.ndim)
for dim, off in enumerate(np.ix_(*(range(n),)*cls.ndim)):
nodeoff += off*(n + 1)**dim
# Tile standard element node ordering mapping, then apply offsets
internal_con = np.tile(conbase, (n**cls.ndim, 1))
internal_con += nodeoff.T.flatten()[:, None]
return np.hstack(internal_con)
class QuadShapeSubDiv(TensorProdShapeSubDiv):
name = 'quad'
ndim = 2
@classmethod
def subcells(cls, n):
return ['quad']*(n**2)
class HexShapeSubDiv(TensorProdShapeSubDiv):
name = 'hex'
ndim = 3
@classmethod
def subcells(cls, n):
return ['hex']*(n**3)
class TriShapeSubDiv(BaseShapeSubDiv):
name = 'tri'
@classmethod
def subcells(cls, n):
return ['tri']*(n**2)
@classmethod
def subnodes(cls, n):
conlst = []
for row in range(n, 0, -1):
# Lower and upper indices
l = (n - row)*(n + row + 3) // 2
u = l + row + 1
# Base offsets
off = [l, l + 1, u, u + 1, l + 1, u]
# Generate current row
subin = np.ravel(np.arange(row - 1)[..., None] + off)
subex = [ix + row - 1 for ix in off[:3]]
            # Extend the connectivity list
conlst.extend([subin, subex])
return np.hstack(conlst)
class TetShapeSubDiv(BaseShapeSubDiv):
name = 'tet'
@classmethod
def subcells(cls, nsubdiv):
return ['tet']*(nsubdiv**3)
@classmethod
def subnodes(cls, nsubdiv):
conlst = []
jump = 0
for n in range(nsubdiv, 0, -1):
for row in range(n, 0, -1):
# Lower and upper indices
l = (n - row)*(n + row + 3) // 2 + jump
u = l + row + 1
# Lower and upper for one row up
ln = (n + 1)*(n + 2) // 2 + l - n + row
un = ln + row
rowm1 = np.arange(row - 1)[..., None]
# Base offsets
offs = [(l, l + 1, u, ln), (l + 1, u, ln, ln + 1),
(u, u + 1, ln + 1, un), (u, ln, ln + 1, un),
(l + 1, u, u+1, ln + 1), (u + 1, ln + 1, un, un + 1)]
# Current row
conlst.extend(rowm1 + off for off in offs[:-1])
conlst.append(rowm1[:-1] + offs[-1])
conlst.append([ix + row - 1 for ix in offs[0]])
jump += (n + 1)*(n + 2) // 2
return np.hstack(np.ravel(c) for c in conlst)
class PriShapeSubDiv(BaseShapeSubDiv):
name = 'pri'
@classmethod
def subcells(cls, n):
return ['pri']*(n**3)
@classmethod
def subnodes(cls, n):
# Triangle connectivity
tcon = TriShapeSubDiv.subnodes(n).reshape(-1, 3)
# Layer these rows of triangles to define prisms
loff = (n + 1)*(n + 2) // 2
lcon = [[tcon + i*loff, tcon + (i + 1)*loff] for i in range(n)]
return np.hstack(np.hstack(l).flat for l in lcon)
class PyrShapeSubDiv(BaseShapeSubDiv):
name = 'pyr'
@classmethod
def subcells(cls, n):
cells = []
for i in range(n, 0, -1):
cells += ['pyr']*(i**2 + (i - 1)**2)
cells += ['tet']*(2*i*(i - 1))
return cells
@classmethod
def subnodes(cls, nsubdiv):
lcon = []
# Quad connectivity
qcon = [QuadShapeSubDiv.subnodes(n + 1).reshape(-1, 4)
for n in range(nsubdiv)]
# Simple functions
def _row_in_quad(n, a=0, b=0):
return np.array([(n*i + j, n*i + j + 1)
for i in range(a, n + b)
for j in range(n - 1)])
def _col_in_quad(n, a=0, b=0):
return np.array([(n*i + j, n*(i + 1) + j)
for i in range(n - 1)
for j in range(a, n + b)])
u = 0
for n in range(nsubdiv, 0, -1):
l = u
u += (n + 1)**2
lower_quad = qcon[n - 1] + l
upper_pts = np.arange(n**2) + u
# First set of pyramids
lcon.append([lower_quad, upper_pts])
if n > 1:
upper_quad = qcon[n - 2] + u
lower_pts = np.hstack(range(k*(n + 1)+1, (k + 1)*n + k)
for k in range(1, n)) + l
# Second set of pyramids
lcon.append([upper_quad[:, ::-1], lower_pts])
lower_row = _row_in_quad(n + 1, 1, -1) + l
lower_col = _col_in_quad(n + 1, 1, -1) + l
upper_row = _row_in_quad(n) + u
upper_col = _col_in_quad(n) + u
# Tetrahedra
lcon.append([lower_col, upper_row])
lcon.append([lower_row[:, ::-1], upper_col])
return np.hstack(np.column_stack(l).flat for l in lcon)
| true
| true
|
79043595da46f5ce5100fad9af56dcdd58be848e
| 1,201
|
py
|
Python
|
kloppy/domain/services/matchers/css.py
|
pratikthanki/kloppy
|
ab3293e03f958720489cd2d9e25a1c9f12b9970c
|
[
"BSD-3-Clause"
] | null | null | null |
kloppy/domain/services/matchers/css.py
|
pratikthanki/kloppy
|
ab3293e03f958720489cd2d9e25a1c9f12b9970c
|
[
"BSD-3-Clause"
] | null | null | null |
kloppy/domain/services/matchers/css.py
|
pratikthanki/kloppy
|
ab3293e03f958720489cd2d9e25a1c9f12b9970c
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import List
from lxml import etree
from cssselect import GenericTranslator
from kloppy.domain import Event, EventType
class CSSPatternMatcher:
def __init__(self, pattern: str):
        self.expression = GenericTranslator().css_to_xpath(pattern)
    def match(self, events: List[Event]) -> List[Event]:
elm = etree.Element("start")
root = elm
for i, event in enumerate(events):
if event.event_type != EventType.GENERIC:
elm = etree.SubElement(
elm,
event.event_name.lower()
.replace(" ", "_")
.replace("*", ""),
                    index=str(i),
result=str(event.result).lower(),
team=str(event.team.ground).lower(),
attrib={
"class": str(event.result).lower()
+ " "
+ str(event.team.ground).lower()
},
)
matching_events = []
for elm in root.xpath(self.expression):
            matching_events.append(events[int(elm.attrib["index"])])
return matching_events
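# Hedged usage sketch (the pattern and event list are illustrative): event names
# become element tags and result/ground become classes, so CSS sibling selectors
# can express event sequences:
# matcher = CSSPatternMatcher("pass.complete + shot")
# shots_after_completed_passes = matcher.match(events)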
| 32.459459
| 69
| 0.507077
|
from typing import List
from lxml import etree
from cssselect import GenericTranslator
from kloppy.domain import Event, EventType
class CSSPatternMatcher:
def __init__(self, pattern: str):
        self.expression = GenericTranslator().css_to_xpath(pattern)
    def match(self, events: List[Event]) -> List[Event]:
elm = etree.Element("start")
root = elm
for i, event in enumerate(events):
if event.event_type != EventType.GENERIC:
elm = etree.SubElement(
elm,
event.event_name.lower()
.replace(" ", "_")
.replace("*", ""),
                    index=str(i),
result=str(event.result).lower(),
team=str(event.team.ground).lower(),
attrib={
"class": str(event.result).lower()
+ " "
+ str(event.team.ground).lower()
},
)
matching_events = []
for elm in root.xpath(self.expression):
            matching_events.append(events[int(elm.attrib["index"])])
return matching_events
| true
| true
|
790435fca9254c818a01c7cdb5c270570d0a54d7
| 4,060
|
py
|
Python
|
rdfdatabank/lib/data_sync.py
|
dataflow/RDFDatabank
|
8a3abd28fefc62cbbfb9f77e7ddc920e23794f34
|
[
"MIT"
] | 4
|
2016-01-10T09:05:22.000Z
|
2019-09-09T09:57:25.000Z
|
rdfdatabank/lib/data_sync.py
|
dataflow/RDFDatabank
|
8a3abd28fefc62cbbfb9f77e7ddc920e23794f34
|
[
"MIT"
] | null | null | null |
rdfdatabank/lib/data_sync.py
|
dataflow/RDFDatabank
|
8a3abd28fefc62cbbfb9f77e7ddc920e23794f34
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2012 University of Oxford
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from rdfdatabank.lib.auth_entry import list_silos, list_usernames, list_group_usernames, add_silo, add_group_users
def sync_members(g):
# NOTE: g._register_silos() IS AN EXPENSIVE OPERATION.
# THIS FUNCTION IS EXPENSIVE AND SHOULD BE CALLED ONLY IF REALLY NECESSARY
#g = ag.granary
g.state.revert()
g._register_silos()
granary_list = g.silos
granary_list_database = list_silos()
usernames = list_usernames()
for silo in granary_list:
if not silo in granary_list_database:
add_silo(silo)
kw = g.describe_silo(silo)
#Get existing owners, admins, managers and submitters from silo metadata
owners = []
admins = []
managers = []
submitters = []
        if 'owners' in kw and kw['owners']:
            owners = [x.strip() for x in kw['owners'].split(",") if x]
        if 'administrators' in kw and kw['administrators']:
admins = [x.strip() for x in kw['administrators'].split(",") if x]
if 'managers' in kw and kw['managers']:
managers = [x.strip() for x in kw['managers'].split(",") if x]
if 'submitters' in kw and kw['submitters']:
submitters = [x.strip() for x in kw['submitters'].split(",") if x]
# Check users in silo metadata are valid users
owners = [x for x in owners if x in usernames]
admins = [x for x in admins if x in usernames]
managers = [x for x in managers if x in usernames]
submitters = [x for x in submitters if x in usernames]
#Synchronize members in silo metadata with users in database
d_admins = []
d_managers = []
        d_submitters = []
if silo in granary_list_database:
d_admins, d_managers, d_submitters = list_group_usernames(silo)
admins.extend(d_admins)
managers.extend(d_managers)
submitters.extend(d_submitters)
# Ensure users are listed just once in silo metadata and owner is superset
owners.extend(admins)
owners.extend(managers)
owners.extend(submitters)
admins = list(set(admins))
managers = list(set(managers))
submitters = list(set(submitters))
owners = list(set(owners))
# Add users in silo metadata to the database
new_silo_users = []
for a in admins:
if not a in d_admins:
new_silo_users.append((a, 'administrator'))
for a in managers:
if not a in d_managers:
new_silo_users.append((a, 'manager'))
        for a in submitters:
if not a in d_submitters:
new_silo_users.append((a, 'submitter'))
if new_silo_users:
add_group_users(silo, new_silo_users)
#Write members into silo
kw['owners'] = ','.join(owners)
kw['administrators'] = ','.join(admins)
kw['managers'] = ','.join(managers)
kw['submitters'] = ','.join(submitters)
g.describe_silo(silo, **kw)
g.sync()
return
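# Membership merge rule applied above, in brief (illustrative names):
#   admins     = set(ini_admins)     | set(db_admins)
#   managers   = set(ini_managers)   | set(db_managers)
#   submitters = set(ini_submitters) | set(db_submitters)
#   owners     = owners | admins | managers | submitters   (always the superset)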
| 39.803922
| 114
| 0.652956
|
from rdfdatabank.lib.auth_entry import list_silos, list_usernames, list_group_usernames, add_silo, add_group_users
def sync_members(g):
g.state.revert()
g._register_silos()
granary_list = g.silos
granary_list_database = list_silos()
usernames = list_usernames()
for silo in granary_list:
if not silo in granary_list_database:
add_silo(silo)
kw = g.describe_silo(silo)
owners = []
admins = []
managers = []
submitters = []
        if 'owners' in kw and kw['owners']:
            owners = [x.strip() for x in kw['owners'].split(",") if x]
        if 'administrators' in kw and kw['administrators']:
admins = [x.strip() for x in kw['administrators'].split(",") if x]
if 'managers' in kw and kw['managers']:
managers = [x.strip() for x in kw['managers'].split(",") if x]
if 'submitters' in kw and kw['submitters']:
submitters = [x.strip() for x in kw['submitters'].split(",") if x]
owners = [x for x in owners if x in usernames]
admins = [x for x in admins if x in usernames]
managers = [x for x in managers if x in usernames]
submitters = [x for x in submitters if x in usernames]
d_admins = []
d_managers = []
        d_submitters = []
if silo in granary_list_database:
d_admins, d_managers, d_submitters = list_group_usernames(silo)
admins.extend(d_admins)
managers.extend(d_managers)
submitters.extend(d_submitters)
owners.extend(admins)
owners.extend(managers)
owners.extend(submitters)
admins = list(set(admins))
managers = list(set(managers))
submitters = list(set(submitters))
owners = list(set(owners))
new_silo_users = []
for a in admins:
if not a in d_admins:
new_silo_users.append((a, 'administrator'))
for a in managers:
if not a in d_managers:
new_silo_users.append((a, 'manager'))
        for a in submitters:
if not a in d_submitters:
new_silo_users.append((a, 'submitter'))
if new_silo_users:
add_group_users(silo, new_silo_users)
kw['owners'] = ','.join(owners)
kw['administrators'] = ','.join(admins)
kw['managers'] = ','.join(managers)
kw['submitters'] = ','.join(submitters)
g.describe_silo(silo, **kw)
g.sync()
return
| true
| true
|
790436659fc09d186eb38514c3add1c093b11f80
| 6,712
|
py
|
Python
|
Titanic/analysis/colab_titanic_main.py
|
couyang24/general_learning-tiffany
|
fa358e6f3b14386519295a8959ad02512f92fb95
|
[
"Apache-2.0"
] | null | null | null |
Titanic/analysis/colab_titanic_main.py
|
couyang24/general_learning-tiffany
|
fa358e6f3b14386519295a8959ad02512f92fb95
|
[
"Apache-2.0"
] | 27
|
2020-07-19T16:14:40.000Z
|
2021-09-19T01:24:42.000Z
|
Titanic/analysis/colab_titanic_main.py
|
couyang24/general_learning-tiffany
|
fa358e6f3b14386519295a8959ad02512f92fb95
|
[
"Apache-2.0"
] | 2
|
2020-05-16T18:47:05.000Z
|
2020-10-15T10:58:42.000Z
|
# <a href="https://colab.research.google.com/github/couyang24/general_learning-tiffany/blob/master/Titanic/analysis/colab_titanic_main.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Need to mount Drive on or upload kaggle.json
from google.colab import drive
drive.mount("/content/drive")
# !mkdir ~/.kaggle/
# !cp drive/My\ Drive/input/kaggle.json ~/.kaggle/
# !kaggle competitions download -c titanic
# Load Package
# import numpy as np
import pandas as pd
import seaborn as sns
import featuretools
import featuretools as ft
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.impute import SimpleImputer, MissingIndicator
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import (
OneHotEncoder,
StandardScaler,
LabelEncoder,
OrdinalEncoder,
)
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import cross_val_score, RandomizedSearchCV
# Load data
train_df = pd.read_csv("train.csv")
test_df = pd.read_csv("test.csv")
# Save data
target = train_df[["Survived"]]
submission = test_df[["PassengerId"]]
# Join and Clean
combine = pd.concat([train_df, test_df])
# EDA
combine.info()
combine.columns
mapping = {
"Mlle": "Miss",
"Major": "Mr",
"Col": "Mr",
"Sir": "Mr",
"Don": "Mr",
"Mme": "Miss",
"Jonkheer": "Mr",
"Lady": "Mrs",
"Capt": "Mr",
"Countess": "Mrs",
"Ms": "Miss",
"Dona": "Mrs",
}
combine["Title"] = combine.Name.apply(
lambda x: x.split(".")[0].split(",")[1].strip()
).replace(mapping)
combine.drop(["Cabin", "Ticket", "Name"], axis=1, inplace=True)
# +
# combine['Sex2'] = combine['Sex'].apply(lambda x: 0 if x=='female' else 1)
# +
# class ModifiedLabelEncoder(LabelEncoder):
# def fit_transform(self, y, *args, **kwargs):
# return super().fit_transform(y)
# def transform(self, y, *args, **kwargs):
# return super().transform(y)
# +
categorical_transformer = Pipeline(
steps=[
("imputer", SimpleImputer(strategy="most_frequent")),
("encode", OrdinalEncoder()),
]
)
numeric_transformer = Pipeline([("imputer", SimpleImputer(strategy="median")),])
# -
combine[["Sex", "Embarked", "Title"]] = categorical_transformer.fit_transform(
combine[["Sex", "Embarked", "Title"]]
)
combine[["Age", "Fare"]] = numeric_transformer.fit_transform(combine[["Age", "Fare"]])
# +
es = ft.EntitySet(id="titanic_data")
es = es.entity_from_dataframe(
entity_id="combine",
dataframe=combine.drop(["Survived"], axis=1),
variable_types={
"Embarked": ft.variable_types.Categorical,
"Sex": ft.variable_types.Boolean,
"Title": ft.variable_types.Categorical,
},
index="PassengerId",
)
es
# -
es = es.normalize_entity(
base_entity_id="combine", new_entity_id="Embarked", index="Embarked"
)
es = es.normalize_entity(base_entity_id="combine", new_entity_id="Sex", index="Sex")
es = es.normalize_entity(base_entity_id="combine", new_entity_id="Title", index="Title")
es = es.normalize_entity(
base_entity_id="combine", new_entity_id="Pclass", index="Pclass"
)
es = es.normalize_entity(base_entity_id="combine", new_entity_id="Parch", index="Parch")
es = es.normalize_entity(base_entity_id="combine", new_entity_id="SibSp", index="SibSp")
es
primitives = ft.list_primitives()
pd.options.display.max_colwidth = 100
primitives[primitives["type"] == "aggregation"].head(
primitives[primitives["type"] == "aggregation"].shape[0]
)
primitives[primitives["type"] == "transform"].head(
primitives[primitives["type"] == "transform"].shape[0]
)
features, feature_names = ft.dfs(
entityset=es,
target_entity="combine",
# trans_primitives=['subtract_numeric', 'add_numeric', 'divide_numeric', 'multiply_numeric'],
max_depth=2,
)
feature_names
len(feature_names)
features.isnull().sum()
class RemoveLowInfo(BaseEstimator, TransformerMixin):
def __init__(self, threshold):
self.threshold = threshold
def fit(self, X, y=None):
return self
def transform(self, X):
df = X.copy()
keep = [
column
for column in df.columns
if df[column].value_counts(normalize=True).reset_index(drop=True)[0]
< self.threshold
]
return df[keep]
from sklearn.preprocessing import OneHotEncoder, StandardScaler, FunctionTransformer
impute_median = FunctionTransformer(lambda x: x.fillna(x.median()), validate=False)
normalize = FunctionTransformer(lambda x: (x - x.mean()) / x.std(), validate=False)
from sklearn.decomposition import PCA
transformer = Pipeline(
[
("imputer", impute_median),
("removelowinfo", RemoveLowInfo(threshold=0.95)),
("scaler", normalize),
]
)
clean_features = transformer.fit_transform(features)
# !pip install catboost
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.ensemble import (
RandomForestClassifier,
GradientBoostingClassifier,
AdaBoostClassifier,
BaggingClassifier,
VotingClassifier,
)
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.decomposition import PCA
import xgboost as xgb
import lightgbm as lgb
import catboost as cgb
# +
methods = [
("logistic", LogisticRegression(solver="lbfgs")),
# ('sgd', SGDClassifier()),
("tree", DecisionTreeClassifier()),
("bag", BaggingClassifier()),
("xgb", xgb.XGBClassifier(max_depth=3)),
("lgb", lgb.LGBMClassifier(max_depth=3)),
# ('cgb', cgb.CatBoostClassifier(max_depth=3,silent=True)),
("ada", AdaBoostClassifier()),
("gbm", GradientBoostingClassifier()),
("rf", RandomForestClassifier(n_estimators=100)),
# ('svc', LinearSVC()),
# ('rbf', SVC()),
("nb", Pipeline([("pca", PCA()), ("gnb", GaussianNB())])),
("nn", MLPClassifier()),
("knn", KNeighborsClassifier()),
]
ensemble = VotingClassifier(
methods,
voting="soft",
# weights=[1,1,1,1,2,2,1,1],
# flatten_transform=True,
)
clf = Pipeline(
[
# ('transformer', transformer),
("ensemble", ensemble),
]
)
clf.fit(clean_features.iloc[: train_df.shape[0], :], target)
# -
submission["Survived"] = pd.DataFrame(
clf.predict(clean_features.iloc[train_df.shape[0] :, :])
)
print(submission.dtypes)
submission.to_csv("titanic_submission.csv", index=False)
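# Quick check of RemoveLowInfo on a toy frame (illustrative):
# toy = pd.DataFrame({"constant": [1] * 99 + [2], "varied": range(100)})
# RemoveLowInfo(threshold=0.95).fit_transform(toy).columns  # -> only 'varied' survives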
| 26.425197
| 252
| 0.674762
|
from google.colab import drive
drive.mount("/content/drive")
import pandas as pd
import seaborn as sns
import featuretools
import featuretools as ft
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.impute import SimpleImputer, MissingIndicator
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import (
OneHotEncoder,
StandardScaler,
LabelEncoder,
OrdinalEncoder,
)
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import cross_val_score, RandomizedSearchCV
train_df = pd.read_csv("train.csv")
test_df = pd.read_csv("test.csv")
target = train_df[["Survived"]]
submission = test_df[["PassengerId"]]
combine = pd.concat([train_df, test_df])
combine.info()
combine.columns
mapping = {
"Mlle": "Miss",
"Major": "Mr",
"Col": "Mr",
"Sir": "Mr",
"Don": "Mr",
"Mme": "Miss",
"Jonkheer": "Mr",
"Lady": "Mrs",
"Capt": "Mr",
"Countess": "Mrs",
"Ms": "Miss",
"Dona": "Mrs",
}
combine["Title"] = combine.Name.apply(
lambda x: x.split(".")[0].split(",")[1].strip()
).replace(mapping)
combine.drop(["Cabin", "Ticket", "Name"], axis=1, inplace=True)
categorical_transformer = Pipeline(
steps=[
("imputer", SimpleImputer(strategy="most_frequent")),
("encode", OrdinalEncoder()),
]
)
numeric_transformer = Pipeline([("imputer", SimpleImputer(strategy="median")),])
combine[["Sex", "Embarked", "Title"]] = categorical_transformer.fit_transform(
combine[["Sex", "Embarked", "Title"]]
)
combine[["Age", "Fare"]] = numeric_transformer.fit_transform(combine[["Age", "Fare"]])
es = ft.EntitySet(id="titanic_data")
es = es.entity_from_dataframe(
entity_id="combine",
dataframe=combine.drop(["Survived"], axis=1),
variable_types={
"Embarked": ft.variable_types.Categorical,
"Sex": ft.variable_types.Boolean,
"Title": ft.variable_types.Categorical,
},
index="PassengerId",
)
es
es = es.normalize_entity(
base_entity_id="combine", new_entity_id="Embarked", index="Embarked"
)
es = es.normalize_entity(base_entity_id="combine", new_entity_id="Sex", index="Sex")
es = es.normalize_entity(base_entity_id="combine", new_entity_id="Title", index="Title")
es = es.normalize_entity(
base_entity_id="combine", new_entity_id="Pclass", index="Pclass"
)
es = es.normalize_entity(base_entity_id="combine", new_entity_id="Parch", index="Parch")
es = es.normalize_entity(base_entity_id="combine", new_entity_id="SibSp", index="SibSp")
es
primitives = ft.list_primitives()
pd.options.display.max_colwidth = 100
primitives[primitives["type"] == "aggregation"].head(
primitives[primitives["type"] == "aggregation"].shape[0]
)
primitives[primitives["type"] == "transform"].head(
primitives[primitives["type"] == "transform"].shape[0]
)
features, feature_names = ft.dfs(
entityset=es,
target_entity="combine",
max_depth=2,
)
feature_names
len(feature_names)
features.isnull().sum()
class RemoveLowInfo(BaseEstimator, TransformerMixin):
def __init__(self, threshold):
self.threshold = threshold
def fit(self, X, y=None):
return self
def transform(self, X):
df = X.copy()
keep = [
column
for column in df.columns
if df[column].value_counts(normalize=True).reset_index(drop=True)[0]
< self.threshold
]
return df[keep]
from sklearn.preprocessing import OneHotEncoder, StandardScaler, FunctionTransformer
impute_median = FunctionTransformer(lambda x: x.fillna(x.median()), validate=False)
normalize = FunctionTransformer(lambda x: (x - x.mean()) / x.std(), validate=False)
from sklearn.decomposition import PCA
transformer = Pipeline(
[
("imputer", impute_median),
("removelowinfo", RemoveLowInfo(threshold=0.95)),
("scaler", normalize),
]
)
clean_features = transformer.fit_transform(features)
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.ensemble import (
RandomForestClassifier,
GradientBoostingClassifier,
AdaBoostClassifier,
BaggingClassifier,
VotingClassifier,
)
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.decomposition import PCA
import xgboost as xgb
import lightgbm as lgb
import catboost as cgb
methods = [
("logistic", LogisticRegression(solver="lbfgs")),
("tree", DecisionTreeClassifier()),
("bag", BaggingClassifier()),
("xgb", xgb.XGBClassifier(max_depth=3)),
("lgb", lgb.LGBMClassifier(max_depth=3)),
("ada", AdaBoostClassifier()),
("gbm", GradientBoostingClassifier()),
("rf", RandomForestClassifier(n_estimators=100)),
("nb", Pipeline([("pca", PCA()), ("gnb", GaussianNB())])),
("nn", MLPClassifier()),
("knn", KNeighborsClassifier()),
]
ensemble = VotingClassifier(
methods,
voting="soft",
)
clf = Pipeline(
[
("ensemble", ensemble),
]
)
clf.fit(clean_features.iloc[: train_df.shape[0], :], target)
submission["Survived"] = pd.DataFrame(
clf.predict(clean_features.iloc[train_df.shape[0] :, :])
)
print(submission.dtypes)
submission.to_csv("titanic_submission.csv", index=False)
| true
| true
|
7904366ac66d89a10ab08531845b2896ead95da9
| 1,012
|
py
|
Python
|
setup.py
|
tropicbliss/MCsniperPY
|
ad0337fb10f90e18b5a648a150c2179d12048522
|
[
"MIT"
] | null | null | null |
setup.py
|
tropicbliss/MCsniperPY
|
ad0337fb10f90e18b5a648a150c2179d12048522
|
[
"MIT"
] | null | null | null |
setup.py
|
tropicbliss/MCsniperPY
|
ad0337fb10f90e18b5a648a150c2179d12048522
|
[
"MIT"
] | null | null | null |
import pathlib
from setuptools import setup
here = pathlib.Path(__file__).parent.resolve()
# Get the long description from the README file
long_description = (here / "README.md").read_text(encoding="utf-8")
setup(
name="MCsniperPY",
version="0.20.6",
description="Minecraft name sniper written in Python",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/MCsniperPY/MCsniperPY",
author="Kqzz",
license="MIT",
packages=["mcsniperpy", "mcsniperpy.util", "mcsniperpy.util.classes"],
install_requires=["typer", "aiohttp", "colorama", "bs4"],
entry_points={"console_scripts": ["mcsniperpy=mcsniperpy.cli:cli"]},
python_requires=">=3.8",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License", # Again, pick a license
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
)
| 33.733333
| 75
| 0.652174
|
import pathlib
from setuptools import setup
here = pathlib.Path(__file__).parent.resolve()
long_description = (here / "README.md").read_text(encoding="utf-8")
setup(
name="MCsniperPY",
version="0.20.6",
description="Minecraft name sniper written in Python",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/MCsniperPY/MCsniperPY",
author="Kqzz",
license="MIT",
packages=["mcsniperpy", "mcsniperpy.util", "mcsniperpy.util.classes"],
install_requires=["typer", "aiohttp", "colorama", "bs4"],
entry_points={"console_scripts": ["mcsniperpy=mcsniperpy.cli:cli"]},
python_requires=">=3.8",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
)
| true
| true
|
790436c2a2e54b1015bb3b69c4a62b273aa3398b
| 412
|
py
|
Python
|
app/forms.py
|
justinvasel/justinvasel.com
|
a0765e5d4d6505fa1e02aa7b73e8039cf19ba646
|
[
"MIT"
] | null | null | null |
app/forms.py
|
justinvasel/justinvasel.com
|
a0765e5d4d6505fa1e02aa7b73e8039cf19ba646
|
[
"MIT"
] | 2
|
2018-04-10T11:39:49.000Z
|
2020-09-26T05:35:17.000Z
|
app/forms.py
|
justinvasel/justinvasel.com
|
a0765e5d4d6505fa1e02aa7b73e8039cf19ba646
|
[
"MIT"
] | null | null | null |
from app import models
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo
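# Minimal login form: username and password are both required; 'submit' renders the sign-in button.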
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
submit = SubmitField('Sign In')
| 37.454545
| 76
| 0.783981
|
from app import models
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
submit = SubmitField('Sign In')
| true
| true
|
79043723c9cd53c11f21fb7b99efb9992d23ccde
| 109
|
py
|
Python
|
home/__init__.py
|
abkraynak/smart-home
|
26439a322ee75811731da413c8e80d8fb89ab876
|
[
"MIT"
] | null | null | null |
home/__init__.py
|
abkraynak/smart-home
|
26439a322ee75811731da413c8e80d8fb89ab876
|
[
"MIT"
] | null | null | null |
home/__init__.py
|
abkraynak/smart-home
|
26439a322ee75811731da413c8e80d8fb89ab876
|
[
"MIT"
] | null | null | null |
# __init__.py
from .home import Home
from .alarm import Alarm
from .light import Light
from .lock import Lock
| 18.166667
| 24
| 0.770642
|
from .home import Home
from .alarm import Alarm
from .light import Light
from .lock import Lock
| true
| true
|
790437a75eefe189e227cc30e0488b2cdd89e4e9
| 1,928
|
py
|
Python
|
redis_metrics/management/commands/generate_test_metrics.py
|
bradmontgomery/django-redis-metrics
|
b1466b5742f3f1e3aac4264cb8b73e25e765e972
|
[
"MIT"
] | 52
|
2015-01-03T19:40:50.000Z
|
2022-01-23T14:08:43.000Z
|
redis_metrics/management/commands/generate_test_metrics.py
|
bradmontgomery/django-redis-metrics
|
b1466b5742f3f1e3aac4264cb8b73e25e765e972
|
[
"MIT"
] | 31
|
2015-01-05T10:28:50.000Z
|
2020-03-30T15:42:35.000Z
|
redis_metrics/management/commands/generate_test_metrics.py
|
bradmontgomery/django-redis-metrics
|
b1466b5742f3f1e3aac4264cb8b73e25e765e972
|
[
"MIT"
] | 11
|
2015-03-07T12:15:53.000Z
|
2019-11-03T15:31:59.000Z
|
from __future__ import unicode_literals
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from redis_metrics.utils import generate_test_metrics
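# NOTE: option_list with optparse.make_option is the legacy (pre-Django 1.8) way to
# declare command options; newer Django commands override add_arguments() with argparse.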
class Command(BaseCommand):
args = '<metric-name> [<metric-name> ...]'
help = "Creates Lots of Dummy Metrics"
option_list = BaseCommand.option_list + (
make_option(
'-r',
'--randomize',
action='store_true',
dest='randomize',
default=True,
help='Randomize Metric Data'
),
make_option(
'--no-randomize',
action='store_false',
dest='randomize',
default=True,
help='Do not randomize Metric Data'
),
make_option(
'-n',
'--num-days',
action='store',
dest='num_days',
type="int",
default=365 * 3, # Default to 3 years
help='Number of Days worth of data to generate'
),
make_option(
'-c',
'--cap',
action='store',
dest='cap',
default=None,
help='Cap the maximum metric value'
),
)
def handle(self, *args, **options):
if len(args) == 0:
raise CommandError("You must provide at least one metric name")
slugs = args
cap = options["cap"]
days = options["num_days"]
randomize = options["randomize"]
self.stdout.write("\nGenerating metrics using the following:\n")
self.stdout.write("Slugs: {0}\n".format(u", ".join(slugs)))
self.stdout.write("Days: {0}\n".format(days))
self.stdout.write("Randomize: {0}\n".format(randomize))
self.stdout.write("Cap: {0}\n".format(cap))
for slug in slugs:
generate_test_metrics(slug, num=days, randomize=randomize, cap=cap)
| 31.096774
| 79
| 0.544606
|
from __future__ import unicode_literals
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from redis_metrics.utils import generate_test_metrics
class Command(BaseCommand):
args = '<metric-name> [<metric-name> ...]'
help = "Creates Lots of Dummy Metrics"
option_list = BaseCommand.option_list + (
make_option(
'-r',
'--randomize',
action='store_true',
dest='randomize',
default=True,
help='Randomize Metric Data'
),
make_option(
'--no-randomize',
action='store_false',
dest='randomize',
default=True,
help='Do not randomize Metric Data'
),
make_option(
'-n',
'--num-days',
action='store',
dest='num_days',
type="int",
default=365 * 3,
help='Number of Days worth of data to generate'
),
make_option(
'-c',
'--cap',
action='store',
dest='cap',
default=None,
help='Cap the maximum metric value'
),
)
def handle(self, *args, **options):
if len(args) == 0:
raise CommandError("You must provide at least one metric name")
slugs = args
cap = options["cap"]
days = options["num_days"]
randomize = options["randomize"]
self.stdout.write("\nGenerating metrics using the following:\n")
self.stdout.write("Slugs: {0}\n".format(u", ".join(slugs)))
self.stdout.write("Days: {0}\n".format(days))
self.stdout.write("Randomize: {0}\n".format(randomize))
self.stdout.write("Cap: {0}\n".format(cap))
for slug in slugs:
generate_test_metrics(slug, num=days, randomize=randomize, cap=cap)
| true
| true
|
79043bdca4d5df85a74771eeed3b7d0eff306609
| 336
|
gyp
|
Python
|
binding.gyp
|
chipsalliance/tree-sitter-firrtl
|
e17cc559154f1f43daad00651781107f32a9d9f4
|
[
"Apache-2.0"
] | 5
|
2020-01-25T03:46:46.000Z
|
2021-11-14T17:13:35.000Z
|
binding.gyp
|
chipsalliance/tree-sitter-firrtl
|
e17cc559154f1f43daad00651781107f32a9d9f4
|
[
"Apache-2.0"
] | 2
|
2020-05-09T17:08:58.000Z
|
2021-04-30T16:50:56.000Z
|
binding.gyp
|
chipsalliance/tree-sitter-firrtl
|
e17cc559154f1f43daad00651781107f32a9d9f4
|
[
"Apache-2.0"
] | 3
|
2021-05-17T03:12:45.000Z
|
2022-01-30T09:18:55.000Z
|
{
"targets": [
{
"target_name": "tree_sitter_firrtl_binding",
"include_dirs": [
"<!(node -e \"require('nan')\")",
"src"
],
"sources": [
"src/parser.c",
"bindings/node/binding.cc",
"src/scanner.cc"
],
"cflags_c": [
"-std=c99",
]
}
]
}
| 16.8
| 50
| 0.410714
|
{
"targets": [
{
"target_name": "tree_sitter_firrtl_binding",
"include_dirs": [
"<!(node -e \"require('nan')\")",
"src"
],
"sources": [
"src/parser.c",
"bindings/node/binding.cc",
"src/scanner.cc"
],
"cflags_c": [
"-std=c99",
]
}
]
}
| true
| true
|
79043c031d2adb4623ba80798ec83e7fb544c24a
| 983
|
py
|
Python
|
server.py
|
Mogekoff/nachess
|
87027279b804c925ea3051cf8dd5f1b3709b1e7b
|
[
"MIT"
] | null | null | null |
server.py
|
Mogekoff/nachess
|
87027279b804c925ea3051cf8dd5f1b3709b1e7b
|
[
"MIT"
] | null | null | null |
server.py
|
Mogekoff/nachess
|
87027279b804c925ea3051cf8dd5f1b3709b1e7b
|
[
"MIT"
] | null | null | null |
import socket
import sys
from config import ip, port
net = 0
sock = None
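# Network mode is chosen from argv: '--connect <host> <port>' joins a remote game,
# '--server' hosts one, and anything else falls back to a single-screen local game.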
try:
if sys.argv[1] == '--connect':
sock = socket.socket()
try:
sock.connect((sys.argv[2], int(sys.argv[3])))
            print('Connected to the game.')
except:
            print(f'Failed to connect to the game at {sys.argv[2]}:{sys.argv[3]}')
net = 1
elif sys.argv[1] == '--server':
try:
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((ip, port))
server.listen(1)
            print(f'Server started at {ip}:{port}...')
sock, address = server.accept()
            print(f'Client {address[0]}:{address[1]} connected')
except:
            print(f'Failed to start the server at {ip}:{port}')
net = 2
else:
        print(f'Unknown parameter \'{sys.argv[1]}\'')
except:
    print('Starting a single-player game on one screen')
| 31.709677
| 89
| 0.563581
|
import socket
import sys
from config import ip, port
net = 0
sock = None
try:
if sys.argv[1] == '--connect':
sock = socket.socket()
try:
sock.connect((sys.argv[2], int(sys.argv[3])))
            print('Connected to the game.')
except:
            print(f'Failed to connect to the game at {sys.argv[2]}:{sys.argv[3]}')
net = 1
elif sys.argv[1] == '--server':
try:
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((ip, port))
server.listen(1)
            print(f'Server started at {ip}:{port}...')
sock, address = server.accept()
            print(f'Client {address[0]}:{address[1]} connected')
except:
            print(f'Failed to start the server at {ip}:{port}')
net = 2
else:
        print(f'Unknown parameter \'{sys.argv[1]}\'')
except:
    print('Starting a single-player game on one screen')
| true
| true
|
79043c4a2b502fc7c643b0638228e702db778599
| 2,611
|
py
|
Python
|
neuro-cli/tests/unit/formatters/test_blob_formatters.py
|
neuro-inc/platform-client-python
|
012e355249ea900b76f9ce4209fb9d029652f9b2
|
[
"Apache-2.0"
] | 11
|
2020-10-11T15:38:11.000Z
|
2021-11-09T11:29:50.000Z
|
neuro-cli/tests/unit/formatters/test_blob_formatters.py
|
neuro-inc/platform-client-python
|
012e355249ea900b76f9ce4209fb9d029652f9b2
|
[
"Apache-2.0"
] | 611
|
2020-09-30T21:27:52.000Z
|
2022-01-10T10:44:44.000Z
|
neuro-cli/tests/unit/formatters/test_blob_formatters.py
|
neuro-inc/platform-client-python
|
012e355249ea900b76f9ce4209fb9d029652f9b2
|
[
"Apache-2.0"
] | 1
|
2020-10-05T15:10:24.000Z
|
2020-10-05T15:10:24.000Z
|
from datetime import datetime
from typing import Any, List, Union
import pytest
from neuro_sdk import BlobCommonPrefix, BlobObject, Bucket, BucketEntry
from neuro_cli.formatters.blob_storage import (
BaseBlobFormatter,
LongBlobFormatter,
SimpleBlobFormatter,
)
class TestBlobFormatter:
buckets: List[Bucket] = [
Bucket(
id="bucket-1",
name="neuro-my-bucket",
created_at=datetime(2018, 1, 1, 3),
cluster_name="test-cluster",
owner="test-user",
provider=Bucket.Provider.AWS,
imported=False,
),
Bucket(
id="bucket-2",
name="neuro-public-bucket",
created_at=datetime(2018, 1, 1, 17, 2, 4),
cluster_name="test-cluster",
owner="public",
provider=Bucket.Provider.AWS,
imported=False,
),
Bucket(
id="bucket-3",
name="neuro-shared-bucket",
created_at=datetime(2018, 1, 1, 13, 1, 5),
cluster_name="test-cluster",
owner="another-user",
provider=Bucket.Provider.AWS,
imported=False,
),
]
blobs: List[BucketEntry] = [
BlobObject(
key="file1024.txt",
modified_at=datetime(2018, 1, 1, 14, 0, 0),
bucket=buckets[0],
size=1024,
),
BlobObject(
key="file_bigger.txt",
modified_at=datetime(2018, 1, 1, 14, 0, 0),
bucket=buckets[1],
size=1_024_001,
),
BlobObject(
key="folder2/info.txt",
modified_at=datetime(2018, 1, 1, 14, 0, 0),
bucket=buckets[2],
size=240,
),
BlobObject(
key="folder2/",
modified_at=datetime(2018, 1, 1, 14, 0, 0),
bucket=buckets[2],
size=0,
),
]
folders: List[BucketEntry] = [
BlobCommonPrefix(bucket=buckets[0], key="folder1/", size=0),
BlobCommonPrefix(bucket=buckets[1], key="folder2/", size=0),
]
all: List[Union[Bucket, BucketEntry]] = [*buckets, *blobs, *folders]
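    # Render every bucket, blob and folder entry with each formatter and check the output via the rich_cmp fixture.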
@pytest.mark.parametrize(
"formatter",
[
(SimpleBlobFormatter(color=False, uri_formatter=str)),
(LongBlobFormatter(human_readable=False, color=False, uri_formatter=str)),
],
)
def test_long_formatter(self, rich_cmp: Any, formatter: BaseBlobFormatter) -> None:
for index, item in enumerate(self.all):
rich_cmp(formatter(item), index=index)
| 29.011111
| 87
| 0.542704
|
from datetime import datetime
from typing import Any, List, Union
import pytest
from neuro_sdk import BlobCommonPrefix, BlobObject, Bucket, BucketEntry
from neuro_cli.formatters.blob_storage import (
BaseBlobFormatter,
LongBlobFormatter,
SimpleBlobFormatter,
)
class TestBlobFormatter:
buckets: List[Bucket] = [
Bucket(
id="bucket-1",
name="neuro-my-bucket",
created_at=datetime(2018, 1, 1, 3),
cluster_name="test-cluster",
owner="test-user",
provider=Bucket.Provider.AWS,
imported=False,
),
Bucket(
id="bucket-2",
name="neuro-public-bucket",
created_at=datetime(2018, 1, 1, 17, 2, 4),
cluster_name="test-cluster",
owner="public",
provider=Bucket.Provider.AWS,
imported=False,
),
Bucket(
id="bucket-3",
name="neuro-shared-bucket",
created_at=datetime(2018, 1, 1, 13, 1, 5),
cluster_name="test-cluster",
owner="another-user",
provider=Bucket.Provider.AWS,
imported=False,
),
]
blobs: List[BucketEntry] = [
BlobObject(
key="file1024.txt",
modified_at=datetime(2018, 1, 1, 14, 0, 0),
bucket=buckets[0],
size=1024,
),
BlobObject(
key="file_bigger.txt",
modified_at=datetime(2018, 1, 1, 14, 0, 0),
bucket=buckets[1],
size=1_024_001,
),
BlobObject(
key="folder2/info.txt",
modified_at=datetime(2018, 1, 1, 14, 0, 0),
bucket=buckets[2],
size=240,
),
BlobObject(
key="folder2/",
modified_at=datetime(2018, 1, 1, 14, 0, 0),
bucket=buckets[2],
size=0,
),
]
folders: List[BucketEntry] = [
BlobCommonPrefix(bucket=buckets[0], key="folder1/", size=0),
BlobCommonPrefix(bucket=buckets[1], key="folder2/", size=0),
]
all: List[Union[Bucket, BucketEntry]] = [*buckets, *blobs, *folders]
@pytest.mark.parametrize(
"formatter",
[
(SimpleBlobFormatter(color=False, uri_formatter=str)),
(LongBlobFormatter(human_readable=False, color=False, uri_formatter=str)),
],
)
def test_long_formatter(self, rich_cmp: Any, formatter: BaseBlobFormatter) -> None:
for index, item in enumerate(self.all):
rich_cmp(formatter(item), index=index)
| true
| true
|
79043cf41aa29b5fb39b3b7e7cd4edd485a95348
| 8,812
|
py
|
Python
|
custom_components/xiaomi_gateway3/__init__.py
|
avbor/HomeAssistantConf
|
1f0fe16c8e3f3dcea7cc350f3fb9c233b6a22614
|
[
"Unlicense"
] | 35
|
2021-02-25T06:30:42.000Z
|
2022-03-09T20:18:47.000Z
|
custom_components/xiaomi_gateway3/__init__.py
|
avbor/HomeAssistantConf
|
1f0fe16c8e3f3dcea7cc350f3fb9c233b6a22614
|
[
"Unlicense"
] | 33
|
2021-11-22T16:30:43.000Z
|
2022-03-29T18:00:13.000Z
|
custom_components/xiaomi_gateway3/__init__.py
|
avbor/HomeAssistantConf
|
1f0fe16c8e3f3dcea7cc350f3fb9c233b6a22614
|
[
"Unlicense"
] | 19
|
2021-02-20T05:29:58.000Z
|
2022-02-05T16:22:30.000Z
|
import asyncio
import logging
import voluptuous as vol
from homeassistant.components.system_log import CONF_LOGGER
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import HomeAssistant, Event
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_create_clientsession
from homeassistant.helpers.entity_registry import EntityRegistry
from homeassistant.helpers.storage import Store
from .core import logger
from .core.gateway3 import Gateway3
from .core.helpers import DevicesRegistry
from .core.utils import DOMAIN, XiaomiGateway3Debug
from .core.xiaomi_cloud import MiCloud
_LOGGER = logging.getLogger(__name__)
DOMAINS = ['binary_sensor', 'climate', 'cover', 'light', 'remote', 'sensor',
'switch', 'alarm_control_panel']
CONF_DEVICES = 'devices'
CONF_ATTRIBUTES_TEMPLATE = 'attributes_template'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_DEVICES): {
cv.string: vol.Schema({
vol.Optional('occupancy_timeout'): cv.positive_int,
}, extra=vol.ALLOW_EXTRA),
},
CONF_LOGGER: logger.CONFIG_SCHEMA,
vol.Optional(CONF_ATTRIBUTES_TEMPLATE): cv.template
}, extra=vol.ALLOW_EXTRA),
}, extra=vol.ALLOW_EXTRA)
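# Optional YAML config: per-device defaults keyed by MAC address, logger settings and an attributes template.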
async def async_setup(hass: HomeAssistant, hass_config: dict):
config = hass_config.get(DOMAIN) or {}
if CONF_LOGGER in config:
logger.init(__name__, config[CONF_LOGGER], hass.config.config_dir)
info = await hass.helpers.system_info.async_get_system_info()
_LOGGER.debug(f"SysInfo: {info}")
# update global debug_mode for all gateways
if 'debug_mode' in config[CONF_LOGGER]:
setattr(Gateway3, 'debug_mode', config[CONF_LOGGER]['debug_mode'])
if CONF_DEVICES in config:
for k, v in config[CONF_DEVICES].items():
# AA:BB:CC:DD:EE:FF => aabbccddeeff
k = k.replace(':', '').lower()
DevicesRegistry.defaults[k] = v
hass.data[DOMAIN] = {
CONF_ATTRIBUTES_TEMPLATE: config.get(CONF_ATTRIBUTES_TEMPLATE)
}
await _handle_device_remove(hass)
# utils.migrate_unique_id(hass)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Support two kind of enties - MiCloud and Gateway."""
# entry for MiCloud login
if 'servers' in entry.data:
return await _setup_micloud_entry(hass, entry)
# migrate data (also after first setup) to options
if entry.data:
hass.config_entries.async_update_entry(entry, data={},
options=entry.data)
await _setup_logger(hass)
# add options handler
if not entry.update_listeners:
entry.add_update_listener(async_update_options)
hass.data[DOMAIN][entry.entry_id] = Gateway3(**entry.options)
hass.async_create_task(_setup_domains(hass, entry))
return True
async def async_update_options(hass: HomeAssistant, entry: ConfigEntry):
await hass.config_entries.async_reload(entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
# check unload cloud integration
if entry.entry_id not in hass.data[DOMAIN]:
return
# remove all stats entities if disable stats
if not entry.options.get('stats'):
suffix = ('_gateway', '_zigbee', '_ble')
registry: EntityRegistry = hass.data['entity_registry']
remove = [
entity.entity_id
for entity in list(registry.entities.values())
if (entity.config_entry_id == entry.entry_id and
entity.unique_id.endswith(suffix))
]
for entity_id in remove:
registry.async_remove(entity_id)
gw: Gateway3 = hass.data[DOMAIN][entry.entry_id]
await gw.stop()
await asyncio.gather(*[
hass.config_entries.async_forward_entry_unload(entry, domain)
for domain in DOMAINS
])
return True
async def _setup_domains(hass: HomeAssistant, entry: ConfigEntry):
# init setup for each supported domains
await asyncio.gather(*[
hass.config_entries.async_forward_entry_setup(entry, domain)
for domain in DOMAINS
])
gw: Gateway3 = hass.data[DOMAIN][entry.entry_id]
gw.start()
entry.async_on_unload(
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, gw.stop)
)
async def _setup_micloud_entry(hass: HomeAssistant, config_entry):
data: dict = config_entry.data.copy()
session = async_create_clientsession(hass)
hass.data[DOMAIN]['cloud'] = cloud = MiCloud(session, data['servers'])
if 'service_token' in data:
# load devices with saved MiCloud auth
cloud.auth = data
devices = await cloud.get_devices()
else:
devices = None
if devices is None:
_LOGGER.debug(f"Login to MiCloud for {config_entry.title}")
if await cloud.login(data['username'], data['password']):
# update MiCloud auth in .storage
data.update(cloud.auth)
hass.config_entries.async_update_entry(config_entry, data=data)
devices = await cloud.get_devices()
if devices is None:
_LOGGER.error("Can't load devices from MiCloud")
else:
_LOGGER.error("Can't login to MiCloud")
# load devices from or save to .storage
store = Store(hass, 1, f"{DOMAIN}/{data['username']}.json")
if devices is None:
_LOGGER.debug("Loading a list of devices from the .storage")
devices = await store.async_load()
else:
_LOGGER.debug(f"Loaded from MiCloud {len(devices)} devices")
await store.async_save(devices)
if devices is None:
_LOGGER.debug("No devices in .storage")
return False
# TODO: Think about a bunch of devices
if 'devices' not in hass.data[DOMAIN]:
hass.data[DOMAIN]['devices'] = devices
else:
hass.data[DOMAIN]['devices'] += devices
for device in devices:
# key - mac for BLE, and did for others
did = device['did'] if device['pid'] not in '6' else \
device['mac'].replace(':', '').lower()
DevicesRegistry.defaults.setdefault(did, {})
# don't override name if exists
DevicesRegistry.defaults[did].setdefault('device_name', device['name'])
return True
async def _handle_device_remove(hass: HomeAssistant):
"""Remove device from Hass and Mi Home if the device is renamed to
`delete`.
"""
async def device_registry_updated(event: Event):
if event.data['action'] != 'update':
return
registry = hass.data['device_registry']
hass_device = registry.async_get(event.data['device_id'])
# check empty identifiers
if not hass_device or not hass_device.identifiers:
return
# handle only our devices
for hass_did in hass_device.identifiers:
if hass_did[0] == DOMAIN and hass_device.name_by_user == 'delete':
break
else:
return
# remove from Mi Home
for gw in hass.data[DOMAIN].values():
if not isinstance(gw, Gateway3):
continue
gw_device = gw.get_device(hass_did[1])
if not gw_device:
continue
if gw_device['type'] == 'zigbee':
gw.debug(f"Remove device: {gw_device['did']}")
await gw.miio.send('remove_device', [gw_device['did']])
break
# remove from Hass
registry.async_remove_device(hass_device.id)
hass.bus.async_listen('device_registry_updated', device_registry_updated)
async def _setup_logger(hass: HomeAssistant):
if not hasattr(_LOGGER, 'defaul_level'):
# default level from Hass config
_LOGGER.defaul_level = _LOGGER.level
entries = hass.config_entries.async_entries(DOMAIN)
web_logs = any(e.options.get('debug') for e in entries)
    # only if the global logging level isn't set
if _LOGGER.defaul_level == logging.NOTSET:
# disable log to console
_LOGGER.propagate = web_logs is False
# set debug if any of integrations has debug
_LOGGER.setLevel(logging.DEBUG if web_logs else logging.NOTSET)
    # if the debug handler isn't set yet
if web_logs:
# skip if already added
if any(isinstance(h, XiaomiGateway3Debug) for h in _LOGGER.handlers):
return
handler = XiaomiGateway3Debug(hass)
_LOGGER.addHandler(handler)
if _LOGGER.defaul_level == logging.NOTSET:
info = await hass.helpers.system_info.async_get_system_info()
_LOGGER.debug(f"SysInfo: {info}")
| 32.758364
| 79
| 0.661825
|
import asyncio
import logging
import voluptuous as vol
from homeassistant.components.system_log import CONF_LOGGER
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import HomeAssistant, Event
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_create_clientsession
from homeassistant.helpers.entity_registry import EntityRegistry
from homeassistant.helpers.storage import Store
from .core import logger
from .core.gateway3 import Gateway3
from .core.helpers import DevicesRegistry
from .core.utils import DOMAIN, XiaomiGateway3Debug
from .core.xiaomi_cloud import MiCloud
_LOGGER = logging.getLogger(__name__)
DOMAINS = ['binary_sensor', 'climate', 'cover', 'light', 'remote', 'sensor',
'switch', 'alarm_control_panel']
CONF_DEVICES = 'devices'
CONF_ATTRIBUTES_TEMPLATE = 'attributes_template'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_DEVICES): {
cv.string: vol.Schema({
vol.Optional('occupancy_timeout'): cv.positive_int,
}, extra=vol.ALLOW_EXTRA),
},
CONF_LOGGER: logger.CONFIG_SCHEMA,
vol.Optional(CONF_ATTRIBUTES_TEMPLATE): cv.template
}, extra=vol.ALLOW_EXTRA),
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass: HomeAssistant, hass_config: dict):
config = hass_config.get(DOMAIN) or {}
if CONF_LOGGER in config:
logger.init(__name__, config[CONF_LOGGER], hass.config.config_dir)
info = await hass.helpers.system_info.async_get_system_info()
_LOGGER.debug(f"SysInfo: {info}")
if 'debug_mode' in config[CONF_LOGGER]:
setattr(Gateway3, 'debug_mode', config[CONF_LOGGER]['debug_mode'])
if CONF_DEVICES in config:
for k, v in config[CONF_DEVICES].items():
k = k.replace(':', '').lower()
DevicesRegistry.defaults[k] = v
hass.data[DOMAIN] = {
CONF_ATTRIBUTES_TEMPLATE: config.get(CONF_ATTRIBUTES_TEMPLATE)
}
await _handle_device_remove(hass)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
if 'servers' in entry.data:
return await _setup_micloud_entry(hass, entry)
if entry.data:
hass.config_entries.async_update_entry(entry, data={},
options=entry.data)
await _setup_logger(hass)
if not entry.update_listeners:
entry.add_update_listener(async_update_options)
hass.data[DOMAIN][entry.entry_id] = Gateway3(**entry.options)
hass.async_create_task(_setup_domains(hass, entry))
return True
async def async_update_options(hass: HomeAssistant, entry: ConfigEntry):
await hass.config_entries.async_reload(entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
if entry.entry_id not in hass.data[DOMAIN]:
return
if not entry.options.get('stats'):
suffix = ('_gateway', '_zigbee', '_ble')
registry: EntityRegistry = hass.data['entity_registry']
remove = [
entity.entity_id
for entity in list(registry.entities.values())
if (entity.config_entry_id == entry.entry_id and
entity.unique_id.endswith(suffix))
]
for entity_id in remove:
registry.async_remove(entity_id)
gw: Gateway3 = hass.data[DOMAIN][entry.entry_id]
await gw.stop()
await asyncio.gather(*[
hass.config_entries.async_forward_entry_unload(entry, domain)
for domain in DOMAINS
])
return True
async def _setup_domains(hass: HomeAssistant, entry: ConfigEntry):
await asyncio.gather(*[
hass.config_entries.async_forward_entry_setup(entry, domain)
for domain in DOMAINS
])
gw: Gateway3 = hass.data[DOMAIN][entry.entry_id]
gw.start()
entry.async_on_unload(
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, gw.stop)
)
async def _setup_micloud_entry(hass: HomeAssistant, config_entry):
data: dict = config_entry.data.copy()
session = async_create_clientsession(hass)
hass.data[DOMAIN]['cloud'] = cloud = MiCloud(session, data['servers'])
if 'service_token' in data:
cloud.auth = data
devices = await cloud.get_devices()
else:
devices = None
if devices is None:
_LOGGER.debug(f"Login to MiCloud for {config_entry.title}")
if await cloud.login(data['username'], data['password']):
data.update(cloud.auth)
hass.config_entries.async_update_entry(config_entry, data=data)
devices = await cloud.get_devices()
if devices is None:
_LOGGER.error("Can't load devices from MiCloud")
else:
_LOGGER.error("Can't login to MiCloud")
store = Store(hass, 1, f"{DOMAIN}/{data['username']}.json")
if devices is None:
_LOGGER.debug("Loading a list of devices from the .storage")
devices = await store.async_load()
else:
_LOGGER.debug(f"Loaded from MiCloud {len(devices)} devices")
await store.async_save(devices)
if devices is None:
_LOGGER.debug("No devices in .storage")
return False
if 'devices' not in hass.data[DOMAIN]:
hass.data[DOMAIN]['devices'] = devices
else:
hass.data[DOMAIN]['devices'] += devices
for device in devices:
did = device['did'] if device['pid'] not in '6' else \
device['mac'].replace(':', '').lower()
DevicesRegistry.defaults.setdefault(did, {})
DevicesRegistry.defaults[did].setdefault('device_name', device['name'])
return True
async def _handle_device_remove(hass: HomeAssistant):
async def device_registry_updated(event: Event):
if event.data['action'] != 'update':
return
registry = hass.data['device_registry']
hass_device = registry.async_get(event.data['device_id'])
# check empty identifiers
if not hass_device or not hass_device.identifiers:
return
# handle only our devices
for hass_did in hass_device.identifiers:
if hass_did[0] == DOMAIN and hass_device.name_by_user == 'delete':
break
else:
return
# remove from Mi Home
for gw in hass.data[DOMAIN].values():
if not isinstance(gw, Gateway3):
continue
gw_device = gw.get_device(hass_did[1])
if not gw_device:
continue
if gw_device['type'] == 'zigbee':
gw.debug(f"Remove device: {gw_device['did']}")
await gw.miio.send('remove_device', [gw_device['did']])
break
# remove from Hass
registry.async_remove_device(hass_device.id)
hass.bus.async_listen('device_registry_updated', device_registry_updated)
async def _setup_logger(hass: HomeAssistant):
if not hasattr(_LOGGER, 'defaul_level'):
# default level from Hass config
_LOGGER.defaul_level = _LOGGER.level
entries = hass.config_entries.async_entries(DOMAIN)
web_logs = any(e.options.get('debug') for e in entries)
    # only if the global logging level isn't set
if _LOGGER.defaul_level == logging.NOTSET:
_LOGGER.propagate = web_logs is False
_LOGGER.setLevel(logging.DEBUG if web_logs else logging.NOTSET)
if web_logs:
# skip if already added
if any(isinstance(h, XiaomiGateway3Debug) for h in _LOGGER.handlers):
return
handler = XiaomiGateway3Debug(hass)
_LOGGER.addHandler(handler)
if _LOGGER.defaul_level == logging.NOTSET:
info = await hass.helpers.system_info.async_get_system_info()
_LOGGER.debug(f"SysInfo: {info}")
| true
| true
|
79043d51417e1308f8af8a56c70ca9301d337d11
| 6,319
|
py
|
Python
|
mstress_plan.py
|
maismail/qfs-mstress
|
8e65b1e77b2cce6a875658a634ae217592142dab
|
[
"Apache-2.0"
] | 1
|
2017-08-21T10:57:34.000Z
|
2017-08-21T10:57:34.000Z
|
mstress_plan.py
|
maismail/qfs-mstress
|
8e65b1e77b2cce6a875658a634ae217592142dab
|
[
"Apache-2.0"
] | null | null | null |
mstress_plan.py
|
maismail/qfs-mstress
|
8e65b1e77b2cce6a875658a634ae217592142dab
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# $Id$
#
# Author: Thilee Subramaniam
#
# Copyright 2012 Quantcast Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This code is used to generate a plan file for metaserver vs namenode
# benchmarking.
#
import optparse
import sys
import subprocess
import time
import os
import math
import getpass
"""
This program is used to create the directory/file layout to be used
in metaserver/namenode stress test.
You basically specify the depth of the directory tree and the number
of elements (files or directories) per level, along with the list of
client-hosts you want to use and the number of clients per client-host
that you want to use.
This script will generate the plan file, and copy it to the /tmp on the
given list of client hosts.
Thereafter, you can execute the mstress.py with this plan file.
"""
class Globals:
PATH_PREFIX = 'Dir_'
PLAN_OUTPUT = './planfile.txt'
def ParseCommandline():
epi = ('Example: "%s -c h1,h2 -n 3 -l 4 -i 3 -s 100" would create 4 levels of 3 inodes ' % sys.argv[0] +
'(3+9+27+81=120) per client process. Since there are 3 ' +
'processes on 2 hosts, we create 120x6=720 inodes. We will attempt ' +
'to stat 100 random leaf paths using all client processes. We will do a readdir ' +
'all through the directory tree.')
parser = optparse.OptionParser(epilog=epi)
parser.add_option('-c', '--client-hosts',
action='store',
default='localhost',
type='string',
help='Comma-separated list of client host names.')
parser.add_option('-n', '--clients-per-host',
action='store',
default=1,
type='int',
help='Number of clients per client host.')
parser.add_option('-l', '--levels',
action='store',
default=1,
type='int',
help='File-tree depth on each client.')
parser.add_option('-i', '--inodes-per-level',
action='store',
default=100,
type='int',
help='Inodes per each level on each client.')
parser.add_option('-t', '--path-type',
action='store',
default='dir',
type='string',
help='Whether to create "dir" or "file" inodes.')
parser.add_option('-s', '--num-to-stat',
action='store',
default=100,
type='int',
help='Number of inodes to stat (<=total leaf inodes).')
parser.add_option('-o', '--output-file',
action='store',
default=None,
type='string',
help='Output plan file.')
opts, args = parser.parse_args()
if args:
sys.exit('Unexpected arguments: %s.' % str(args))
if opts.output_file is None:
opts.output_file = '/tmp/mstress_%s_%s.plan' % (getpass.getuser(), time.strftime("%F-%H-%M-%S", time.gmtime()))
return opts
def main():
opts = ParseCommandline()
hostlist = opts.client_hosts.split(',')
numClientProcesses = float(len(hostlist) * opts.clients_per_host)
if numClientProcesses == 0.0:
sys.exit('Invalid client processes')
  #get the number of paths each client should stat: ceil(num_to_stat / number of client processes)
statPerClient = int(math.ceil(float(opts.num_to_stat) / numClientProcesses))
#print opts
outfile = open(opts.output_file, 'w')
outfile.write('# *** DO NOT EDIT THIS FILE BY HAND *** \n# USE mstress_plan.py TO MODIFY INSTEAD\n#\n')
outfile.write('#List of hosts taking part in the plan\nhostslist=%s\n' % opts.client_hosts)
  outfile.write('#Number of mstress clients per client host\nclientsperhost=%d\n' % opts.clients_per_host)
outfile.write('#File or directory\ntype=%s\n' % opts.path_type)
outfile.write('#Number of levels in created tree\nlevels=%d\n' % opts.levels)
outfile.write('#Number of inodes per level\ninodes=%d\n' % opts.inodes_per_level)
outfile.write('#Number of random paths to stat, per client\nnstat=%d\n' % statPerClient)
""" old code
begin_tree_delta = 0
for level in range(0,opts.levels):
begin_tree_delta = begin_tree_delta + pow(opts.inodes_per_level, level + 1)
#print "delta = ", begin_tree_delta
outfile.write('#host\tclient\tlevel\tdistribution\n')
begin_tree_idx = 0
for host_no in range(0,len(hostlist)):
host = hostlist[host_no]
for client_no in range(0,opts.clients_per_host):
# tree for this level
begin_idx = begin_tree_idx
for level in range(0,opts.levels):
prefix = '%s\tproc_%02d\t%d\t' % (host, client_no, level)
# print '-- h=%d, c=%d level=%d, begin idx = %d' % (host_no, client_no, level, begin_idx)
suffix = ''
for ranges in range(0, pow(opts.inodes_per_level, level)):
if len(suffix) != 0:
suffix = suffix + ','
suffix = suffix + '%d-%d'%(begin_idx, begin_idx + opts.inodes_per_level - 1)
begin_idx = begin_idx + opts.inodes_per_level
outfile.write('%s\t%s\n' % (prefix, suffix))
begin_tree_idx = begin_tree_idx + begin_tree_delta
#print "next begin tree idx = ", begin_tree_idx
"""
outfile.close()
print '==> Created planfile: %s' % opts.output_file
print 'copying file %s to all client hosts' % opts.output_file
for client in hostlist:
p = subprocess.Popen(['/usr/bin/scp', os.path.abspath(opts.output_file), '%s:%s' % (client, opts.output_file)])
while 1:
ret = p.poll()
      if ret is None:
time.sleep(0.5)
else:
        print 'transferred %s to %s' % (opts.output_file, client)
break
if __name__ == '__main__':
main()
| 37.170588
| 115
| 0.627947
|
import optparse
import sys
import subprocess
import time
import os
import math
import getpass
"""
This program is used to create the directory/file layout to be used
in metaserver/namenode stress test.
You basically specify the depth of the directory tree and the number
of elements (files or directories) per level, along with the list of
client-hosts you want to use and the number of clients per client-host
that you want to use.
This script will generate the plan file, and copy it to the /tmp on the
given list of client hosts.
Thereafter, you can execute the mstress.py with this plan file.
"""
class Globals:
PATH_PREFIX = 'Dir_'
PLAN_OUTPUT = './planfile.txt'
def ParseCommandline():
epi = ('Example: "%s -c h1,h2 -n 3 -l 4 -i 3 -s 100" would create 4 levels of 3 inodes ' % sys.argv[0] +
'(3+9+27+81=120) per client process. Since there are 3 ' +
'processes on 2 hosts, we create 120x6=720 inodes. We will attempt ' +
'to stat 100 random leaf paths using all client processes. We will do a readdir ' +
'all through the directory tree.')
parser = optparse.OptionParser(epilog=epi)
parser.add_option('-c', '--client-hosts',
action='store',
default='localhost',
type='string',
help='Comma-separated list of client host names.')
parser.add_option('-n', '--clients-per-host',
action='store',
default=1,
type='int',
help='Number of clients per client host.')
parser.add_option('-l', '--levels',
action='store',
default=1,
type='int',
help='File-tree depth on each client.')
parser.add_option('-i', '--inodes-per-level',
action='store',
default=100,
type='int',
help='Inodes per each level on each client.')
parser.add_option('-t', '--path-type',
action='store',
default='dir',
type='string',
help='Whether to create "dir" or "file" inodes.')
parser.add_option('-s', '--num-to-stat',
action='store',
default=100,
type='int',
help='Number of inodes to stat (<=total leaf inodes).')
parser.add_option('-o', '--output-file',
action='store',
default=None,
type='string',
help='Output plan file.')
opts, args = parser.parse_args()
if args:
sys.exit('Unexpected arguments: %s.' % str(args))
if opts.output_file is None:
opts.output_file = '/tmp/mstress_%s_%s.plan' % (getpass.getuser(), time.strftime("%F-%H-%M-%S", time.gmtime()))
return opts
def main():
opts = ParseCommandline()
hostlist = opts.client_hosts.split(',')
numClientProcesses = float(len(hostlist) * opts.clients_per_host)
if numClientProcesses == 0.0:
sys.exit('Invalid client processes')
statPerClient = int(math.ceil(float(opts.num_to_stat) / numClientProcesses))
outfile = open(opts.output_file, 'w')
outfile.write('# *** DO NOT EDIT THIS FILE BY HAND *** \n# USE mstress_plan.py TO MODIFY INSTEAD\n#\n')
outfile.write('#List of hosts taking part in the plan\nhostslist=%s\n' % opts.client_hosts)
  outfile.write('#Number of mstress clients per client host\nclientsperhost=%d\n' % opts.clients_per_host)
outfile.write('#File or directory\ntype=%s\n' % opts.path_type)
outfile.write('#Number of levels in created tree\nlevels=%d\n' % opts.levels)
outfile.write('#Number of inodes per level\ninodes=%d\n' % opts.inodes_per_level)
outfile.write('#Number of random paths to stat, per client\nnstat=%d\n' % statPerClient)
""" old code
begin_tree_delta = 0
for level in range(0,opts.levels):
begin_tree_delta = begin_tree_delta + pow(opts.inodes_per_level, level + 1)
#print "delta = ", begin_tree_delta
outfile.write('#host\tclient\tlevel\tdistribution\n')
begin_tree_idx = 0
for host_no in range(0,len(hostlist)):
host = hostlist[host_no]
for client_no in range(0,opts.clients_per_host):
# tree for this level
begin_idx = begin_tree_idx
for level in range(0,opts.levels):
prefix = '%s\tproc_%02d\t%d\t' % (host, client_no, level)
# print '-- h=%d, c=%d level=%d, begin idx = %d' % (host_no, client_no, level, begin_idx)
suffix = ''
for ranges in range(0, pow(opts.inodes_per_level, level)):
if len(suffix) != 0:
suffix = suffix + ','
suffix = suffix + '%d-%d'%(begin_idx, begin_idx + opts.inodes_per_level - 1)
begin_idx = begin_idx + opts.inodes_per_level
outfile.write('%s\t%s\n' % (prefix, suffix))
begin_tree_idx = begin_tree_idx + begin_tree_delta
#print "next begin tree idx = ", begin_tree_idx
"""
outfile.close()
print '==> Created planfile: %s' % opts.output_file
print 'copying file %s to all client hosts' % opts.output_file
for client in hostlist:
p = subprocess.Popen(['/usr/bin/scp', os.path.abspath(opts.output_file), '%s:%s' % (client, opts.output_file)])
while 1:
ret = p.poll()
      if ret is None:
time.sleep(0.5)
else:
        print 'transferred %s to %s' % (opts.output_file, client)
break
if __name__ == '__main__':
main()
| false
| true
|
79043ec4bdb5af351b26174d623180bca22dd415
| 3,151
|
py
|
Python
|
models/BiLSTM.py
|
ahmedtolan23/NER-with-bilstm-CRF-CNN
|
29db9f2e357fc4112f9b5752f8ec604e4b9a04b0
|
[
"Apache-2.0"
] | 4
|
2019-11-07T08:03:22.000Z
|
2020-07-22T07:15:44.000Z
|
models/BiLSTM.py
|
ahmedtolan23/NER-with-bilstm-CRF-CNN
|
29db9f2e357fc4112f9b5752f8ec604e4b9a04b0
|
[
"Apache-2.0"
] | null | null | null |
models/BiLSTM.py
|
ahmedtolan23/NER-with-bilstm-CRF-CNN
|
29db9f2e357fc4112f9b5752f8ec604e4b9a04b0
|
[
"Apache-2.0"
] | 1
|
2019-09-11T19:54:37.000Z
|
2019-09-11T19:54:37.000Z
|
"""
FILE : BiLSTM.py
FUNCTION : None
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import random
from DataUtils.Common import *
from models.initialize import *
from models.modelHelp import prepare_pack_padded_sequence
torch.manual_seed(seed_num)
random.seed(seed_num)
class BiLSTM(nn.Module):
"""
BiLSTM
"""
def __init__(self, **kwargs):
super(BiLSTM, self).__init__()
for k in kwargs:
self.__setattr__(k, kwargs[k])
V = self.embed_num
D = self.embed_dim
C = self.label_num
paddingId = self.paddingId
self.embed = nn.Embedding(V, D, padding_idx=paddingId)
if self.pretrained_embed:
self.embed.weight.data.copy_(self.pretrained_weight)
else:
init_embedding(self.embed.weight)
self.dropout_embed = nn.Dropout(self.dropout_emb)
self.dropout = nn.Dropout(self.dropout)
self.bilstm = nn.LSTM(input_size=D, hidden_size=self.lstm_hiddens, num_layers=self.lstm_layers,
bidirectional=True, batch_first=True, bias=True)
self.linear = nn.Linear(in_features=self.lstm_hiddens * 2, out_features=C, bias=True)
init_linear(self.linear)
def forward(self, word, sentence_length):
"""
:param word:
:param sentence_length:
:param desorted_indices:
:return:
"""
word, sentence_length, desorted_indices = prepare_pack_padded_sequence(word, sentence_length, device=self.device)
x = self.embed(word) # (N,W,D)
x = self.dropout_embed(x)
packed_embed = pack_padded_sequence(x, sentence_length, batch_first=True)
x, _ = self.bilstm(packed_embed)
x, _ = pad_packed_sequence(x, batch_first=True)
x = x[desorted_indices]
x = self.dropout(x)
x = torch.tanh(x)
logit = self.linear(x)
return logit
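# NOTE: the second BiLSTM definition below shadows the first one when this module is imported.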
class BiLSTM(nn.Module):
def __init__(self, vocab_size, emb_size, hidden_size, out_size):
""":
vocab_size:
emb_size:
hidden_size:
out_size:
"""
super(BiLSTM, self).__init__()
self.embedding = nn.Embedding(vocab_size, emb_size)
self.bilstm = nn.LSTM(emb_size, hidden_size,
batch_first=True,
bidirectional=True)
self.lin = nn.Linear(2*hidden_size, out_size)
def forward(self, sents_tensor, lengths):
emb = self.embedding(sents_tensor) # [B, L, emb_size]
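        # pack the padded batch so the LSTM skips positions beyond each sequence's true length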
packed = pack_padded_sequence(emb, lengths, batch_first=True)
rnn_out, _ = self.bilstm(packed)
# rnn_out:[B, L, hidden_size*2]
rnn_out, _ = pad_packed_sequence(rnn_out, batch_first=True)
scores = self.lin(rnn_out) # [B, L, out_size]
return scores
def test(self, sents_tensor, lengths, _):
logits = self.forward(sents_tensor, lengths) # [B, L, out_size]
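        # greedy decoding: take the argmax over the tag dimension for every token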
_, batch_tagids = torch.max(logits, dim=2)
return batch_tagids
| 30.892157
| 121
| 0.623294
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import random
from DataUtils.Common import *
from models.initialize import *
from models.modelHelp import prepare_pack_padded_sequence
torch.manual_seed(seed_num)
random.seed(seed_num)
class BiLSTM(nn.Module):
def __init__(self, **kwargs):
super(BiLSTM, self).__init__()
for k in kwargs:
self.__setattr__(k, kwargs[k])
V = self.embed_num
D = self.embed_dim
C = self.label_num
paddingId = self.paddingId
self.embed = nn.Embedding(V, D, padding_idx=paddingId)
if self.pretrained_embed:
self.embed.weight.data.copy_(self.pretrained_weight)
else:
init_embedding(self.embed.weight)
self.dropout_embed = nn.Dropout(self.dropout_emb)
self.dropout = nn.Dropout(self.dropout)
self.bilstm = nn.LSTM(input_size=D, hidden_size=self.lstm_hiddens, num_layers=self.lstm_layers,
bidirectional=True, batch_first=True, bias=True)
self.linear = nn.Linear(in_features=self.lstm_hiddens * 2, out_features=C, bias=True)
init_linear(self.linear)
def forward(self, word, sentence_length):
word, sentence_length, desorted_indices = prepare_pack_padded_sequence(word, sentence_length, device=self.device)
x = self.embed(word)
x = self.dropout_embed(x)
packed_embed = pack_padded_sequence(x, sentence_length, batch_first=True)
x, _ = self.bilstm(packed_embed)
x, _ = pad_packed_sequence(x, batch_first=True)
x = x[desorted_indices]
x = self.dropout(x)
x = torch.tanh(x)
logit = self.linear(x)
return logit
class BiLSTM(nn.Module):
def __init__(self, vocab_size, emb_size, hidden_size, out_size):
super(BiLSTM, self).__init__()
self.embedding = nn.Embedding(vocab_size, emb_size)
self.bilstm = nn.LSTM(emb_size, hidden_size,
batch_first=True,
bidirectional=True)
self.lin = nn.Linear(2*hidden_size, out_size)
def forward(self, sents_tensor, lengths):
emb = self.embedding(sents_tensor)
packed = pack_padded_sequence(emb, lengths, batch_first=True)
rnn_out, _ = self.bilstm(packed)
rnn_out, _ = pad_packed_sequence(rnn_out, batch_first=True)
scores = self.lin(rnn_out)
return scores
def test(self, sents_tensor, lengths, _):
logits = self.forward(sents_tensor, lengths)
_, batch_tagids = torch.max(logits, dim=2)
return batch_tagids
| true
| true
|
790442b0c52017836428ec0e97f3379e298f3e64
| 10,389
|
py
|
Python
|
src/_pytest/compat.py
|
robholt/pytest
|
307652202c7fa83369f5abfd822a408d4cc32d14
|
[
"MIT"
] | null | null | null |
src/_pytest/compat.py
|
robholt/pytest
|
307652202c7fa83369f5abfd822a408d4cc32d14
|
[
"MIT"
] | null | null | null |
src/_pytest/compat.py
|
robholt/pytest
|
307652202c7fa83369f5abfd822a408d4cc32d14
|
[
"MIT"
] | null | null | null |
"""
python version compatibility code
"""
import functools
import inspect
import io
import re
import sys
from contextlib import contextmanager
from inspect import Parameter
from inspect import signature
import attr
import py
import _pytest
from _pytest._io.saferepr import saferepr
from _pytest.outcomes import fail
from _pytest.outcomes import TEST_OUTCOME
NOTSET = object()
MODULE_NOT_FOUND_ERROR = (
"ModuleNotFoundError" if sys.version_info[:2] >= (3, 6) else "ImportError"
)
if sys.version_info >= (3, 8):
from importlib import metadata as importlib_metadata # noqa: F401
else:
import importlib_metadata # noqa: F401
def _format_args(func):
return str(signature(func))
# The type of re.compile objects is not exposed in Python.
REGEX_TYPE = type(re.compile(""))
def is_generator(func):
genfunc = inspect.isgeneratorfunction(func)
return genfunc and not iscoroutinefunction(func)
def iscoroutinefunction(func):
"""
Return True if func is a coroutine function (a function defined with async
def syntax, and doesn't contain yield), or a function decorated with
@asyncio.coroutine.
    Note: copied and modified from Python 3.5's builtin coroutines.py to avoid
importing asyncio directly, which in turns also initializes the "logging"
module as a side-effect (see issue #8).
"""
return inspect.iscoroutinefunction(func) or getattr(func, "_is_coroutine", False)
def getlocation(function, curdir=None):
function = get_real_func(function)
fn = py.path.local(inspect.getfile(function))
lineno = function.__code__.co_firstlineno
if curdir is not None and fn.relto(curdir):
fn = fn.relto(curdir)
return "%s:%d" % (fn, lineno + 1)
def num_mock_patch_args(function):
""" return number of arguments used up by mock arguments (if any) """
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
mock_sentinel = getattr(sys.modules.get("mock"), "DEFAULT", object())
ut_mock_sentinel = getattr(sys.modules.get("unittest.mock"), "DEFAULT", object())
return len(
[
p
for p in patchings
if not p.attribute_name
and (p.new is mock_sentinel or p.new is ut_mock_sentinel)
]
)
def getfuncargnames(function, *, name: str = "", is_method=False, cls=None):
"""Returns the names of a function's mandatory arguments.
This should return the names of all function arguments that:
* Aren't bound to an instance or type as in instance or class methods.
* Don't have default values.
* Aren't bound with functools.partial.
* Aren't replaced with mocks.
The is_method and cls arguments indicate that the function should
be treated as a bound method even though it's not unless, only in
the case of cls, the function is a static method.
The name parameter should be the original name in which the function was collected.
@RonnyPfannschmidt: This function should be refactored when we
revisit fixtures. The fixture mechanism should ask the node for
the fixture names, and not try to obtain directly from the
function object well after collection has occurred.
"""
# The parameters attribute of a Signature object contains an
# ordered mapping of parameter names to Parameter instances. This
# creates a tuple of the names of the parameters that don't have
# defaults.
try:
parameters = signature(function).parameters
except (ValueError, TypeError) as e:
fail(
"Could not determine arguments of {!r}: {}".format(function, e),
pytrace=False,
)
arg_names = tuple(
p.name
for p in parameters.values()
if (
p.kind is Parameter.POSITIONAL_OR_KEYWORD
or p.kind is Parameter.KEYWORD_ONLY
)
and p.default is Parameter.empty
)
if not name:
name = function.__name__
# If this function should be treated as a bound method even though
# it's passed as an unbound method or function, remove the first
# parameter name.
if is_method or (
cls and not isinstance(cls.__dict__.get(name, None), staticmethod)
):
arg_names = arg_names[1:]
# Remove any names that will be replaced with mocks.
if hasattr(function, "__wrapped__"):
arg_names = arg_names[num_mock_patch_args(function) :]
return arg_names
if sys.version_info < (3, 7):
@contextmanager
def nullcontext():
yield
else:
from contextlib import nullcontext # noqa
def get_default_arg_names(function):
# Note: this code intentionally mirrors the code at the beginning of getfuncargnames,
# to get the arguments which were excluded from its result because they had default values
return tuple(
p.name
for p in signature(function).parameters.values()
if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
and p.default is not Parameter.empty
)
_non_printable_ascii_translate_table = {
i: "\\x{:02x}".format(i) for i in range(128) if i not in range(32, 127)
}
_non_printable_ascii_translate_table.update(
{ord("\t"): "\\t", ord("\r"): "\\r", ord("\n"): "\\n"}
)
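# translation table for repr-style escaping: each non-printable ASCII code point maps to \xNN, with \t, \r, \n kept short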
def _translate_non_printable(s):
return s.translate(_non_printable_ascii_translate_table)
STRING_TYPES = bytes, str
def _bytes_to_ascii(val):
return val.decode("ascii", "backslashreplace")
def ascii_escaped(val):
"""If val is pure ascii, returns it as a str(). Otherwise, escapes
bytes objects into a sequence of escaped bytes:
b'\xc3\xb4\xc5\xd6' -> '\\xc3\\xb4\\xc5\\xd6'
and escapes unicode objects into a sequence of escaped unicode
ids, e.g.:
'4\\nV\\U00043efa\\x0eMXWB\\x1e\\u3028\\u15fd\\xcd\\U0007d944'
note:
the obvious "v.decode('unicode-escape')" will return
valid utf-8 unicode if it finds them in bytes, but we
want to return escaped bytes for any byte, even if they match
a utf-8 string.
"""
if isinstance(val, bytes):
ret = _bytes_to_ascii(val)
else:
ret = val.encode("unicode_escape").decode("ascii")
return _translate_non_printable(ret)
@attr.s
class _PytestWrapper:
"""Dummy wrapper around a function object for internal use only.
Used to correctly unwrap the underlying function object
when we are creating fixtures, because we wrap the function object ourselves with a decorator
to issue warnings when the fixture function is called directly.
"""
obj = attr.ib()
def get_real_func(obj):
""" gets the real function object of the (possibly) wrapped object by
functools.wraps or functools.partial.
"""
start_obj = obj
for i in range(100):
# __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function
# to trigger a warning if it gets called directly instead of by pytest: we don't
# want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774)
new_obj = getattr(obj, "__pytest_wrapped__", None)
if isinstance(new_obj, _PytestWrapper):
obj = new_obj.obj
break
new_obj = getattr(obj, "__wrapped__", None)
if new_obj is None:
break
obj = new_obj
else:
raise ValueError(
("could not find real function of {start}\nstopped at {current}").format(
start=saferepr(start_obj), current=saferepr(obj)
)
)
if isinstance(obj, functools.partial):
obj = obj.func
return obj
def get_real_method(obj, holder):
"""
Attempts to obtain the real function object that might be wrapping ``obj``, while at the same time
returning a bound method to ``holder`` if the original object was a bound method.
"""
try:
is_method = hasattr(obj, "__func__")
obj = get_real_func(obj)
except Exception: # pragma: no cover
return obj
if is_method and hasattr(obj, "__get__") and callable(obj.__get__):
obj = obj.__get__(holder)
return obj
def getfslineno(obj):
# xxx let decorators etc specify a sane ordering
obj = get_real_func(obj)
if hasattr(obj, "place_as"):
obj = obj.place_as
fslineno = _pytest._code.getfslineno(obj)
assert isinstance(fslineno[1], int), obj
return fslineno
def getimfunc(func):
try:
return func.__func__
except AttributeError:
return func
def safe_getattr(object, name, default):
""" Like getattr but return default upon any Exception or any OutcomeException.
Attribute access can potentially fail for 'evil' Python objects.
See issue #214.
    It catches OutcomeException because of #2490 (issue #580): new outcomes are derived from BaseException
    instead of Exception (for more details check #2707).
"""
try:
return getattr(object, name, default)
except TEST_OUTCOME:
return default
def safe_isclass(obj):
"""Ignore any exception via isinstance on Python 3."""
try:
return inspect.isclass(obj)
except Exception:
return False
COLLECT_FAKEMODULE_ATTRIBUTES = (
"Collector",
"Module",
"Function",
"Instance",
"Session",
"Item",
"Class",
"File",
"_fillfuncargs",
)
def _setup_collect_fakemodule():
from types import ModuleType
import pytest
pytest.collect = ModuleType("pytest.collect")
pytest.collect.__all__ = [] # used for setns
for attr_name in COLLECT_FAKEMODULE_ATTRIBUTES:
setattr(pytest.collect, attr_name, getattr(pytest, attr_name))
class CaptureIO(io.TextIOWrapper):
def __init__(self):
super().__init__(io.BytesIO(), encoding="UTF-8", newline="", write_through=True)
def getvalue(self):
return self.buffer.getvalue().decode("UTF-8")
class FuncargnamesCompatAttr:
""" helper class so that Metafunc, Function and FixtureRequest
don't need to each define the "funcargnames" compatibility attribute.
"""
@property
def funcargnames(self):
""" alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
import warnings
from _pytest.deprecated import FUNCARGNAMES
warnings.warn(FUNCARGNAMES, stacklevel=2)
return self.fixturenames
| 29.682857
| 106
| 0.677544
|
import functools
import inspect
import io
import re
import sys
from contextlib import contextmanager
from inspect import Parameter
from inspect import signature
import attr
import py
import _pytest
from _pytest._io.saferepr import saferepr
from _pytest.outcomes import fail
from _pytest.outcomes import TEST_OUTCOME
NOTSET = object()
MODULE_NOT_FOUND_ERROR = (
"ModuleNotFoundError" if sys.version_info[:2] >= (3, 6) else "ImportError"
)
if sys.version_info >= (3, 8):
from importlib import metadata as importlib_metadata
else:
import importlib_metadata
def _format_args(func):
return str(signature(func))
REGEX_TYPE = type(re.compile(""))
def is_generator(func):
genfunc = inspect.isgeneratorfunction(func)
return genfunc and not iscoroutinefunction(func)
def iscoroutinefunction(func):
return inspect.iscoroutinefunction(func) or getattr(func, "_is_coroutine", False)
def getlocation(function, curdir=None):
function = get_real_func(function)
fn = py.path.local(inspect.getfile(function))
lineno = function.__code__.co_firstlineno
if curdir is not None and fn.relto(curdir):
fn = fn.relto(curdir)
return "%s:%d" % (fn, lineno + 1)
def num_mock_patch_args(function):
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
mock_sentinel = getattr(sys.modules.get("mock"), "DEFAULT", object())
ut_mock_sentinel = getattr(sys.modules.get("unittest.mock"), "DEFAULT", object())
return len(
[
p
for p in patchings
if not p.attribute_name
and (p.new is mock_sentinel or p.new is ut_mock_sentinel)
]
)
def getfuncargnames(function, *, name: str = "", is_method=False, cls=None):
# defaults.
try:
parameters = signature(function).parameters
except (ValueError, TypeError) as e:
fail(
"Could not determine arguments of {!r}: {}".format(function, e),
pytrace=False,
)
arg_names = tuple(
p.name
for p in parameters.values()
if (
p.kind is Parameter.POSITIONAL_OR_KEYWORD
or p.kind is Parameter.KEYWORD_ONLY
)
and p.default is Parameter.empty
)
if not name:
name = function.__name__
# If this function should be treated as a bound method even though
# it's passed as an unbound method or function, remove the first
if is_method or (
cls and not isinstance(cls.__dict__.get(name, None), staticmethod)
):
arg_names = arg_names[1:]
if hasattr(function, "__wrapped__"):
arg_names = arg_names[num_mock_patch_args(function) :]
return arg_names
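# --- hedged example (editor's addition, not part of the original module) ---
# Only parameters without defaults count as fixture argument names, which is
# what getfuncargnames extracts; `check` is an illustrative test function.
def _demo_getfuncargnames():
    def check(tmpdir, monkeypatch, flag=True):
        pass

    assert getfuncargnames(check) == ("tmpdir", "monkeypatch")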
if sys.version_info < (3, 7):
@contextmanager
def nullcontext():
yield
else:
from contextlib import nullcontext
def get_default_arg_names(function):
return tuple(
p.name
for p in signature(function).parameters.values()
if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
and p.default is not Parameter.empty
)
_non_printable_ascii_translate_table = {
i: "\\x{:02x}".format(i) for i in range(128) if i not in range(32, 127)
}
_non_printable_ascii_translate_table.update(
{ord("\t"): "\\t", ord("\r"): "\\r", ord("\n"): "\\n"}
)
def _translate_non_printable(s):
return s.translate(_non_printable_ascii_translate_table)
STRING_TYPES = bytes, str
def _bytes_to_ascii(val):
return val.decode("ascii", "backslashreplace")
def ascii_escaped(val):
if isinstance(val, bytes):
ret = _bytes_to_ascii(val)
else:
ret = val.encode("unicode_escape").decode("ascii")
return _translate_non_printable(ret)
@attr.s
class _PytestWrapper:
obj = attr.ib()
def get_real_func(obj):
start_obj = obj
for i in range(100):
# want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774)
new_obj = getattr(obj, "__pytest_wrapped__", None)
if isinstance(new_obj, _PytestWrapper):
obj = new_obj.obj
break
new_obj = getattr(obj, "__wrapped__", None)
if new_obj is None:
break
obj = new_obj
else:
raise ValueError(
("could not find real function of {start}\nstopped at {current}").format(
start=saferepr(start_obj), current=saferepr(obj)
)
)
if isinstance(obj, functools.partial):
obj = obj.func
return obj
def get_real_method(obj, holder):
try:
is_method = hasattr(obj, "__func__")
obj = get_real_func(obj)
except Exception: # pragma: no cover
return obj
if is_method and hasattr(obj, "__get__") and callable(obj.__get__):
obj = obj.__get__(holder)
return obj
def getfslineno(obj):
# xxx let decorators etc specify a sane ordering
obj = get_real_func(obj)
if hasattr(obj, "place_as"):
obj = obj.place_as
fslineno = _pytest._code.getfslineno(obj)
assert isinstance(fslineno[1], int), obj
return fslineno
def getimfunc(func):
try:
return func.__func__
except AttributeError:
return func
def safe_getattr(object, name, default):
try:
return getattr(object, name, default)
except TEST_OUTCOME:
return default
def safe_isclass(obj):
try:
return inspect.isclass(obj)
except Exception:
return False
COLLECT_FAKEMODULE_ATTRIBUTES = (
"Collector",
"Module",
"Function",
"Instance",
"Session",
"Item",
"Class",
"File",
"_fillfuncargs",
)
def _setup_collect_fakemodule():
from types import ModuleType
import pytest
pytest.collect = ModuleType("pytest.collect")
pytest.collect.__all__ = [] # used for setns
for attr_name in COLLECT_FAKEMODULE_ATTRIBUTES:
setattr(pytest.collect, attr_name, getattr(pytest, attr_name))
class CaptureIO(io.TextIOWrapper):
def __init__(self):
super().__init__(io.BytesIO(), encoding="UTF-8", newline="", write_through=True)
def getvalue(self):
return self.buffer.getvalue().decode("UTF-8")
class FuncargnamesCompatAttr:
@property
def funcargnames(self):
import warnings
from _pytest.deprecated import FUNCARGNAMES
warnings.warn(FUNCARGNAMES, stacklevel=2)
return self.fixturenames
| true
| true
|
790443bcc464aae5b19fa76b5861348d20e9ceed
| 118
|
py
|
Python
|
certbot/certbot/__init__.py
|
4n3i5v74/certbot
|
98678158637281069d180b789c80c00e5f182670
|
[
"Apache-2.0"
] | 1
|
2021-09-12T08:53:17.000Z
|
2021-09-12T08:53:17.000Z
|
certbot/certbot/__init__.py
|
4n3i5v74/certbot
|
98678158637281069d180b789c80c00e5f182670
|
[
"Apache-2.0"
] | null | null | null |
certbot/certbot/__init__.py
|
4n3i5v74/certbot
|
98678158637281069d180b789c80c00e5f182670
|
[
"Apache-2.0"
] | null | null | null |
"""Certbot client."""
# version number like 1.2.3a0, must have at least 2 parts, like 1.2
__version__ = '1.14.0.dev0'
| 29.5
| 67
| 0.686441
|
__version__ = '1.14.0.dev0'
| true
| true
|
790444e3e505a040fcba3a3fb688e1a86ada25b7
| 3,542
|
py
|
Python
|
ssseg/cfgs/emanet/base_cfg.py
|
skydengyao/sssegmentation
|
606b05983fa967bb3c98d1120f44dfc516532dad
|
[
"MIT"
] | 1
|
2021-05-28T06:42:37.000Z
|
2021-05-28T06:42:37.000Z
|
ssseg/cfgs/emanet/base_cfg.py
|
skydengyao/sssegmentation
|
606b05983fa967bb3c98d1120f44dfc516532dad
|
[
"MIT"
] | null | null | null |
ssseg/cfgs/emanet/base_cfg.py
|
skydengyao/sssegmentation
|
606b05983fa967bb3c98d1120f44dfc516532dad
|
[
"MIT"
] | null | null | null |
'''base config for emanet'''
# config for dataset
DATASET_CFG = {
'train': {
'type': '',
'set': 'train',
'rootdir': '',
'aug_opts': [('Resize', {'output_size': (2048, 512), 'keep_ratio': True, 'scale_range': (0.5, 2.0)}),
('RandomCrop', {'crop_size': (512, 512), 'one_category_max_ratio': 0.75}),
('RandomFlip', {'flip_prob': 0.5}),
('PhotoMetricDistortion', {}),
('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
('ToTensor', {}),
('Padding', {'output_size': (512, 512), 'data_type': 'tensor'}),]
},
'test': {
'type': '',
'set': 'val',
'rootdir': '',
'aug_opts': [('Resize', {'output_size': (2048, 512), 'keep_ratio': True, 'scale_range': None}),
('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
('ToTensor', {}),]
}
}
# config for dataloader
DATALOADER_CFG = {
'train': {
'type': ['nondistributed', 'distributed'][1],
'batch_size': 16,
'num_workers': 16,
'shuffle': True,
'pin_memory': True,
'drop_last': True,
},
'test': {
'type': ['nondistributed', 'distributed'][1],
'batch_size': 1,
'num_workers': 16,
'shuffle': False,
'pin_memory': True,
'drop_last': False,
}
}
# config for optimizer
OPTIMIZER_CFG = {
'type': 'sgd',
'sgd': {
'learning_rate': 0.01,
'momentum': 0.9,
'weight_decay': 5e-4,
},
'max_epochs': 0,
'params_rules': {},
'filter_params': True,
'policy': {
'type': 'poly',
'opts': {'power': 0.9, 'max_iters': None, 'num_iters': None, 'num_epochs': None}
},
'adjust_period': ['iteration', 'epoch'][0],
}
# config for losses
LOSSES_CFG = {
'loss_aux': {
'celoss': {'scale_factor': 0.4, 'opts': {'ignore_index': 255, 'reduction': 'mean'}}
},
'loss_cls': {
'celoss': {'scale_factor': 1.0, 'opts': {'ignore_index': 255, 'reduction': 'mean'}}
},
}
# config for model
MODEL_CFG = {
'type': 'emanet',
'num_classes': -1,
'benchmark': True,
'is_multi_gpus': True,
'align_corners': False,
'distributed': {'is_on': True, 'backend': 'nccl'},
'norm_cfg': {'type': 'syncbatchnorm', 'opts': {}},
'act_cfg': {'type': 'relu', 'opts': {'inplace': True}},
'backbone': {
'type': 'resnet101',
'series': 'resnet',
'pretrained': True,
'outstride': 8,
'use_stem': True,
'selected_indices': (2, 3),
},
'ema': {
'in_channels': 2048,
'ema_channels': 512,
'momentum': 0.1,
'num_stages': 3,
'num_bases': 64,
},
'decoder': {
'in_channels': 2560,
'out_channels': 512,
'dropout': 0.1,
},
'auxiliary': {
'in_channels': 1024,
'out_channels': 512,
'dropout': 0.1,
}
}
# config for inference
INFERENCE_CFG = {
'mode': 'whole',
'opts': {},
'tricks': {
'multiscale': [1],
'flip': False,
'use_probs_before_resize': False
}
}
# config for common
COMMON_CFG = {
'train': {
'backupdir': '',
'logfilepath': '',
'loginterval': 50,
'saveinterval': 1
},
'test': {
'backupdir': '',
'logfilepath': '',
'resultsavepath': ''
}
}
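# --- hedged usage sketch (editor's addition, not part of the original config) ---
# One plausible way a trainer could turn DATALOADER_CFG into a PyTorch
# DataLoader; the `dataset` argument and the popping of the framework-level
# 'type' key are assumptions, not sssegmentation's actual builder code.
def build_train_loader(dataset):
    import torch.utils.data as data
    opts = dict(DATALOADER_CFG['train'])
    opts.pop('type')  # 'nondistributed'/'distributed' dispatch happens elsewhere
    return data.DataLoader(dataset, **opts)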
| 27.457364
| 109
| 0.474873
|
DATASET_CFG = {
'train': {
'type': '',
'set': 'train',
'rootdir': '',
'aug_opts': [('Resize', {'output_size': (2048, 512), 'keep_ratio': True, 'scale_range': (0.5, 2.0)}),
('RandomCrop', {'crop_size': (512, 512), 'one_category_max_ratio': 0.75}),
('RandomFlip', {'flip_prob': 0.5}),
('PhotoMetricDistortion', {}),
('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
('ToTensor', {}),
('Padding', {'output_size': (512, 512), 'data_type': 'tensor'}),]
},
'test': {
'type': '',
'set': 'val',
'rootdir': '',
'aug_opts': [('Resize', {'output_size': (2048, 512), 'keep_ratio': True, 'scale_range': None}),
('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
('ToTensor', {}),]
}
}
DATALOADER_CFG = {
'train': {
'type': ['nondistributed', 'distributed'][1],
'batch_size': 16,
'num_workers': 16,
'shuffle': True,
'pin_memory': True,
'drop_last': True,
},
'test': {
'type': ['nondistributed', 'distributed'][1],
'batch_size': 1,
'num_workers': 16,
'shuffle': False,
'pin_memory': True,
'drop_last': False,
}
}
OPTIMIZER_CFG = {
'type': 'sgd',
'sgd': {
'learning_rate': 0.01,
'momentum': 0.9,
'weight_decay': 5e-4,
},
'max_epochs': 0,
'params_rules': {},
'filter_params': True,
'policy': {
'type': 'poly',
'opts': {'power': 0.9, 'max_iters': None, 'num_iters': None, 'num_epochs': None}
},
'adjust_period': ['iteration', 'epoch'][0],
}
LOSSES_CFG = {
'loss_aux': {
'celoss': {'scale_factor': 0.4, 'opts': {'ignore_index': 255, 'reduction': 'mean'}}
},
'loss_cls': {
'celoss': {'scale_factor': 1.0, 'opts': {'ignore_index': 255, 'reduction': 'mean'}}
},
}
MODEL_CFG = {
'type': 'emanet',
'num_classes': -1,
'benchmark': True,
'is_multi_gpus': True,
'align_corners': False,
'distributed': {'is_on': True, 'backend': 'nccl'},
'norm_cfg': {'type': 'syncbatchnorm', 'opts': {}},
'act_cfg': {'type': 'relu', 'opts': {'inplace': True}},
'backbone': {
'type': 'resnet101',
'series': 'resnet',
'pretrained': True,
'outstride': 8,
'use_stem': True,
'selected_indices': (2, 3),
},
'ema': {
'in_channels': 2048,
'ema_channels': 512,
'momentum': 0.1,
'num_stages': 3,
'num_bases': 64,
},
'decoder': {
'in_channels': 2560,
'out_channels': 512,
'dropout': 0.1,
},
'auxiliary': {
'in_channels': 1024,
'out_channels': 512,
'dropout': 0.1,
}
}
INFERENCE_CFG = {
'mode': 'whole',
'opts': {},
'tricks': {
'multiscale': [1],
'flip': False,
'use_probs_before_resize': False
}
}
COMMON_CFG = {
'train': {
'backupdir': '',
'logfilepath': '',
'loginterval': 50,
'saveinterval': 1
},
'test': {
'backupdir': '',
'logfilepath': '',
'resultsavepath': ''
}
}
| true
| true
|
790445fcab73394f2d74226dbab7d47a788a78c3
| 283
|
py
|
Python
|
albumentations/__init__.py
|
BelBES/albumentations
|
2f83c3c29ae34bfd4f199e660a1174d6db1a0017
|
[
"MIT"
] | null | null | null |
albumentations/__init__.py
|
BelBES/albumentations
|
2f83c3c29ae34bfd4f199e660a1174d6db1a0017
|
[
"MIT"
] | null | null | null |
albumentations/__init__.py
|
BelBES/albumentations
|
2f83c3c29ae34bfd4f199e660a1174d6db1a0017
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
__version__ = '0.3.3'
from .core.composition import *
from .core.transforms_interface import *
from .core.serialization import *
from .augmentations.transforms import *
from .augmentations.bbox_utils import *
from .imgaug.transforms import *
| 25.727273
| 40
| 0.80212
|
from __future__ import absolute_import
__version__ = '0.3.3'
from .core.composition import *
from .core.transforms_interface import *
from .core.serialization import *
from .augmentations.transforms import *
from .augmentations.bbox_utils import *
from .imgaug.transforms import *
| true
| true
|
790446b75682ad2f6a858e190999f72a97b4085c
| 44,670
|
py
|
Python
|
datasets/tensorflow-1.0.1/tensorflow/python/ops/rnn.py
|
yijunyu/demo-fast
|
11c0c84081a3181494b9c469bda42a313c457ad2
|
[
"BSD-2-Clause"
] | 6
|
2019-03-24T05:02:03.000Z
|
2021-12-26T13:00:24.000Z
|
datasets/tensorflow-1.0.1/tensorflow/python/ops/rnn.py
|
yijunyu/demo-vscode-fast
|
11c0c84081a3181494b9c469bda42a313c457ad2
|
[
"BSD-2-Clause"
] | null | null | null |
datasets/tensorflow-1.0.1/tensorflow/python/ops/rnn.py
|
yijunyu/demo-vscode-fast
|
11c0c84081a3181494b9c469bda42a313c457ad2
|
[
"BSD-2-Clause"
] | 1
|
2018-06-13T09:03:18.000Z
|
2018-06-13T09:03:18.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RNN helpers for TensorFlow models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import nest
# pylint: disable=protected-access
_state_size_with_prefix = rnn_cell_impl._state_size_with_prefix
# pylint: enable=protected-access
def _infer_state_dtype(explicit_dtype, state):
"""Infer the dtype of an RNN state.
Args:
explicit_dtype: explicitly declared dtype or None.
state: RNN's hidden state. Must be a Tensor or a nested iterable containing
Tensors.
Returns:
dtype: inferred dtype of hidden state.
Raises:
ValueError: if `state` has heterogeneous dtypes or is empty.
"""
if explicit_dtype is not None:
return explicit_dtype
elif nest.is_sequence(state):
inferred_dtypes = [element.dtype for element in nest.flatten(state)]
if not inferred_dtypes:
raise ValueError("Unable to infer dtype from empty state.")
all_same = all([x == inferred_dtypes[0] for x in inferred_dtypes])
if not all_same:
raise ValueError(
"State has tensors of different inferred_dtypes. Unable to infer a "
"single representative dtype.")
return inferred_dtypes[0]
else:
return state.dtype
def _on_device(fn, device):
"""Build the subgraph defined by lambda `fn` on `device` if it's not None."""
if device:
with ops.device(device):
return fn()
else:
return fn()
# pylint: disable=unused-argument
def _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, call_cell, state_size, skip_conditionals=False):
"""Calculate one step of a dynamic RNN minibatch.
Returns an (output, state) pair conditioned on the sequence_lengths.
When skip_conditionals=False, the pseudocode is something like:
if t >= max_sequence_length:
return (zero_output, state)
if t < min_sequence_length:
return call_cell()
# Selectively output zeros or output, old state or new state depending
# on if we've finished calculating each row.
new_output, new_state = call_cell()
final_output = np.vstack([
zero_output if time >= sequence_lengths[r] else new_output_r
for r, new_output_r in enumerate(new_output)
])
final_state = np.vstack([
state[r] if time >= sequence_lengths[r] else new_state_r
for r, new_state_r in enumerate(new_state)
])
return (final_output, final_state)
Args:
time: Python int, the current time step
sequence_length: int32 `Tensor` vector of size [batch_size]
min_sequence_length: int32 `Tensor` scalar, min of sequence_length
max_sequence_length: int32 `Tensor` scalar, max of sequence_length
zero_output: `Tensor` vector of shape [output_size]
state: Either a single `Tensor` matrix of shape `[batch_size, state_size]`,
or a list/tuple of such tensors.
call_cell: lambda returning tuple of (new_output, new_state) where
new_output is a `Tensor` matrix of shape `[batch_size, output_size]`.
new_state is a `Tensor` matrix of shape `[batch_size, state_size]`.
state_size: The `cell.state_size` associated with the state.
skip_conditionals: Python bool, whether to skip using the conditional
calculations. This is useful for `dynamic_rnn`, where the input tensor
matches `max_sequence_length`, and using conditionals just slows
everything down.
Returns:
A tuple of (`final_output`, `final_state`) as given by the pseudocode above:
final_output is a `Tensor` matrix of shape [batch_size, output_size]
final_state is either a single `Tensor` matrix, or a tuple of such
matrices (matching length and shapes of input `state`).
Raises:
ValueError: If the cell returns a state tuple whose length does not match
that returned by `state_size`.
"""
# Convert state to a list for ease of use
flat_state = nest.flatten(state)
flat_zero_output = nest.flatten(zero_output)
def _copy_one_through(output, new_output):
copy_cond = (time >= sequence_length)
return _on_device(
lambda: array_ops.where(copy_cond, output, new_output),
device=new_output.op.device)
def _copy_some_through(flat_new_output, flat_new_state):
# Use broadcasting select to determine which values should get
# the previous state & zero output, and which values should get
# a calculated state & output.
flat_new_output = [
_copy_one_through(zero_output, new_output)
for zero_output, new_output in zip(flat_zero_output, flat_new_output)]
flat_new_state = [
_copy_one_through(state, new_state)
for state, new_state in zip(flat_state, flat_new_state)]
return flat_new_output + flat_new_state
def _maybe_copy_some_through():
"""Run RNN step. Pass through either no or some past state."""
new_output, new_state = call_cell()
nest.assert_same_structure(state, new_state)
flat_new_state = nest.flatten(new_state)
flat_new_output = nest.flatten(new_output)
return control_flow_ops.cond(
# if t < min_seq_len: calculate and return everything
time < min_sequence_length, lambda: flat_new_output + flat_new_state,
# else copy some of it through
lambda: _copy_some_through(flat_new_output, flat_new_state))
# TODO(ebrevdo): skipping these conditionals may cause a slowdown,
# but benefits from removing cond() and its gradient. We should
# profile with and without this switch here.
if skip_conditionals:
# Instead of using conditionals, perform the selective copy at all time
# steps. This is faster when max_seq_len is equal to the number of unrolls
# (which is typical for dynamic_rnn).
new_output, new_state = call_cell()
nest.assert_same_structure(state, new_state)
new_state = nest.flatten(new_state)
new_output = nest.flatten(new_output)
final_output_and_state = _copy_some_through(new_output, new_state)
else:
empty_update = lambda: flat_zero_output + flat_state
final_output_and_state = control_flow_ops.cond(
# if t >= max_seq_len: copy all state through, output zeros
time >= max_sequence_length, empty_update,
# otherwise calculation is required: copy some or all of it through
_maybe_copy_some_through)
if len(final_output_and_state) != len(flat_zero_output) + len(flat_state):
raise ValueError("Internal error: state and output were not concatenated "
"correctly.")
final_output = final_output_and_state[:len(flat_zero_output)]
final_state = final_output_and_state[len(flat_zero_output):]
for output, flat_output in zip(final_output, flat_zero_output):
output.set_shape(flat_output.get_shape())
for substate, flat_substate in zip(final_state, flat_state):
substate.set_shape(flat_substate.get_shape())
final_output = nest.pack_sequence_as(
structure=zero_output, flat_sequence=final_output)
final_state = nest.pack_sequence_as(
structure=state, flat_sequence=final_state)
return final_output, final_state
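# --- hedged illustration (editor's addition, not part of the original module) ---
# A NumPy rendering of the docstring pseudocode above, making the per-row
# copy-through behaviour concrete; every name here is illustrative only.
def _numpy_rnn_step(time, sequence_lengths, zero_output, state, call_cell):
    import numpy as np
    new_output, new_state = call_cell()
    done = time >= sequence_lengths                    # [batch_size] bool mask
    final_output = np.where(done[:, None], zero_output, new_output)
    final_state = np.where(done[:, None], state, new_state)
    return final_output, final_state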
def _reverse_seq(input_seq, lengths):
"""Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
or nested tuples of tensors.
lengths: A `Tensor` of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply reverses
the list.
Returns:
time-reversed sequence
"""
if lengths is None:
return list(reversed(input_seq))
flat_input_seq = tuple(nest.flatten(input_) for input_ in input_seq)
flat_results = [[] for _ in range(len(input_seq))]
for sequence in zip(*flat_input_seq):
input_shape = tensor_shape.unknown_shape(
ndims=sequence[0].get_shape().ndims)
for input_ in sequence:
input_shape.merge_with(input_.get_shape())
input_.set_shape(input_shape)
# Join into (time, batch_size, depth)
s_joined = array_ops.stack(sequence)
# TODO(schuster, ebrevdo): Remove cast when reverse_sequence takes int32
if lengths is not None:
lengths = math_ops.to_int64(lengths)
# Reverse along dimension 0
s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
# Split again into list
result = array_ops.unstack(s_reversed)
for r, flat_result in zip(result, flat_results):
r.set_shape(input_shape)
flat_result.append(r)
results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result)
for input_, flat_result in zip(input_seq, flat_results)]
return results
def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
initial_state_fw=None, initial_state_bw=None,
dtype=None, parallel_iterations=None,
swap_memory=False, time_major=False, scope=None):
"""Creates a dynamic version of bidirectional recurrent neural network.
Similar to the unidirectional case above (rnn) but takes input and builds
independent forward and backward RNNs. The input_size of forward and
backward cell must match. The initial state for both directions is zero by
default (but can be set optionally) and no intermediate states are ever
returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not
given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: The RNN inputs.
If time_major == False (default), this must be a tensor of shape:
`[batch_size, max_time, input_size]`.
If time_major == True, this must be a tensor of shape:
`[max_time, batch_size, input_size]`.
sequence_length: An int32/int64 vector, size `[batch_size]`,
containing the actual lengths for each of the sequences.
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
`[batch_size, cell_fw.state_size]`.
If `cell_fw.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.
initial_state_bw: (optional) Same as for `initial_state_fw`, but using
the corresponding properties of `cell_bw`.
dtype: (optional) The data type for the initial states and expected output.
Required if initial_states are not provided or RNN states have a
heterogeneous dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to
"bidirectional_rnn"
Returns:
A tuple (outputs, output_states) where:
outputs: A tuple (output_fw, output_bw) containing the forward and
the backward rnn output `Tensor`.
If time_major == False (default),
output_fw will be a `Tensor` shaped:
`[batch_size, max_time, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[batch_size, max_time, cell_bw.output_size]`.
If time_major == True,
output_fw will be a `Tensor` shaped:
`[max_time, batch_size, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[max_time, batch_size, cell_bw.output_size]`.
It returns a tuple instead of a single concatenated `Tensor`, unlike
in the `bidirectional_rnn`. If the concatenated one is preferred,
the forward and backward outputs can be concatenated as
`tf.concat(outputs, 2)`.
output_states: A tuple (output_state_fw, output_state_bw) containing
the forward and the backward final states of bidirectional rnn.
Raises:
TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
"""
# pylint: disable=protected-access
if not isinstance(cell_fw, rnn_cell_impl._RNNCell):
raise TypeError("cell_fw must be an instance of RNNCell")
if not isinstance(cell_bw, rnn_cell_impl._RNNCell):
raise TypeError("cell_bw must be an instance of RNNCell")
# pylint: enable=protected-access
with vs.variable_scope(scope or "bidirectional_rnn"):
# Forward direction
with vs.variable_scope("fw") as fw_scope:
output_fw, output_state_fw = dynamic_rnn(
cell=cell_fw, inputs=inputs, sequence_length=sequence_length,
initial_state=initial_state_fw, dtype=dtype,
parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=fw_scope)
# Backward direction
if not time_major:
time_dim = 1
batch_dim = 0
else:
time_dim = 0
batch_dim = 1
with vs.variable_scope("bw") as bw_scope:
inputs_reverse = array_ops.reverse_sequence(
input=inputs, seq_lengths=sequence_length,
seq_dim=time_dim, batch_dim=batch_dim)
tmp, output_state_bw = dynamic_rnn(
cell=cell_bw, inputs=inputs_reverse, sequence_length=sequence_length,
initial_state=initial_state_bw, dtype=dtype,
parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=bw_scope)
output_bw = array_ops.reverse_sequence(
input=tmp, seq_lengths=sequence_length,
seq_dim=time_dim, batch_dim=batch_dim)
outputs = (output_fw, output_bw)
output_states = (output_state_fw, output_state_bw)
return (outputs, output_states)
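# --- hedged usage sketch (editor's addition, not part of the original module) ---
# Typical TF 1.x call pattern for the function above, via its public alias
# tf.nn.bidirectional_dynamic_rnn; the cell sizes and input depth are
# illustrative, not taken from any TensorFlow example.
def _demo_bidirectional_dynamic_rnn():
    import tensorflow as tf
    cell_fw = tf.contrib.rnn.LSTMCell(128)
    cell_bw = tf.contrib.rnn.LSTMCell(128)
    inputs = tf.placeholder(tf.float32, [None, None, 50])  # [batch, time, depth]
    lengths = tf.placeholder(tf.int32, [None])
    (out_fw, out_bw), _ = tf.nn.bidirectional_dynamic_rnn(
        cell_fw, cell_bw, inputs, sequence_length=lengths, dtype=tf.float32)
    return tf.concat((out_fw, out_bw), 2)  # concatenated as the docstring notes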
def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
dtype=None, parallel_iterations=None, swap_memory=False,
time_major=False, scope=None):
"""Creates a recurrent neural network specified by RNNCell `cell`.
This function is functionally identical to the function `rnn` above, but
performs fully dynamic unrolling of `inputs`.
Unlike `rnn`, the input `inputs` is not a Python list of `Tensors`, one for
each frame. Instead, `inputs` may be a single `Tensor` where
the maximum time is either the first or second dimension (see the parameter
`time_major`). Alternatively, it may be a (possibly nested) tuple of
Tensors, each of them having matching batch and time dimensions.
The corresponding output is either a single `Tensor` having the same number
of time steps and batch size, or a (possibly nested) tuple of such tensors,
matching the nested structure of `cell.output_size`.
The parameter `sequence_length` is optional and is used to copy-through state
and zero-out outputs when past a batch element's sequence length. So it's more
for correctness than performance, unlike in rnn().
Args:
cell: An instance of RNNCell.
inputs: The RNN inputs.
If `time_major == False` (default), this must be a `Tensor` of shape:
`[batch_size, max_time, ...]`, or a nested tuple of such
elements.
If `time_major == True`, this must be a `Tensor` of shape:
`[max_time, batch_size, ...]`, or a nested tuple of such
elements.
This may also be a (possibly nested) tuple of Tensors satisfying
this property. The first two dimensions must match across all the inputs,
but otherwise the ranks and other shape components may differ.
In this case, input to `cell` at each time-step will replicate the
structure of these tuples, except for the time dimension (from which the
time is taken).
The input to `cell` at each time step will be a `Tensor` or (possibly
nested) tuple of Tensors each with dimensions `[batch_size, ...]`.
sequence_length: (optional) An int32/int64 vector sized `[batch_size]`.
initial_state: (optional) An initial state for the RNN.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
dtype: (optional) The data type for the initial state and expected output.
Required if initial_state is not provided or RNN state has a heterogeneous
dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
outputs: The RNN output `Tensor`.
If time_major == False (default), this will be a `Tensor` shaped:
`[batch_size, max_time, cell.output_size]`.
If time_major == True, this will be a `Tensor` shaped:
`[max_time, batch_size, cell.output_size]`.
Note, if `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `outputs` will be a tuple having the
same structure as `cell.output_size`, containing Tensors having shapes
corresponding to the shape data in `cell.output_size`.
state: The final state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes.
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
# pylint: disable=protected-access
if not isinstance(cell, rnn_cell_impl._RNNCell):
raise TypeError("cell must be an instance of RNNCell")
# pylint: enable=protected-access
# By default, time_major==False and inputs are batch-major: shaped
# [batch, time, depth]
# For internal calculations, we transpose to [time, batch, depth]
flat_input = nest.flatten(inputs)
if not time_major:
# (B,T,D) => (T,B,D)
flat_input = tuple(array_ops.transpose(input_, [1, 0, 2])
for input_ in flat_input)
parallel_iterations = parallel_iterations or 32
if sequence_length is not None:
sequence_length = math_ops.to_int32(sequence_length)
if sequence_length.get_shape().ndims not in (None, 1):
raise ValueError(
"sequence_length must be a vector of length batch_size, "
"but saw shape: %s" % sequence_length.get_shape())
sequence_length = array_ops.identity( # Just to find it in the graph.
sequence_length, name="sequence_length")
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "rnn") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
input_shape = tuple(array_ops.shape(input_) for input_ in flat_input)
batch_size = input_shape[0][1]
for input_ in input_shape:
if input_[1].get_shape() != batch_size.get_shape():
raise ValueError("All inputs should have the same batch size")
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If no initial_state is provided, dtype must be.")
state = cell.zero_state(batch_size, dtype)
def _assert_has_shape(x, shape):
x_shape = array_ops.shape(x)
packed_shape = array_ops.stack(shape)
return control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)),
["Expected shape for Tensor %s is " % x.name,
packed_shape, " but saw shape: ", x_shape])
if sequence_length is not None:
# Perform some shape validation
with ops.control_dependencies(
[_assert_has_shape(sequence_length, [batch_size])]):
sequence_length = array_ops.identity(
sequence_length, name="CheckSeqLen")
inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)
(outputs, final_state) = _dynamic_rnn_loop(
cell,
inputs,
state,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory,
sequence_length=sequence_length,
dtype=dtype)
# Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth].
# If we are performing batch-major calculations, transpose output back
# to shape [batch, time, depth]
if not time_major:
# (T,B,D) => (B,T,D)
flat_output = nest.flatten(outputs)
flat_output = [array_ops.transpose(output, [1, 0, 2])
for output in flat_output]
outputs = nest.pack_sequence_as(
structure=outputs, flat_sequence=flat_output)
return (outputs, final_state)
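# --- hedged usage sketch (editor's addition, not part of the original module) ---
# Batch-major use of the function above through its public alias
# tf.nn.dynamic_rnn; the cell type and sizes are illustrative.
def _demo_dynamic_rnn():
    import tensorflow as tf
    cell = tf.contrib.rnn.GRUCell(64)
    inputs = tf.placeholder(tf.float32, [None, None, 32])  # [batch, time, depth]
    lengths = tf.placeholder(tf.int32, [None])
    outputs, state = tf.nn.dynamic_rnn(
        cell, inputs, sequence_length=lengths, dtype=tf.float32)
    return outputs, state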
def _dynamic_rnn_loop(cell,
inputs,
initial_state,
parallel_iterations,
swap_memory,
sequence_length=None,
dtype=None):
"""Internal implementation of Dynamic RNN.
Args:
cell: An instance of RNNCell.
inputs: A `Tensor` of shape [time, batch_size, input_size], or a nested
tuple of such elements.
initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if
`cell.state_size` is a tuple, then this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
parallel_iterations: Positive Python int.
swap_memory: A Python boolean
sequence_length: (optional) An `int32` `Tensor` of shape [batch_size].
dtype: (optional) Expected dtype of output. If not specified, inferred from
initial_state.
Returns:
Tuple `(final_outputs, final_state)`.
final_outputs:
A `Tensor` of shape `[time, batch_size, cell.output_size]`. If
`cell.output_size` is a (possibly nested) tuple of ints or `TensorShape`
      objects, then this returns a (possibly nested) tuple of Tensors matching
the corresponding shapes.
final_state:
A `Tensor`, or possibly nested tuple of Tensors, matching in length
and shapes to `initial_state`.
Raises:
ValueError: If the input depth cannot be inferred via shape inference
from the inputs.
"""
state = initial_state
assert isinstance(parallel_iterations, int), "parallel_iterations must be int"
state_size = cell.state_size
flat_input = nest.flatten(inputs)
flat_output_size = nest.flatten(cell.output_size)
# Construct an initial output
input_shape = array_ops.shape(flat_input[0])
time_steps = input_shape[0]
batch_size = input_shape[1]
inputs_got_shape = tuple(input_.get_shape().with_rank_at_least(3)
for input_ in flat_input)
const_time_steps, const_batch_size = inputs_got_shape[0].as_list()[:2]
for shape in inputs_got_shape:
if not shape[2:].is_fully_defined():
raise ValueError(
"Input size (depth of inputs) must be accessible via shape inference,"
" but saw value None.")
got_time_steps = shape[0].value
got_batch_size = shape[1].value
if const_time_steps != got_time_steps:
raise ValueError(
"Time steps is not the same for all the elements in the input in a "
"batch.")
if const_batch_size != got_batch_size:
raise ValueError(
"Batch_size is not the same for all the elements in the input.")
# Prepare dynamic conditional copying of state & output
def _create_zero_arrays(size):
size = _state_size_with_prefix(size, prefix=[batch_size])
return array_ops.zeros(
array_ops.stack(size), _infer_state_dtype(dtype, state))
flat_zero_output = tuple(_create_zero_arrays(output)
for output in flat_output_size)
zero_output = nest.pack_sequence_as(structure=cell.output_size,
flat_sequence=flat_zero_output)
if sequence_length is not None:
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
time = array_ops.constant(0, dtype=dtypes.int32, name="time")
with ops.name_scope("dynamic_rnn") as scope:
base_name = scope
def _create_ta(name, dtype):
return tensor_array_ops.TensorArray(dtype=dtype,
size=time_steps,
tensor_array_name=base_name + name)
output_ta = tuple(_create_ta("output_%d" % i,
_infer_state_dtype(dtype, state))
for i in range(len(flat_output_size)))
input_ta = tuple(_create_ta("input_%d" % i, flat_input[0].dtype)
for i in range(len(flat_input)))
input_ta = tuple(ta.unstack(input_)
for ta, input_ in zip(input_ta, flat_input))
def _time_step(time, output_ta_t, state):
"""Take a time step of the dynamic RNN.
Args:
time: int32 scalar Tensor.
output_ta_t: List of `TensorArray`s that represent the output.
state: nested tuple of vector tensors that represent the state.
Returns:
The tuple (time + 1, output_ta_t with updated flow, new_state).
"""
input_t = tuple(ta.read(time) for ta in input_ta)
# Restore some shape information
for input_, shape in zip(input_t, inputs_got_shape):
input_.set_shape(shape[1:])
input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)
call_cell = lambda: cell(input_t, state)
if sequence_length is not None:
(output, new_state) = _rnn_step(
time=time,
sequence_length=sequence_length,
min_sequence_length=min_sequence_length,
max_sequence_length=max_sequence_length,
zero_output=zero_output,
state=state,
call_cell=call_cell,
state_size=state_size,
skip_conditionals=True)
else:
(output, new_state) = call_cell()
# Pack state if using state tuples
output = nest.flatten(output)
output_ta_t = tuple(
ta.write(time, out) for ta, out in zip(output_ta_t, output))
return (time + 1, output_ta_t, new_state)
_, output_final_ta, final_state = control_flow_ops.while_loop(
cond=lambda time, *_: time < time_steps,
body=_time_step,
loop_vars=(time, output_ta, state),
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
# Unpack final output if not using output tuples.
final_outputs = tuple(ta.stack() for ta in output_final_ta)
# Restore some shape information
for output, output_size in zip(final_outputs, flat_output_size):
shape = _state_size_with_prefix(
output_size, prefix=[const_time_steps, const_batch_size])
output.set_shape(shape)
final_outputs = nest.pack_sequence_as(
structure=cell.output_size, flat_sequence=final_outputs)
return (final_outputs, final_state)
def raw_rnn(cell, loop_fn,
parallel_iterations=None, swap_memory=False, scope=None):
"""Creates an `RNN` specified by RNNCell `cell` and loop function `loop_fn`.
**NOTE: This method is still in testing, and the API may change.**
This function is a more primitive version of `dynamic_rnn` that provides
more direct access to the inputs each iteration. It also provides more
control over when to start and finish reading the sequence, and
what to emit for the output.
For example, it can be used to implement the dynamic decoder of a seq2seq
model.
Instead of working with `Tensor` objects, most operations work with
`TensorArray` objects directly.
The operation of `raw_rnn`, in pseudo-code, is basically the following:
```python
time = tf.constant(0, dtype=tf.int32)
(finished, next_input, initial_state, _, loop_state) = loop_fn(
time=time, cell_output=None, cell_state=None, loop_state=None)
emit_ta = TensorArray(dynamic_size=True, dtype=initial_state.dtype)
state = initial_state
while not all(finished):
(output, cell_state) = cell(next_input, state)
(next_finished, next_input, next_state, emit, loop_state) = loop_fn(
time=time + 1, cell_output=output, cell_state=cell_state,
loop_state=loop_state)
# Emit zeros and copy forward state for minibatch entries that are finished.
state = tf.where(finished, state, next_state)
emit = tf.where(finished, tf.zeros_like(emit), emit)
emit_ta = emit_ta.write(time, emit)
# If any new minibatch entries are marked as finished, mark these.
finished = tf.logical_or(finished, next_finished)
time += 1
return (emit_ta, state, loop_state)
```
with the additional properties that output and state may be (possibly nested)
tuples, as determined by `cell.output_size` and `cell.state_size`, and
as a result the final `state` and `emit_ta` may themselves be tuples.
A simple implementation of `dynamic_rnn` via `raw_rnn` looks like this:
```python
inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),
dtype=tf.float32)
sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)
inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
inputs_ta = inputs_ta.unstack(inputs)
cell = tf.contrib.rnn.LSTMCell(num_units)
def loop_fn(time, cell_output, cell_state, loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_cell_state = cell.zero_state(batch_size, tf.float32)
else:
next_cell_state = cell_state
elements_finished = (time >= sequence_length)
finished = tf.reduce_all(elements_finished)
next_input = tf.cond(
finished,
lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
lambda: inputs_ta.read(time))
next_loop_state = None
return (elements_finished, next_input, next_cell_state,
emit_output, next_loop_state)
outputs_ta, final_state, _ = raw_rnn(cell, loop_fn)
outputs = outputs_ta.stack()
```
Args:
cell: An instance of RNNCell.
loop_fn: A callable that takes inputs
`(time, cell_output, cell_state, loop_state)`
and returns the tuple
`(finished, next_input, next_cell_state, emit_output, next_loop_state)`.
Here `time` is an int32 scalar `Tensor`, `cell_output` is a
`Tensor` or (possibly nested) tuple of tensors as determined by
`cell.output_size`, and `cell_state` is a `Tensor`
or (possibly nested) tuple of tensors, as determined by the `loop_fn`
on its first call (and should match `cell.state_size`).
The outputs are: `finished`, a boolean `Tensor` of
shape `[batch_size]`, `next_input`: the next input to feed to `cell`,
`next_cell_state`: the next state to feed to `cell`,
and `emit_output`: the output to store for this iteration.
Note that `emit_output` should be a `Tensor` or (possibly nested)
tuple of tensors with shapes and structure matching `cell.output_size`
and `cell_output` above. The parameter `cell_state` and output
`next_cell_state` may be either a single or (possibly nested) tuple
of tensors. The parameter `loop_state` and
output `next_loop_state` may be either a single or (possibly nested) tuple
of `Tensor` and `TensorArray` objects. This last parameter
may be ignored by `loop_fn` and the return value may be `None`. If it
is not `None`, then the `loop_state` will be propagated through the RNN
loop, for use purely by `loop_fn` to keep track of its own state.
The `next_loop_state` parameter returned may be `None`.
The first call to `loop_fn` will be `time = 0`, `cell_output = None`,
`cell_state = None`, and `loop_state = None`. For this call:
The `next_cell_state` value should be the value with which to initialize
the cell's state. It may be a final state from a previous RNN or it
may be the output of `cell.zero_state()`. It should be a
(possibly nested) tuple structure of tensors.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a `TensorShape`, this must be a `Tensor` of
appropriate type and shape `[batch_size] + cell.state_size`.
If `cell.state_size` is a (possibly nested) tuple of ints or
`TensorShape`, this will be a tuple having the corresponding shapes.
The `emit_output` value may be either `None` or a (possibly nested)
tuple structure of tensors, e.g.,
`(tf.zeros(shape_0, dtype=dtype_0), tf.zeros(shape_1, dtype=dtype_1))`.
If this first `emit_output` return value is `None`,
then the `emit_ta` result of `raw_rnn` will have the same structure and
dtypes as `cell.output_size`. Otherwise `emit_ta` will have the same
structure, shapes (prepended with a `batch_size` dimension), and dtypes
as `emit_output`. The actual values returned for `emit_output` at this
initializing call are ignored. Note, this emit structure must be
consistent across all time steps.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A tuple `(emit_ta, final_state, final_loop_state)` where:
`emit_ta`: The RNN output `TensorArray`.
If `loop_fn` returns a (possibly nested) set of Tensors for
`emit_output` during initialization, (inputs `time = 0`,
`cell_output = None`, and `loop_state = None`), then `emit_ta` will
have the same structure, dtypes, and shapes as `emit_output` instead.
If `loop_fn` returns `emit_output = None` during this call,
the structure of `cell.output_size` is used:
If `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `emit_ta` will be a tuple having the
same structure as `cell.output_size`, containing TensorArrays whose
elements' shapes correspond to the shape data in `cell.output_size`.
`final_state`: The final cell state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes.
`final_loop_state`: The final loop state as returned by `loop_fn`.
Raises:
TypeError: If `cell` is not an instance of RNNCell, or `loop_fn` is not
a `callable`.
"""
# pylint: disable=protected-access
if not isinstance(cell, rnn_cell_impl._RNNCell):
raise TypeError("cell must be an instance of RNNCell")
# pylint: enable=protected-access
if not callable(loop_fn):
raise TypeError("loop_fn must be a callable")
parallel_iterations = parallel_iterations or 32
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "rnn") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
time = constant_op.constant(0, dtype=dtypes.int32)
(elements_finished, next_input, initial_state, emit_structure,
init_loop_state) = loop_fn(
time, None, None, None) # time, cell_output, cell_state, loop_state
flat_input = nest.flatten(next_input)
# Need a surrogate loop state for the while_loop if none is available.
loop_state = (init_loop_state if init_loop_state is not None
else constant_op.constant(0, dtype=dtypes.int32))
input_shape = [input_.get_shape() for input_ in flat_input]
static_batch_size = input_shape[0][0]
for input_shape_i in input_shape:
# Static verification that batch sizes all match
static_batch_size.merge_with(input_shape_i[0])
batch_size = static_batch_size.value
if batch_size is None:
batch_size = array_ops.shape(flat_input[0])[0]
nest.assert_same_structure(initial_state, cell.state_size)
state = initial_state
flat_state = nest.flatten(state)
flat_state = [ops.convert_to_tensor(s) for s in flat_state]
state = nest.pack_sequence_as(structure=state,
flat_sequence=flat_state)
if emit_structure is not None:
flat_emit_structure = nest.flatten(emit_structure)
flat_emit_size = [emit.get_shape() for emit in flat_emit_structure]
flat_emit_dtypes = [emit.dtype for emit in flat_emit_structure]
else:
emit_structure = cell.output_size
flat_emit_size = nest.flatten(emit_structure)
flat_emit_dtypes = [flat_state[0].dtype] * len(flat_emit_size)
flat_emit_ta = [
tensor_array_ops.TensorArray(
dtype=dtype_i, dynamic_size=True, size=0, name="rnn_output_%d" % i)
for i, dtype_i in enumerate(flat_emit_dtypes)]
emit_ta = nest.pack_sequence_as(structure=emit_structure,
flat_sequence=flat_emit_ta)
flat_zero_emit = [
array_ops.zeros(
_state_size_with_prefix(size_i, prefix=[batch_size]),
dtype_i)
for size_i, dtype_i in zip(flat_emit_size, flat_emit_dtypes)]
zero_emit = nest.pack_sequence_as(structure=emit_structure,
flat_sequence=flat_zero_emit)
def condition(unused_time, elements_finished, *_):
return math_ops.logical_not(math_ops.reduce_all(elements_finished))
def body(time, elements_finished, current_input,
emit_ta, state, loop_state):
"""Internal while loop body for raw_rnn.
Args:
time: time scalar.
elements_finished: batch-size vector.
current_input: possibly nested tuple of input tensors.
emit_ta: possibly nested tuple of output TensorArrays.
state: possibly nested tuple of state tensors.
loop_state: possibly nested tuple of loop state tensors.
Returns:
Tuple having the same size as Args but with updated values.
"""
(next_output, cell_state) = cell(current_input, state)
nest.assert_same_structure(state, cell_state)
nest.assert_same_structure(cell.output_size, next_output)
next_time = time + 1
(next_finished, next_input, next_state, emit_output,
next_loop_state) = loop_fn(
next_time, next_output, cell_state, loop_state)
nest.assert_same_structure(state, next_state)
nest.assert_same_structure(current_input, next_input)
nest.assert_same_structure(emit_ta, emit_output)
# If loop_fn returns None for next_loop_state, just reuse the
# previous one.
loop_state = loop_state if next_loop_state is None else next_loop_state
def _copy_some_through(current, candidate):
"""Copy some tensors through via array_ops.where."""
current_flat = nest.flatten(current)
candidate_flat = nest.flatten(candidate)
# pylint: disable=g-long-lambda,cell-var-from-loop
result_flat = [
_on_device(
lambda: array_ops.where(
elements_finished, current_i, candidate_i),
device=candidate_i.op.device)
for (current_i, candidate_i) in zip(current_flat, candidate_flat)]
# pylint: enable=g-long-lambda,cell-var-from-loop
return nest.pack_sequence_as(
structure=current, flat_sequence=result_flat)
emit_output = _copy_some_through(zero_emit, emit_output)
next_state = _copy_some_through(state, next_state)
emit_output_flat = nest.flatten(emit_output)
emit_ta_flat = nest.flatten(emit_ta)
elements_finished = math_ops.logical_or(elements_finished, next_finished)
emit_ta_flat = [
ta.write(time, emit)
for (ta, emit) in zip(emit_ta_flat, emit_output_flat)]
emit_ta = nest.pack_sequence_as(
structure=emit_structure, flat_sequence=emit_ta_flat)
return (next_time, elements_finished, next_input,
emit_ta, next_state, loop_state)
returned = control_flow_ops.while_loop(
condition, body, loop_vars=[
time, elements_finished, next_input,
emit_ta, state, loop_state],
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
(emit_ta, final_state, final_loop_state) = returned[-3:]
if init_loop_state is None:
final_loop_state = None
return (emit_ta, final_state, final_loop_state)
| 42.787356
| 80
| 0.701903
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import nest
_state_size_with_prefix = rnn_cell_impl._state_size_with_prefix
def _infer_state_dtype(explicit_dtype, state):
if explicit_dtype is not None:
return explicit_dtype
elif nest.is_sequence(state):
inferred_dtypes = [element.dtype for element in nest.flatten(state)]
if not inferred_dtypes:
raise ValueError("Unable to infer dtype from empty state.")
all_same = all([x == inferred_dtypes[0] for x in inferred_dtypes])
if not all_same:
raise ValueError(
"State has tensors of different inferred_dtypes. Unable to infer a "
"single representative dtype.")
return inferred_dtypes[0]
else:
return state.dtype
def _on_device(fn, device):
if device:
with ops.device(device):
return fn()
else:
return fn()
def _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, call_cell, state_size, skip_conditionals=False):
flat_state = nest.flatten(state)
flat_zero_output = nest.flatten(zero_output)
def _copy_one_through(output, new_output):
copy_cond = (time >= sequence_length)
return _on_device(
lambda: array_ops.where(copy_cond, output, new_output),
device=new_output.op.device)
def _copy_some_through(flat_new_output, flat_new_state):
flat_new_output = [
_copy_one_through(zero_output, new_output)
for zero_output, new_output in zip(flat_zero_output, flat_new_output)]
flat_new_state = [
_copy_one_through(state, new_state)
for state, new_state in zip(flat_state, flat_new_state)]
return flat_new_output + flat_new_state
def _maybe_copy_some_through():
new_output, new_state = call_cell()
nest.assert_same_structure(state, new_state)
flat_new_state = nest.flatten(new_state)
flat_new_output = nest.flatten(new_output)
return control_flow_ops.cond(
time < min_sequence_length, lambda: flat_new_output + flat_new_state,
lambda: _copy_some_through(flat_new_output, flat_new_state))
if skip_conditionals:
new_output, new_state = call_cell()
nest.assert_same_structure(state, new_state)
new_state = nest.flatten(new_state)
new_output = nest.flatten(new_output)
final_output_and_state = _copy_some_through(new_output, new_state)
else:
empty_update = lambda: flat_zero_output + flat_state
final_output_and_state = control_flow_ops.cond(
time >= max_sequence_length, empty_update,
_maybe_copy_some_through)
if len(final_output_and_state) != len(flat_zero_output) + len(flat_state):
raise ValueError("Internal error: state and output were not concatenated "
"correctly.")
final_output = final_output_and_state[:len(flat_zero_output)]
final_state = final_output_and_state[len(flat_zero_output):]
for output, flat_output in zip(final_output, flat_zero_output):
output.set_shape(flat_output.get_shape())
for substate, flat_substate in zip(final_state, flat_state):
substate.set_shape(flat_substate.get_shape())
final_output = nest.pack_sequence_as(
structure=zero_output, flat_sequence=final_output)
final_state = nest.pack_sequence_as(
structure=state, flat_sequence=final_state)
return final_output, final_state
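# Hedged per-step summary of _rnn_step at time t (descriptive, not from the
# original source):
#
#   t <  min(sequence_length): run the cell for the whole batch, no masking
#   t >= max(sequence_length): emit zero_output and pass state through
#   otherwise, per example i:
#     output[i] = new_output[i] if t < sequence_length[i] else zero_output[i]
#     state[i]  = new_state[i]  if t < sequence_length[i] else state[i]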
# Reverse a time-major list of input tensors along time, reversing only the
# first lengths[i] steps of each example when lengths is given.
def _reverse_seq(input_seq, lengths):
if lengths is None:
return list(reversed(input_seq))
flat_input_seq = tuple(nest.flatten(input_) for input_ in input_seq)
flat_results = [[] for _ in range(len(input_seq))]
for sequence in zip(*flat_input_seq):
input_shape = tensor_shape.unknown_shape(
ndims=sequence[0].get_shape().ndims)
for input_ in sequence:
input_shape.merge_with(input_.get_shape())
input_.set_shape(input_shape)
s_joined = array_ops.stack(sequence)
if lengths is not None:
lengths = math_ops.to_int64(lengths)
s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
result = array_ops.unstack(s_reversed)
for r, flat_result in zip(result, flat_results):
r.set_shape(input_shape)
flat_result.append(r)
results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result)
for input_, flat_result in zip(input_seq, flat_results)]
return results
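# Illustrative example for _reverse_seq (a sketch, not from the original
# source): only the first lengths[i] steps of example i are reversed; padded
# steps keep their positions.
#
#   seq = [t0, t1, t2]                  # time-major list, each ti: [batch, d]
#   rev = _reverse_seq(seq, lengths)    # lengths: int vector of shape [batch]
#   # with lengths = [2, 3]: example 0 yields t1, t0, t2 (pad untouched);
#   # example 1 yields t2, t1, t0.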
# Build independent forward and backward dynamic RNNs: the backward pass sees
# the inputs reversed along time (respecting sequence_length) and its outputs
# are reversed back, so both returned output tensors are time-aligned.
def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
                              initial_state_fw=None, initial_state_bw=None,
                              dtype=None, parallel_iterations=None,
                              swap_memory=False, time_major=False, scope=None):
if not isinstance(cell_fw, rnn_cell_impl._RNNCell):
raise TypeError("cell_fw must be an instance of RNNCell")
if not isinstance(cell_bw, rnn_cell_impl._RNNCell):
raise TypeError("cell_bw must be an instance of RNNCell")
with vs.variable_scope(scope or "bidirectional_rnn"):
with vs.variable_scope("fw") as fw_scope:
output_fw, output_state_fw = dynamic_rnn(
cell=cell_fw, inputs=inputs, sequence_length=sequence_length,
initial_state=initial_state_fw, dtype=dtype,
parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=fw_scope)
if not time_major:
time_dim = 1
batch_dim = 0
else:
time_dim = 0
batch_dim = 1
with vs.variable_scope("bw") as bw_scope:
inputs_reverse = array_ops.reverse_sequence(
input=inputs, seq_lengths=sequence_length,
seq_dim=time_dim, batch_dim=batch_dim)
tmp, output_state_bw = dynamic_rnn(
cell=cell_bw, inputs=inputs_reverse, sequence_length=sequence_length,
initial_state=initial_state_bw, dtype=dtype,
parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=bw_scope)
output_bw = array_ops.reverse_sequence(
input=tmp, seq_lengths=sequence_length,
seq_dim=time_dim, batch_dim=batch_dim)
outputs = (output_fw, output_bw)
output_states = (output_state_fw, output_state_bw)
return (outputs, output_states)
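# Hedged usage sketch for bidirectional_dynamic_rnn (assumes a TF 1.x graph-
# mode session; the concrete cell class and its module path vary across 1.x
# releases, so tf.nn.rnn_cell.BasicLSTMCell below is an assumption):
#
#   import tensorflow as tf
#   cell_fw = tf.nn.rnn_cell.BasicLSTMCell(32)
#   cell_bw = tf.nn.rnn_cell.BasicLSTMCell(32)
#   inputs = tf.placeholder(tf.float32, [None, 50, 16])  # [batch, time, depth]
#   lengths = tf.placeholder(tf.int32, [None])
#   (out_fw, out_bw), _ = bidirectional_dynamic_rnn(
#       cell_fw, cell_bw, inputs, sequence_length=lengths, dtype=tf.float32)
#   outputs = tf.concat([out_fw, out_bw], axis=-1)       # [batch, time, 64]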
# RNN unrolled dynamically by a while_loop, so graph size is independent of
# sequence length. Accepts batch-major [batch, time, depth] inputs by default
# or time-major [time, batch, depth] when time_major=True.
def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
                dtype=None, parallel_iterations=None, swap_memory=False,
                time_major=False, scope=None):
if not isinstance(cell, rnn_cell_impl._RNNCell):
raise TypeError("cell must be an instance of RNNCell")
flat_input = nest.flatten(inputs)
  # Canonicalize to time-major [time, batch, depth]; the internal loop always
  # iterates over the leading time dimension.
  if not time_major:
flat_input = tuple(array_ops.transpose(input_, [1, 0, 2])
for input_ in flat_input)
parallel_iterations = parallel_iterations or 32
if sequence_length is not None:
sequence_length = math_ops.to_int32(sequence_length)
if sequence_length.get_shape().ndims not in (None, 1):
raise ValueError(
"sequence_length must be a vector of length batch_size, "
"but saw shape: %s" % sequence_length.get_shape())
sequence_length = array_ops.identity(
sequence_length, name="sequence_length")
with vs.variable_scope(scope or "rnn") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
input_shape = tuple(array_ops.shape(input_) for input_ in flat_input)
batch_size = input_shape[0][1]
for input_ in input_shape:
if input_[1].get_shape() != batch_size.get_shape():
raise ValueError("All inputs should have the same batch size")
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If no initial_state is provided, dtype must be.")
state = cell.zero_state(batch_size, dtype)
def _assert_has_shape(x, shape):
x_shape = array_ops.shape(x)
packed_shape = array_ops.stack(shape)
return control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)),
["Expected shape for Tensor %s is " % x.name,
packed_shape, " but saw shape: ", x_shape])
if sequence_length is not None:
with ops.control_dependencies(
[_assert_has_shape(sequence_length, [batch_size])]):
sequence_length = array_ops.identity(
sequence_length, name="CheckSeqLen")
inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)
(outputs, final_state) = _dynamic_rnn_loop(
cell,
inputs,
state,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory,
sequence_length=sequence_length,
dtype=dtype)
    # Undo the canonicalization: hand outputs back in the caller's batch-major
    # layout when time_major=False.
    if not time_major:
flat_output = nest.flatten(outputs)
flat_output = [array_ops.transpose(output, [1, 0, 2])
for output in flat_output]
outputs = nest.pack_sequence_as(
structure=outputs, flat_sequence=flat_output)
return (outputs, final_state)
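# Hedged usage sketch for dynamic_rnn (assumes TF 1.x graph mode; the cell
# class below is an assumption, as its module path varies across releases):
#
#   import tensorflow as tf
#   cell = tf.nn.rnn_cell.BasicLSTMCell(64)
#   inputs = tf.placeholder(tf.float32, [None, 20, 8])  # [batch, time, depth]
#   lengths = tf.placeholder(tf.int32, [None])          # per-example lengths
#   outputs, final_state = dynamic_rnn(
#       cell, inputs, sequence_length=lengths, dtype=tf.float32)
#   # outputs: [batch, time, 64]; steps at or beyond an example's length emit
#   # zeros while its state stops updating (see _rnn_step above).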
# Internal while_loop driver for dynamic_rnn: expects time-major inputs,
# stages them through TensorArrays, and returns time-major outputs plus the
# final state. See the shape summary after this function.
def _dynamic_rnn_loop(cell,
                      inputs,
                      initial_state,
                      parallel_iterations,
                      swap_memory,
                      sequence_length=None,
                      dtype=None):
state = initial_state
assert isinstance(parallel_iterations, int), "parallel_iterations must be int"
state_size = cell.state_size
flat_input = nest.flatten(inputs)
flat_output_size = nest.flatten(cell.output_size)
input_shape = array_ops.shape(flat_input[0])
time_steps = input_shape[0]
batch_size = input_shape[1]
inputs_got_shape = tuple(input_.get_shape().with_rank_at_least(3)
for input_ in flat_input)
const_time_steps, const_batch_size = inputs_got_shape[0].as_list()[:2]
for shape in inputs_got_shape:
if not shape[2:].is_fully_defined():
raise ValueError(
"Input size (depth of inputs) must be accessible via shape inference,"
" but saw value None.")
got_time_steps = shape[0].value
got_batch_size = shape[1].value
if const_time_steps != got_time_steps:
raise ValueError(
"Time steps is not the same for all the elements in the input in a "
"batch.")
if const_batch_size != got_batch_size:
raise ValueError(
"Batch_size is not the same for all the elements in the input.")
def _create_zero_arrays(size):
size = _state_size_with_prefix(size, prefix=[batch_size])
return array_ops.zeros(
array_ops.stack(size), _infer_state_dtype(dtype, state))
flat_zero_output = tuple(_create_zero_arrays(output)
for output in flat_output_size)
zero_output = nest.pack_sequence_as(structure=cell.output_size,
flat_sequence=flat_zero_output)
if sequence_length is not None:
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
time = array_ops.constant(0, dtype=dtypes.int32, name="time")
with ops.name_scope("dynamic_rnn") as scope:
base_name = scope
def _create_ta(name, dtype):
return tensor_array_ops.TensorArray(dtype=dtype,
size=time_steps,
tensor_array_name=base_name + name)
output_ta = tuple(_create_ta("output_%d" % i,
_infer_state_dtype(dtype, state))
for i in range(len(flat_output_size)))
input_ta = tuple(_create_ta("input_%d" % i, flat_input[0].dtype)
for i in range(len(flat_input)))
input_ta = tuple(ta.unstack(input_)
for ta, input_ in zip(input_ta, flat_input))
def _time_step(time, output_ta_t, state):
input_t = tuple(ta.read(time) for ta in input_ta)
for input_, shape in zip(input_t, inputs_got_shape):
input_.set_shape(shape[1:])
input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)
call_cell = lambda: cell(input_t, state)
if sequence_length is not None:
(output, new_state) = _rnn_step(
time=time,
sequence_length=sequence_length,
min_sequence_length=min_sequence_length,
max_sequence_length=max_sequence_length,
zero_output=zero_output,
state=state,
call_cell=call_cell,
state_size=state_size,
skip_conditionals=True)
else:
(output, new_state) = call_cell()
output = nest.flatten(output)
output_ta_t = tuple(
ta.write(time, out) for ta, out in zip(output_ta_t, output))
return (time + 1, output_ta_t, new_state)
_, output_final_ta, final_state = control_flow_ops.while_loop(
cond=lambda time, *_: time < time_steps,
body=_time_step,
loop_vars=(time, output_ta, state),
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
final_outputs = tuple(ta.stack() for ta in output_final_ta)
for output, output_size in zip(final_outputs, flat_output_size):
shape = _state_size_with_prefix(
output_size, prefix=[const_time_steps, const_batch_size])
output.set_shape(shape)
final_outputs = nest.pack_sequence_as(
structure=cell.output_size, flat_sequence=final_outputs)
return (final_outputs, final_state)
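# Shape bookkeeping summary for _dynamic_rnn_loop (descriptive, not from the
# original source): inputs arrive time-major as [time, batch, depth]; each
# iteration reads input_ta[time], applies the cell (through _rnn_step when
# sequence_length is given), and writes one slice of the output TensorArrays,
# so the stacked result is time-major [time, batch, output_size].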
# Fully caller-driven RNN: a user-supplied loop_fn decides, at each step, the
# next input, the emitted output, the next state, and per-example termination.
# See the hedged loop_fn sketch after this function.
def raw_rnn(cell, loop_fn,
            parallel_iterations=None, swap_memory=False, scope=None):
if not isinstance(cell, rnn_cell_impl._RNNCell):
raise TypeError("cell must be an instance of RNNCell")
if not callable(loop_fn):
raise TypeError("loop_fn must be a callable")
parallel_iterations = parallel_iterations or 32
with vs.variable_scope(scope or "rnn") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
    # The time=0 call to loop_fn below is the initialization call: it supplies
    # the first input, the initial state, an optional emit structure, and an
    # optional user loop state (cell output and cell state are None here).
    time = constant_op.constant(0, dtype=dtypes.int32)
(elements_finished, next_input, initial_state, emit_structure,
init_loop_state) = loop_fn(
time, None, None, None)
flat_input = nest.flatten(next_input)
loop_state = (init_loop_state if init_loop_state is not None
else constant_op.constant(0, dtype=dtypes.int32))
input_shape = [input_.get_shape() for input_ in flat_input]
static_batch_size = input_shape[0][0]
for input_shape_i in input_shape:
static_batch_size.merge_with(input_shape_i[0])
batch_size = static_batch_size.value
if batch_size is None:
batch_size = array_ops.shape(flat_input[0])[0]
nest.assert_same_structure(initial_state, cell.state_size)
state = initial_state
flat_state = nest.flatten(state)
flat_state = [ops.convert_to_tensor(s) for s in flat_state]
state = nest.pack_sequence_as(structure=state,
flat_sequence=flat_state)
if emit_structure is not None:
flat_emit_structure = nest.flatten(emit_structure)
flat_emit_size = [emit.get_shape() for emit in flat_emit_structure]
flat_emit_dtypes = [emit.dtype for emit in flat_emit_structure]
else:
emit_structure = cell.output_size
flat_emit_size = nest.flatten(emit_structure)
flat_emit_dtypes = [flat_state[0].dtype] * len(flat_emit_size)
flat_emit_ta = [
tensor_array_ops.TensorArray(
dtype=dtype_i, dynamic_size=True, size=0, name="rnn_output_%d" % i)
for i, dtype_i in enumerate(flat_emit_dtypes)]
emit_ta = nest.pack_sequence_as(structure=emit_structure,
flat_sequence=flat_emit_ta)
flat_zero_emit = [
array_ops.zeros(
_state_size_with_prefix(size_i, prefix=[batch_size]),
dtype_i)
for size_i, dtype_i in zip(flat_emit_size, flat_emit_dtypes)]
zero_emit = nest.pack_sequence_as(structure=emit_structure,
flat_sequence=flat_zero_emit)
def condition(unused_time, elements_finished, *_):
return math_ops.logical_not(math_ops.reduce_all(elements_finished))
def body(time, elements_finished, current_input,
emit_ta, state, loop_state):
(next_output, cell_state) = cell(current_input, state)
nest.assert_same_structure(state, cell_state)
nest.assert_same_structure(cell.output_size, next_output)
next_time = time + 1
(next_finished, next_input, next_state, emit_output,
next_loop_state) = loop_fn(
next_time, next_output, cell_state, loop_state)
nest.assert_same_structure(state, next_state)
nest.assert_same_structure(current_input, next_input)
nest.assert_same_structure(emit_ta, emit_output)
loop_state = loop_state if next_loop_state is None else next_loop_state
def _copy_some_through(current, candidate):
current_flat = nest.flatten(current)
candidate_flat = nest.flatten(candidate)
result_flat = [
_on_device(
lambda: array_ops.where(
elements_finished, current_i, candidate_i),
device=candidate_i.op.device)
for (current_i, candidate_i) in zip(current_flat, candidate_flat)]
return nest.pack_sequence_as(
structure=current, flat_sequence=result_flat)
emit_output = _copy_some_through(zero_emit, emit_output)
next_state = _copy_some_through(state, next_state)
emit_output_flat = nest.flatten(emit_output)
emit_ta_flat = nest.flatten(emit_ta)
elements_finished = math_ops.logical_or(elements_finished, next_finished)
emit_ta_flat = [
ta.write(time, emit)
for (ta, emit) in zip(emit_ta_flat, emit_output_flat)]
emit_ta = nest.pack_sequence_as(
structure=emit_structure, flat_sequence=emit_ta_flat)
return (next_time, elements_finished, next_input,
emit_ta, next_state, loop_state)
returned = control_flow_ops.while_loop(
condition, body, loop_vars=[
time, elements_finished, next_input,
emit_ta, state, loop_state],
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
(emit_ta, final_state, final_loop_state) = returned[-3:]
if init_loop_state is None:
final_loop_state = None
return (emit_ta, final_state, final_loop_state)
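# Hedged raw_rnn usage sketch (assumed setup, TF 1.x graph mode; input depth,
# max time, and the cell class are illustrative assumptions). loop_fn is
# called once with cell_output=None to initialize, then once per step.
#
#   import tensorflow as tf
#   max_time, depth = 20, 8
#   cell = tf.nn.rnn_cell.BasicLSTMCell(64)
#   inputs = tf.placeholder(tf.float32, [max_time, None, depth])  # time-major
#   lengths = tf.placeholder(tf.int32, [None])
#   inputs_ta = tf.TensorArray(tf.float32, size=max_time).unstack(inputs)
#
#   def loop_fn(time, cell_output, cell_state, loop_state):
#     if cell_output is None:  # the initialization call at time == 0
#       next_state = cell.zero_state(tf.shape(lengths)[0], tf.float32)
#     else:
#       next_state = cell_state
#     finished = time >= lengths
#     next_input = tf.cond(
#         tf.reduce_all(finished),
#         lambda: tf.zeros([tf.shape(lengths)[0], depth], tf.float32),
#         lambda: inputs_ta.read(time))
#     # emit_output=cell_output: None on the first call, which makes raw_rnn
#     # fall back to cell.output_size for the emit structure.
#     return (finished, next_input, next_state, cell_output, None)
#
#   emit_ta, final_state, _ = raw_rnn(cell, loop_fn)
#   outputs = emit_ta.stack()  # time-major [time, batch, 64]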
is_comment_constant_removed: true
is_sharp_comment_removed: true