One row per Python function, carrying the function body, its docstring, and source metadata. Column summary:

| column                 | dtype  | observed values                               |
|------------------------|--------|-----------------------------------------------|
| body                   | string | lengths 26 to 98.2k                           |
| body_hash              | int64  | -9,222,864,604,528,158,000 to 9,221,803,474B  |
| docstring              | string | lengths 1 to 16.8k                            |
| path                   | string | lengths 5 to 230                              |
| name                   | string | lengths 1 to 96                               |
| repository_name        | string | lengths 7 to 89                               |
| lang                   | string | 1 class (`python`)                            |
| body_without_docstring | string | lengths 20 to 98.2k                           |

In every row, `lang` is `python`, `docstring` repeats the docstring embedded in `body`, and `body_without_docstring` is `body` with its docstring emptied; the rows below therefore list only `name`, `repository_name`, `path`, `body_hash`, and `body`.
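For working with a dump like this, here is a minimal sketch of loading and filtering rows with the `datasets` library. The dataset identifier `user/python-functions-with-docstrings` is a placeholder, not the real Hub id, which is not given here:

```python
from datasets import load_dataset

# Placeholder id: the actual Hub identifier of this dataset is not shown above.
ds = load_dataset("user/python-functions-with-docstrings", split="train")

# Keep only rows whose docstring is a single short line.
short = ds.filter(lambda row: len(row["docstring"]) < 80 and "\n" not in row["docstring"])
print(short[0]["name"], "-", short[0]["docstring"])
```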
name: Update
repository_name: GaiaFL/chirpstack-api
path: python/src/chirpstack_api/as_pb/external/api/organization_pb2_grpc.py
body_hash: 7,422,785,294,231,557,000
body:

```python
def Update(self, request, context):
    """Update an existing organization."""
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
```

name: Delete
repository_name: GaiaFL/chirpstack-api
path: python/src/chirpstack_api/as_pb/external/api/organization_pb2_grpc.py
body_hash: 6,167,222,203,331,012,000
body:

```python
def Delete(self, request, context):
    """Delete an organization."""
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
```

name: ListUsers
repository_name: GaiaFL/chirpstack-api
path: python/src/chirpstack_api/as_pb/external/api/organization_pb2_grpc.py
body_hash: -307,360,098,556,259,500
body:

```python
def ListUsers(self, request, context):
    """Get organization's user list."""
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
```

name: GetUser
repository_name: GaiaFL/chirpstack-api
path: python/src/chirpstack_api/as_pb/external/api/organization_pb2_grpc.py
body_hash: -4,923,811,219,087,573,000
body:

```python
def GetUser(self, request, context):
    """Get data for a particular organization user."""
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
```

name: AddUser
repository_name: GaiaFL/chirpstack-api
path: python/src/chirpstack_api/as_pb/external/api/organization_pb2_grpc.py
body_hash: -4,596,004,449,455,495,700
body:

```python
def AddUser(self, request, context):
    """Add a new user to an organization."""
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
```

name: UpdateUser
repository_name: GaiaFL/chirpstack-api
path: python/src/chirpstack_api/as_pb/external/api/organization_pb2_grpc.py
body_hash: 4,537,644,656,659,038,000
body:

```python
def UpdateUser(self, request, context):
    """Update a user in an organization."""
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
```

name: DeleteUser
repository_name: GaiaFL/chirpstack-api
path: python/src/chirpstack_api/as_pb/external/api/organization_pb2_grpc.py
body_hash: -8,003,106,685,235,128,000
body:

```python
def DeleteUser(self, request, context):
    """Delete a user from an organization."""
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
```

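These seven rows are all grpcio-generated servicer stubs: the code generator emits one method per RPC that sets `UNIMPLEMENTED` and raises, and a real service subclasses the servicer and overrides the methods it supports. A minimal sketch of that pattern follows; the servicer class name `OrganizationServiceServicer`, the `request.id` field, and the use of `google.protobuf.Empty` as the reply are assumptions based on naming conventions, not taken from the rows:

```python
from google.protobuf import empty_pb2
from chirpstack_api.as_pb.external.api import organization_pb2_grpc

class OrganizationService(organization_pb2_grpc.OrganizationServiceServicer):
    """Overrides one generated stub; RPCs left alone keep the
    UNIMPLEMENTED behaviour shown in the rows above."""

    def Delete(self, request, context):
        # A real implementation would remove the organization from storage.
        print(f"deleting organization {request.id}")  # `id` field is assumed
        return empty_pb2.Empty()  # assumed reply type for this RPC
```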
name: has_pywin32
repository_name: nficano/alexa-find-my-iphone
path: src/site-packages/keyrings/alt/Windows.py
body_hash: -7,351,007,536,469,651,000
body:

```python
def has_pywin32():
    """
    Does this environment have pywin32?
    Should return False even when Mercurial's Demand Import allowed import of
    win32cred.
    """
    with ExceptionRaisedContext() as exc:
        win32cred.__name__
    return not bool(exc)
```

name: has_wincrypto
repository_name: nficano/alexa-find-my-iphone
path: src/site-packages/keyrings/alt/Windows.py
body_hash: -6,387,045,016,494,770,000
body:

```python
def has_wincrypto():
    """
    Does this environment have wincrypto?
    Should return False even when Mercurial's Demand Import allowed import of
    _win_crypto, so accesses an attribute of the module.
    """
    with ExceptionRaisedContext() as exc:
        _win_crypto.__name__
    return not bool(exc)
```

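Both helpers lean on `ExceptionRaisedContext`, which swallows any exception raised inside the `with` block and records whether one occurred, so that merely touching `win32cred.__name__` probes whether the module is really importable under Mercurial's demand-import. The helper itself is not among the rows; a minimal sketch of how such a context manager could work (the real implementation lives in the keyring utilities and may differ):

```python
class ExceptionRaisedContext:
    """Record whether the managed block raised, without propagating it."""

    def __enter__(self):
        self.type = None
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.type = exc_type
        return True  # suppress the exception

    def __bool__(self):
        # Truthy iff the block raised, matching `not bool(exc)` above.
        return self.type is not None
```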
name: priority
repository_name: nficano/alexa-find-my-iphone
path: src/site-packages/keyrings/alt/Windows.py
body_hash: 1,551,983,717,609,327,600
body:

```python
@properties.ClassProperty
@classmethod
def priority(self):
    """
    Preferred over file.EncryptedKeyring but not other, more sophisticated
    Windows backends.
    """
    if not platform.system() == 'Windows':
        raise RuntimeError('Requires Windows')
    return 0.8
```

name: encrypt
repository_name: nficano/alexa-find-my-iphone
path: src/site-packages/keyrings/alt/Windows.py
body_hash: -5,918,730,288,420,528,000
body:

```python
def encrypt(self, password):
    """Encrypt the password using the CryptAPI."""
    return _win_crypto.encrypt(password)
```

name: decrypt
repository_name: nficano/alexa-find-my-iphone
path: src/site-packages/keyrings/alt/Windows.py
body_hash: -1,593,266,222,747,378,200
body:

```python
def decrypt(self, password_encrypted):
    """Decrypt the password using the CryptAPI."""
    return _win_crypto.decrypt(password_encrypted)
```

name: priority
repository_name: nficano/alexa-find-my-iphone
path: src/site-packages/keyrings/alt/Windows.py
body_hash: 1,881,900,591,926,196,200
body:

```python
@properties.ClassProperty
@classmethod
def priority(self):
    """
    Preferred on Windows when pywin32 isn't installed
    """
    if platform.system() != 'Windows':
        raise RuntimeError('Requires Windows')
    if not has_wincrypto():
        raise RuntimeError('Requires ctypes')
    return 2
```

name: get_password
repository_name: nficano/alexa-find-my-iphone
path: src/site-packages/keyrings/alt/Windows.py
body_hash: -6,747,333,599,557,987,000
body:

```python
def get_password(self, service, username):
    """Get password of the username for the service"""
    try:
        key = 'Software\\%s\\Keyring' % service
        hkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, key)
        password_saved = winreg.QueryValueEx(hkey, username)[0]
        password_base64 = password_saved.encode('ascii')
        password_encrypted = base64.decodestring(password_base64)
        password = _win_crypto.decrypt(password_encrypted).decode('utf-8')
    except EnvironmentError:
        password = None
    return password
```

name: set_password
repository_name: nficano/alexa-find-my-iphone
path: src/site-packages/keyrings/alt/Windows.py
body_hash: 6,056,869,806,802,730,000
body:

```python
def set_password(self, service, username, password):
    """Write the password to the registry"""
    password_encrypted = _win_crypto.encrypt(password.encode('utf-8'))
    password_base64 = base64.encodestring(password_encrypted)
    password_saved = password_base64.decode('ascii')
    key_name = 'Software\\%s\\Keyring' % service
    hkey = winreg.CreateKey(winreg.HKEY_CURRENT_USER, key_name)
    winreg.SetValueEx(hkey, username, 0, winreg.REG_SZ, password_saved)
```

name: delete_password
repository_name: nficano/alexa-find-my-iphone
path: src/site-packages/keyrings/alt/Windows.py
body_hash: -3,171,431,112,783,877,600
body:

```python
def delete_password(self, service, username):
    """Delete the password for the username of the service."""
    try:
        key_name = 'Software\\%s\\Keyring' % service
        hkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, key_name, 0,
                              winreg.KEY_ALL_ACCESS)
        winreg.DeleteValue(hkey, username)
        winreg.CloseKey(hkey)
    except WindowsError:
        e = sys.exc_info()[1]
        raise PasswordDeleteError(e)
    self._delete_key_if_empty(service)
```

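Note that `get_password` and `set_password` call `base64.decodestring` and `base64.encodestring`, long-deprecated aliases that were removed in Python 3.9; on a current interpreter the same code needs `base64.decodebytes`/`base64.encodebytes`. A small self-contained round trip with the modern names:

```python
import base64

secret = "hunter2".encode("utf-8")
encoded = base64.encodebytes(secret)          # replaces base64.encodestring
assert base64.decodebytes(encoded) == secret  # replaces base64.decodestring
print(encoded.decode("ascii").strip())
```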
name: __contains__
repository_name: n44hernandezp/openschc
path: rulemanager.py
body_hash: -3,873,695,675,860,616,700
body:

```python
def __contains__(self, t):
    """t in this"""
    for k, v in self.__dict__.items():
        if k == t:
            return True
        if isinstance(v, DictToAttrDeep):
            if t in v:
                return True
```

name: __getitem__
repository_name: n44hernandezp/openschc
path: rulemanager.py
body_hash: 521,882,756,960,341,760
body:

```python
def __getitem__(self, t):
    """this[k]"""
    for k, v in self.__dict__.items():
        if k == t:
            return v
        if isinstance(v, DictToAttrDeep):
            if t in v:
                return v[t]
```

name: get
repository_name: n44hernandezp/openschc
path: rulemanager.py
body_hash: -6,245,523,393,949,625,000
body:

```python
def get(self, k, d=None):
    """this.get(k)"""
    if k not in self:
        return d
    return self.__getitem__(k)
```

name: _checkRuleValue
repository_name: n44hernandezp/openschc
path: rulemanager.py
body_hash: 7,033,565,771,389,573,000
body:

```python
def _checkRuleValue(self, rule_id, rule_id_length):
    """this function looks if bits specified in ruleID are not outside of
    rule_id_length"""
    if rule_id_length > 32:
        raise ValueError('Rule length should be less than 32')
    r1 = rule_id
    for k in range(32, rule_id_length, -1):
        if ((1 << k) & r1) != 0:
            raise ValueError('rule ID too long')
```

name: _ruleIncluded
repository_name: n44hernandezp/openschc
path: rulemanager.py
body_hash: 4,394,786,761,340,157,400
body:

```python
def _ruleIncluded(self, r1ID, r1l, r2ID, r2l):
    """check if a conflict exists between to ruleID (i.e. same first bits equals)"""
    r1 = r1ID << (32 - r1l)
    r2 = r2ID << (32 - r2l)
    l = min(r1l, r2l)
    for k in range(32 - l, 32):
        if (r1 & (1 << k)) != (r2 & (1 << k)):
            return False
    return True
```

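Both helpers treat a rule ID as the left-aligned prefix of a 32-bit word: `_checkRuleValue` rejects IDs with bits set above the declared length, and `_ruleIncluded` reports a conflict when the shorter ID is a bit-prefix of the longer one. A standalone check of that prefix logic, mirroring the arithmetic above outside the class:

```python
def rule_included(r1_id, r1_len, r2_id, r2_len):
    # Left-align both IDs in a 32-bit word and compare the shared prefix,
    # as _ruleIncluded above does bit by bit.
    r1 = r1_id << (32 - r1_len)
    r2 = r2_id << (32 - r2_len)
    shared = min(r1_len, r2_len)
    return (r1 >> (32 - shared)) == (r2 >> (32 - shared))

# 0b110 (3 bits) is a prefix of 0b1101 (4 bits): the rules collide.
assert rule_included(0b110, 3, 0b1101, 4)
# 0b111 (3 bits) is not a prefix of 0b1101 (4 bits): no conflict.
assert not rule_included(0b111, 3, 0b1101, 4)
```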
name: find_rule_bypacket
repository_name: n44hernandezp/openschc
path: rulemanager.py
body_hash: -8,728,772,641,255,952,000
body:

```python
def find_rule_bypacket(self, context, packet_bbuf):
    """returns a compression rule or an fragmentation rule
    in the context matching with the field value of rule id in the packet.
    """
    for k in ['fragSender', 'fragReceiver', 'fragSender2', 'fragReceiver2', 'comp']:
        r = context.get(k)
        if r is not None:
            rule_id = packet_bbuf.get_bits(r['ruleLength'], position=0)
            if r['ruleID'] == rule_id:
                print('--------------------RuleManage------------------')
                print('ruleID ', rule_id)
                print()
                print('--------------------------------------------------')
                return (k, r)
    return (None, None)
```

name: find_context_bydevL2addr
repository_name: n44hernandezp/openschc
path: rulemanager.py
body_hash: 6,848,620,013,530,425,000
body:

```python
def find_context_bydevL2addr(self, dev_L2addr):
    """find a context with dev_L2addr."""
    for c in self._db:
        if c['devL2Addr'] == dev_L2addr:
            return c
        if c['devL2Addr'] == '*':
            return c
    return None
```

name: find_context_bydstiid
repository_name: n44hernandezp/openschc
path: rulemanager.py
body_hash: -5,258,360,269,052,741,000
body:

```python
def find_context_bydstiid(self, dst_iid):
    """find a context with dst_iid, which can be a wild card."""
    for c in self._db:
        if c['dstIID'] == dst_iid:
            return c
        if c['dstIID'] == '*':
            return c
    return None
```

name: find_context_exact
repository_name: n44hernandezp/openschc
path: rulemanager.py
body_hash: 7,843,116,249,667,408,000
body:

```python
def find_context_exact(self, dev_L2addr, dst_iid):
    """find a context by both devL2Addr and dstIID.
    This is mainly for internal use."""
    for c in self._db:
        if c['devL2Addr'] == dev_L2addr and c['dstIID'] == dst_iid:
            return c
    return None
```

name: add_context
repository_name: n44hernandezp/openschc
path: rulemanager.py
body_hash: 4,446,036,947,611,640,300
body:

```python
def add_context(self, context, comp=None, fragSender=None, fragReceiver=None,
                fragSender2=None, fragReceiver2=None):
    """add context into the db."""
    if self.find_context_exact(context['devL2Addr'], context['dstIID']) is not None:
        raise ValueError('the context {}/{} exist.'.format(
            context['devL2Addr'], context['dstIID']))
    c = deepcopy(context)
    self._db.append(c)
    self.add_rules(c, comp, fragSender, fragReceiver, fragSender2, fragReceiver2)
```

name: add_rules
repository_name: n44hernandezp/openschc
path: rulemanager.py
body_hash: 3,261,218,258,141,239,300
body:

```python
def add_rules(self, context, comp=None, fragSender=None, fragReceiver=None,
              fragSender2=None, fragReceiver2=None):
    """add rules into the context specified."""
    if comp is not None:
        self.add_rule(context, 'comp', comp)
    if fragSender is not None:
        self.add_rule(context, 'fragSender', fragSender)
    if fragReceiver is not None:
        self.add_rule(context, 'fragReceiver', fragReceiver)
    if fragSender2 is not None:
        self.add_rule(context, 'fragSender2', fragSender2)
    if fragReceiver2 is not None:
        self.add_rule(context, 'fragReceiver2', fragReceiver2)
```

name: add_rule
repository_name: n44hernandezp/openschc
path: rulemanager.py
body_hash: 7,021,694,655,899,434,000
body:

```python
def add_rule(self, context, key, rule):
    """Check rule integrity and uniqueless and add it to the db"""
    if 'ruleID' not in rule:
        raise ValueError('Rule ID not defined.')
    if 'ruleLength' not in rule:
        if rule['ruleID'] < 255:
            rule['ruleLength'] = 8
        else:
            raise ValueError('RuleID too large for default size on a byte')
    if key == 'comp':
        self.check_rule_compression(rule)
    elif key in ['fragSender', 'fragReceiver', 'fragSender2', 'fragReceiver2', 'comp']:
        self.check_rule_fragmentation(rule)
    else:
        raise ValueError('key must be either comp, fragSender, fragReceiver, fragSender2, fragReceiver2')
    rule_id = rule['ruleID']
    rule_id_length = rule['ruleLength']
    self._checkRuleValue(rule_id, rule_id_length)
    for k in ['fragSender', 'fragReceiver', 'fragSender2', 'fragReceiver2', 'comp']:
        r = context.get(k)
        if r is not None:
            if rule_id_length == r.ruleLength and rule_id == r.ruleID:
                raise ValueError('Rule {}/{} exists'.format(rule_id, rule_id_length))
    context[key] = DictToAttrDeep(**rule)
```

name: check_rule_compression
repository_name: n44hernandezp/openschc
path: rulemanager.py
body_hash: 2,203,879,224,705,384,400
body:

```python
def check_rule_compression(self, rule):
    """compression rule check"""
    if 'compression' not in rule or 'fragmentation' in rule:
        raise ValueError('{} Invalid rule'.format(self._nameRule(rule)))
    canon_rule_set = []
    for r in rule['compression']['rule_set']:
        canon_r = {}
        for k, v in r.items():
            if isinstance(v, str):
                canon_r[k.upper()] = v.upper()
            else:
                canon_r[k.upper()] = v
        canon_rule_set.append(canon_r)
    rule['compression']['rule_set'] = canon_rule_set
```

name: check_rule_fragmentation
repository_name: n44hernandezp/openschc
path: rulemanager.py
body_hash: -3,439,352,066,574,126,600
body:

```python
def check_rule_fragmentation(self, rule):
    """fragmentation rule check"""
    if 'fragmentation' not in rule or 'compression' in rule:
        raise ValueError('{} Invalid rule'.format(self._nameRule(rule)))
    if 'fragmentation' in rule:
        fragRule = rule['fragmentation']
        if 'FRMode' not in fragRule:
            raise ValueError('{} Fragmentation mode must be specified'.format(self._nameRule(rule)))
        mode = fragRule['FRMode']
        if mode not in ('noAck', 'ackAlways', 'ackOnError'):
            raise ValueError('{} Unknown fragmentation mode'.format(self._nameRule(rule)))
        if 'FRModeProfile' not in fragRule:
            fragRule['FRModeProfile'] = {}
        profile = fragRule['FRModeProfile']
        if 'dtagSize' not in profile:
            profile['dtagSize'] = 0
        if 'WSize' not in profile:
            if mode == 'noAck':
                profile['WSize'] = 0
            elif mode == 'ackAlways':
                profile['WSize'] = 1
            elif mode == 'ackOnError':
                profile['WSize'] = 5
        if 'FCNSize' not in profile:
            if mode == 'noAck':
                profile['FCNSize'] = 1
            elif mode == 'ackAlways':
                profile['FCNSize'] = 3
            elif mode == 'ackOnError':
                profile['FCNSize'] = 3
        if 'windowSize' in profile:
            if (profile['windowSize'] > (1 << profile['FCNSize']) - 1
                    or profile['windowSize'] < 0):
                raise ValueError('{} illegal windowSize'.format(self._nameRule(rule)))
        else:
            profile['windowSize'] = (1 << profile['FCNSize']) - 1
        if mode == 'ackOnError':
            if 'ackBehavior' not in profile:
                raise ValueError('Ack on error behavior must be specified (afterAll1 or afterAll0)')
            if 'tileSize' not in profile:
                profile['tileSize'] = 64
```

name: adjust_bbox
repository_name: mattl1598/Project-Mochachino
path: editing files/Portable Python 3.2.5.1/App/Lib/site-packages/matplotlib/tight_bbox.py
body_hash: 8,733,391,175,678,230,000
body:

```python
def adjust_bbox(fig, format, bbox_inches):
    """
    Temporarily adjust the figure so that only the specified area
    (bbox_inches) is saved.

    It modifies fig.bbox, fig.bbox_inches,
    fig.transFigure._boxout, and fig.patch. While the figure size
    changes, the scale of the original figure is conserved. A
    function which restores the original values are returned.
    """
    origBbox = fig.bbox
    origBboxInches = fig.bbox_inches
    _boxout = fig.transFigure._boxout
    asp_list = []
    locator_list = []
    for ax in fig.axes:
        pos = ax.get_position(original=False).frozen()
        locator_list.append(ax.get_axes_locator())
        asp_list.append(ax.get_aspect())

        def _l(a, r, pos=pos):
            return pos
        ax.set_axes_locator(_l)
        ax.set_aspect('auto')

    def restore_bbox():
        for ax, asp, loc in zip(fig.axes, asp_list, locator_list):
            ax.set_aspect(asp)
            ax.set_axes_locator(loc)
        fig.bbox = origBbox
        fig.bbox_inches = origBboxInches
        fig.transFigure._boxout = _boxout
        fig.transFigure.invalidate()
        fig.patch.set_bounds(0, 0, 1, 1)

    adjust_bbox_handler = _adjust_bbox_handler_d.get(format)
    if adjust_bbox_handler is not None:
        adjust_bbox_handler(fig, bbox_inches)
        return restore_bbox
    else:
        warnings.warn('bbox_inches option for %s backend is not implemented yet.' % format)
        return None
```

name: adjust_bbox_png
repository_name: mattl1598/Project-Mochachino
path: editing files/Portable Python 3.2.5.1/App/Lib/site-packages/matplotlib/tight_bbox.py
body_hash: 3,590,841,682,367,379,000
body:

```python
def adjust_bbox_png(fig, bbox_inches):
    """
    adjust_bbox for png (Agg) format
    """
    tr = fig.dpi_scale_trans
    _bbox = TransformedBbox(bbox_inches, tr)
    x0, y0 = _bbox.x0, _bbox.y0
    fig.bbox_inches = Bbox.from_bounds(0, 0, bbox_inches.width, bbox_inches.height)
    x0, y0 = _bbox.x0, _bbox.y0
    w1, h1 = fig.bbox.width, fig.bbox.height
    fig.transFigure._boxout = Bbox.from_bounds(-x0, -y0, w1, h1)
    fig.transFigure.invalidate()
    fig.bbox = TransformedBbox(fig.bbox_inches, tr)
    fig.patch.set_bounds(x0 / w1, y0 / h1, fig.bbox.width / w1, fig.bbox.height / h1)
```

name: adjust_bbox_pdf
repository_name: mattl1598/Project-Mochachino
path: editing files/Portable Python 3.2.5.1/App/Lib/site-packages/matplotlib/tight_bbox.py
body_hash: 6,674,782,105,807,579,000
body:

```python
def adjust_bbox_pdf(fig, bbox_inches):
    """
    adjust_bbox for pdf & eps format
    """
    if fig._cachedRenderer.__class__.__name__ == 'RendererPgf':
        tr = Affine2D().scale(fig.dpi)
        f = 1.0
    else:
        tr = Affine2D().scale(72)
        f = 72.0 / fig.dpi
    _bbox = TransformedBbox(bbox_inches, tr)
    fig.bbox_inches = Bbox.from_bounds(0, 0, bbox_inches.width, bbox_inches.height)
    x0, y0 = _bbox.x0, _bbox.y0
    w1, h1 = fig.bbox.width * f, fig.bbox.height * f
    fig.transFigure._boxout = Bbox.from_bounds(-x0, -y0, w1, h1)
    fig.transFigure.invalidate()
    fig.bbox = TransformedBbox(fig.bbox_inches, tr)
    fig.patch.set_bounds(x0 / w1, y0 / h1, fig.bbox.width / w1, fig.bbox.height / h1)
```

name: process_figure_for_rasterizing
repository_name: mattl1598/Project-Mochachino
path: editing files/Portable Python 3.2.5.1/App/Lib/site-packages/matplotlib/tight_bbox.py
body_hash: 424,844,068,388,893,500
body:

```python
def process_figure_for_rasterizing(figure, bbox_inches_restore, mode):
    """
    This need to be called when figure dpi changes during the drawing
    (e.g., rasterizing). It recovers the bbox and re-adjust it with
    the new dpi.
    """
    bbox_inches, restore_bbox = bbox_inches_restore
    restore_bbox()
    r = adjust_bbox(figure, mode, bbox_inches)
    return (bbox_inches, r)
```

name: validate_authorization_request
repository_name: 2tunnels/authlib
path: authlib/oauth2/rfc6749/grants/implicit.py
body_hash: -2,264,537,786,648,840,400
body:

```python
def validate_authorization_request(self):
    """The client constructs the request URI by adding the following
    parameters to the query component of the authorization endpoint URI
    using the "application/x-www-form-urlencoded" format.
    Per `Section 4.2.1`_.

    response_type
        REQUIRED. Value MUST be set to "token".

    client_id
        REQUIRED. The client identifier as described in Section 2.2.

    redirect_uri
        OPTIONAL. As described in Section 3.1.2.

    scope
        OPTIONAL. The scope of the access request as described by
        Section 3.3.

    state
        RECOMMENDED. An opaque value used by the client to maintain
        state between the request and callback. The authorization
        server includes this value when redirecting the user-agent back
        to the client. The parameter SHOULD be used for preventing
        cross-site request forgery as described in Section 10.12.

    The client directs the resource owner to the constructed URI using an
    HTTP redirection response, or by other means available to it via the
    user-agent.

    For example, the client directs the user-agent to make the following
    HTTP request using TLS:

    .. code-block:: http

        GET /authorize?response_type=token&client_id=s6BhdRkqt3&state=xyz
            &redirect_uri=https%3A%2F%2Fclient%2Eexample%2Ecom%2Fcb HTTP/1.1
        Host: server.example.com

    .. _`Section 4.2.1`: https://tools.ietf.org/html/rfc6749#section-4.2.1
    """
    client = self.authenticate_token_endpoint_client()
    log.debug('Validate authorization request of %r', client)
    redirect_uri = self.validate_authorization_redirect_uri(self.request, client)
    response_type = self.request.response_type
    if not client.check_response_type(response_type):
        raise UnauthorizedClientError(
            'The client is not authorized to use "response_type={}"'.format(response_type),
            state=self.request.state,
            redirect_uri=redirect_uri,
            redirect_fragment=True,
        )
    try:
        self.request.client = client
        self.validate_requested_scope()
        self.execute_hook('after_validate_authorization_request')
    except OAuth2Error as error:
        error.redirect_uri = redirect_uri
        error.redirect_fragment = True
        raise error
    return redirect_uri
```

name: create_authorization_response
repository_name: 2tunnels/authlib
path: authlib/oauth2/rfc6749/grants/implicit.py
body_hash: -206,207,470,362,243,260
body:

```python
def create_authorization_response(self, redirect_uri, grant_user):
    """If the resource owner grants the access request, the authorization
    server issues an access token and delivers it to the client by adding
    the following parameters to the fragment component of the redirection
    URI using the "application/x-www-form-urlencoded" format.
    Per `Section 4.2.2`_.

    access_token
        REQUIRED. The access token issued by the authorization server.

    token_type
        REQUIRED. The type of the token issued as described in
        Section 7.1. Value is case insensitive.

    expires_in
        RECOMMENDED. The lifetime in seconds of the access token. For
        example, the value "3600" denotes that the access token will
        expire in one hour from the time the response was generated.
        If omitted, the authorization server SHOULD provide the
        expiration time via other means or document the default value.

    scope
        OPTIONAL, if identical to the scope requested by the client;
        otherwise, REQUIRED. The scope of the access token as
        described by Section 3.3.

    state
        REQUIRED if the "state" parameter was present in the client
        authorization request. The exact value received from the
        client.

    The authorization server MUST NOT issue a refresh token.

    For example, the authorization server redirects the user-agent by
    sending the following HTTP response:

    .. code-block:: http

        HTTP/1.1 302 Found
        Location: http://example.com/cb#access_token=2YotnFZFEjr1zCsicMWpAA
            &state=xyz&token_type=example&expires_in=3600

    Developers should note that some user-agents do not support the
    inclusion of a fragment component in the HTTP "Location" response
    header field. Such clients will require using other methods for
    redirecting the client than a 3xx redirection response -- for
    example, returning an HTML page that includes a 'continue' button
    with an action linked to the redirection URI.

    .. _`Section 4.2.2`: https://tools.ietf.org/html/rfc6749#section-4.2.2

    :param redirect_uri: Redirect to the given URI for the authorization
    :param grant_user: if resource owner granted the request, pass this
        resource owner, otherwise pass None.
    :returns: (status_code, body, headers)
    """
    state = self.request.state
    if grant_user:
        self.request.user = grant_user
        client = self.request.client
        token = self.generate_token(
            client, self.GRANT_TYPE,
            user=grant_user,
            scope=client.get_allowed_scope(self.request.scope),
            include_refresh_token=False,
        )
        log.debug('Grant token %r to %r', token, client)
        self.save_token(token)
        self.execute_hook('process_token', token=token)
        params = [(k, token[k]) for k in token]
        if state:
            params.append(('state', state))
        uri = add_params_to_uri(redirect_uri, params, fragment=True)
        headers = [('Location', uri)]
        return (302, '', headers)
    else:
        raise AccessDeniedError(state=state, redirect_uri=redirect_uri,
                                redirect_fragment=True)
```

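The success branch above joins the token parameters onto the redirect URI's fragment via authlib's `add_params_to_uri(..., fragment=True)`. A dependency-free sketch of what that fragment construction amounts to, using only the standard library (this is not authlib's actual implementation, which also handles URIs that already carry a fragment):

```python
from urllib.parse import urlencode

def add_fragment_params(redirect_uri, params):
    # Implicit-grant responses carry the token in the URI fragment, not the
    # query string, so the parameters are never sent to the server hosting
    # the redirect URI.
    return redirect_uri + '#' + urlencode(params)

location = add_fragment_params('https://client.example.com/cb', [
    ('access_token', '2YotnFZFEjr1zCsicMWpAA'),
    ('token_type', 'example'),
    ('expires_in', '3600'),
    ('state', 'xyz'),
])
print(location)
# https://client.example.com/cb#access_token=2YotnFZFEjr1zCsicMWpAA&token_type=example&expires_in=3600&state=xyz
```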
name: combination
repository_name: ta7uw/atcoder
path: lib/python-lib/combination.py
body_hash: -5,737,441,407,772,606,000
body:

```python
def combination(n, r):
    """
    :param n: the count of different items
    :param r: the number of select
    :return: combination
        n! / (r! * (n - r)!)
    """
    r = min(n - r, r)
    result = 1
    for i in range(n, n - r, -1):
        result *= i
    for i in range(1, r + 1):
        result //= i
    return result
```

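The first loop multiplies the `r` largest factors of `n!` and the second divides by `1, 2, ..., r` in order, which keeps every intermediate value an integer: after dividing by `1..k` the running value is `n! / ((n-r)! * k!)`, a multiple of `C(n, r)`. A few spot checks with the function above in scope (values follow directly from the formula in the docstring):

```python
assert combination(5, 2) == 10    # 5! / (2! * 3!)
assert combination(10, 3) == 120
assert combination(10, 0) == 1    # both loops are empty, result stays 1
```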
name: cdsem_straight
repository_name: gdsfactory/gdsfactory
path: gdsfactory/components/cdsem_straight.py
body_hash: 8,057,996,998,196,953,000
body:

```python
@cell
def cdsem_straight(
    widths: Tuple[float, ...] = (0.4, 0.45, 0.5, 0.6, 0.8, 1.0),
    length: float = LINE_LENGTH,
    cross_section: CrossSectionFactory = strip,
    text: Optional[ComponentFactory] = text_rectangular_mini,
    spacing: float = 3,
) -> Component:
    """Returns straight waveguide lines width sweep.

    Args:
        widths: for the sweep
        length: for the line
        cross_section: for the lines
        text: optional text for labels
        spacing: edge to edge spacing
    """
    lines = []
    for width in widths:
        cross_section = partial(cross_section, width=width)
        line = straight_function(length=length, cross_section=cross_section)
        if text:
            line = line.copy()
            t = line << text(str(int(width * 1000.0)))
            t.xmin = line.xmax + 5
            t.y = 0
        lines.append(line)
    return grid(lines, spacing=(0, spacing))
```

name: write
repository_name: Daasin/FOSS-fTPM
path: hw/ip/otbn/dv/otbnsim/sim/ext_regs.py
body_hash: 981,121,838,863,917,000
body:

```python
def write(self, value: int, from_hw: bool) -> int:
    """Stage the effects of writing a value (see RGReg.write)"""
    assert value >= 0
    masked = value & ((1 << self.width) - 1)
    if self.read_only and not from_hw:
        pass
    elif self.w1c and not from_hw:
        self.next_value &= ~masked
    else:
        self.next_value = masked
    return self._next_sw_read()
```

name: set_bits
repository_name: Daasin/FOSS-fTPM
path: hw/ip/otbn/dv/otbnsim/sim/ext_regs.py
body_hash: 8,974,269,918,986,983,000
body:

```python
def set_bits(self, value: int) -> int:
    """Like write, but |=."""
    masked = value & ((1 << self.width) - 1)
    self.next_value |= masked
    return self._next_sw_read()
```

name: clear_bits
repository_name: Daasin/FOSS-fTPM
path: hw/ip/otbn/dv/otbnsim/sim/ext_regs.py
body_hash: 1,797,275,488,805,924,000
body:

```python
def clear_bits(self, value: int) -> int:
    """Like write, but &= ~."""
    self.next_value &= ~value
    return self._next_sw_read()
```

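The three methods above implement the usual register-field semantics: every software write is masked to the field width, `w1c` ("write one to clear") fields clear the bits written rather than set them, and read-only fields ignore bus writes. The masking arithmetic in plain Python, outside the class (`width = 8` is an arbitrary choice for the demo):

```python
width = 8
mask = (1 << width) - 1          # 0xff for an 8-bit field

value = 0x1f0
masked = value & mask            # 0xf0: bits above the field width are dropped
assert masked == 0xf0

# w1c behaviour: writing a 1 clears that bit instead of setting it.
next_value = 0b1010_1010
next_value &= ~(0b0000_1111 & mask)
assert next_value == 0b1010_0000
```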
name: write
repository_name: Daasin/FOSS-fTPM
path: hw/ip/otbn/dv/otbnsim/sim/ext_regs.py
body_hash: 1,804,722,120,910,216,000
body:

```python
def write(self, value: int, from_hw: bool) -> None:
    """Stage the effects of writing a value.

    If from_hw is true, this write is from OTBN hardware (rather than the
    bus).

    """
    assert value >= 0
    now = self._apply_fields(lambda fld, fv: fld.write(fv, from_hw), value)
    trace = self._next_trace if self.double_flopped else self._trace
    trace.append(ExtRegChange('=', value, from_hw, now))
```

name: write
repository_name: Daasin/FOSS-fTPM
path: hw/ip/otbn/dv/otbnsim/sim/ext_regs.py
body_hash: -2,695,661,009,836,998,000
body:

```python
def write(self, reg_name: str, value: int, from_hw: bool) -> None:
    """Stage the effects of writing a value to a register"""
    assert value >= 0
    self._get_reg(reg_name).write(value, from_hw)
    self._dirty = 2
```

name: set_bits
repository_name: Daasin/FOSS-fTPM
path: hw/ip/otbn/dv/otbnsim/sim/ext_regs.py
body_hash: -3,031,869,466,492,627,000
body:

```python
def set_bits(self, reg_name: str, value: int) -> None:
    """Set some bits of a register (HW access only)"""
    assert value >= 0
    self._get_reg(reg_name).set_bits(value)
    self._dirty = 2
```

name: increment_insn_cnt
repository_name: Daasin/FOSS-fTPM
path: hw/ip/otbn/dv/otbnsim/sim/ext_regs.py
body_hash: 6,531,003,870,117,979,000
body:

```python
def increment_insn_cnt(self) -> None:
    """Increment the INSN_CNT register"""
    reg = self._get_reg('INSN_CNT')
    assert len(reg.fields) == 1
    fld = reg.fields[0]
    reg.write(min(fld.value + 1, (1 << 32) - 1), True)
    self._dirty = 2
```

name: rollout
repository_name: rcorona/R2R-EnvDrop
path: r2r_src/agent.py
body_hash: 5,032,299,443,237,458,000
body:

```python
def rollout(self, **args):
    """Return a list of dicts containing instr_id:'xx',
    path:[(viewpointId, heading_rad, elevation_rad)]"""
    raise NotImplementedError
```

name: _sort_batch
repository_name: rcorona/R2R-EnvDrop
path: r2r_src/agent.py
body_hash: 8,841,784,052,885,048,000
body:

```python
def _sort_batch(self, obs):
    """Extract instructions from a list of observations and sort by descending
    sequence length (to enable PyTorch packing)."""
    seq_tensor = np.array([ob['instr_encoding'] for ob in obs])
    seq_lengths = np.argmax(seq_tensor == padding_idx, axis=1)
    seq_lengths[seq_lengths == 0] = seq_tensor.shape[1]
    seq_tensor = torch.from_numpy(seq_tensor)
    seq_lengths = torch.from_numpy(seq_lengths)
    seq_lengths, perm_idx = seq_lengths.sort(0, True)
    sorted_tensor = seq_tensor[perm_idx]
    mask = (sorted_tensor == padding_idx)[:, :seq_lengths[0]]
    return (Variable(sorted_tensor, requires_grad=False).long().cuda(),
            mask.byte().cuda(),
            list(seq_lengths), list(perm_idx))
```

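The sort exists because `torch.nn.utils.rnn.pack_padded_sequence` historically required batches ordered by decreasing length; `perm_idx` is returned so results can be un-permuted later. A self-contained illustration of that requirement with toy tensors, independent of the agent code:

```python
import torch
from torch.nn.utils.rnn import pack_padded_sequence

# Three padded sequences of lengths 4, 2 and 1, already sorted descending.
batch = torch.tensor([[1, 2, 3, 4],
                      [5, 6, 0, 0],
                      [7, 0, 0, 0]])
lengths = [4, 2, 1]

packed = pack_padded_sequence(batch.unsqueeze(-1).float(), lengths, batch_first=True)
print(packed.batch_sizes)  # tensor([3, 2, 1, 1]): active sequences per time step
```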
name: _feature_variable
repository_name: rcorona/R2R-EnvDrop
path: r2r_src/agent.py
body_hash: -5,244,354,546,349,609,000
body:

```python
def _feature_variable(self, obs):
    """Extract precomputed features into variable."""
    features = np.empty((len(obs), args.views, self.feature_size + args.angle_feat_size),
                        dtype=np.float32)
    for i, ob in enumerate(obs):
        features[i, :, :] = ob['feature']
    return Variable(torch.from_numpy(features), requires_grad=False).cuda()
```

name: _teacher_action
repository_name: rcorona/R2R-EnvDrop
path: r2r_src/agent.py
body_hash: -6,105,101,396,949,073,000
body:

```python
def _teacher_action(self, obs, ended):
    """
    Extract teacher actions into variable.
    :param obs: The observation.
    :param ended: Whether the action seq is ended
    :return:
    """
    a = np.zeros(len(obs), dtype=np.int64)
    for i, ob in enumerate(obs):
        if ended[i]:
            a[i] = args.ignoreid
        else:
            for k, candidate in enumerate(ob['candidate']):
                if candidate['viewpointId'] == ob['teacher']:
                    a[i] = k
                    break
            else:
                assert ob['teacher'] == ob['viewpoint']
                a[i] = len(ob['candidate'])
    return torch.from_numpy(a).cuda()
```

def make_equiv_action(self, a_t, perm_obs, perm_idx=None, traj=None): '\n Interface between Panoramic view and Egocentric view \n It will convert the action panoramic view action a_t to equivalent egocentric view actions for the simulator\n ' def take_action(i, idx, name): if (type(name) is int): self.env.env.sims[idx].makeAction(name, 0, 0) else: self.env.env.sims[idx].makeAction(*self.env_actions[name]) state = self.env.env.sims[idx].getState() if (traj is not None): traj[i]['path'].append((state.location.viewpointId, state.heading, state.elevation)) if (perm_idx is None): perm_idx = range(len(perm_obs)) for (i, idx) in enumerate(perm_idx): action = a_t[i] if (action != (- 1)): select_candidate = perm_obs[i]['candidate'][action] src_point = perm_obs[i]['viewIndex'] trg_point = select_candidate['pointId'] src_level = (src_point // 12) trg_level = (trg_point // 12) while (src_level < trg_level): take_action(i, idx, 'up') src_level += 1 while (src_level > trg_level): take_action(i, idx, 'down') src_level -= 1 while (self.env.env.sims[idx].getState().viewIndex != trg_point): take_action(i, idx, 'right') assert (select_candidate['viewpointId'] == self.env.env.sims[idx].getState().navigableLocations[select_candidate['idx']].viewpointId) take_action(i, idx, select_candidate['idx'])
1,658,864,895,377,073,200
Interface between the panoramic view and the egocentric view. Converts the panoramic-view action a_t into the equivalent egocentric-view actions for the simulator.
r2r_src/agent.py
make_equiv_action
rcorona/R2R-EnvDrop
python
def make_equiv_action(self, a_t, perm_obs, perm_idx=None, traj=None): '\n Interface between Panoramic view and Egocentric view \n It will convert the action panoramic view action a_t to equivalent egocentric view actions for the simulator\n ' def take_action(i, idx, name): if (type(name) is int): self.env.env.sims[idx].makeAction(name, 0, 0) else: self.env.env.sims[idx].makeAction(*self.env_actions[name]) state = self.env.env.sims[idx].getState() if (traj is not None): traj[i]['path'].append((state.location.viewpointId, state.heading, state.elevation)) if (perm_idx is None): perm_idx = range(len(perm_obs)) for (i, idx) in enumerate(perm_idx): action = a_t[i] if (action != (- 1)): select_candidate = perm_obs[i]['candidate'][action] src_point = perm_obs[i]['viewIndex'] trg_point = select_candidate['pointId'] src_level = (src_point // 12) trg_level = (trg_point // 12) while (src_level < trg_level): take_action(i, idx, 'up') src_level += 1 while (src_level > trg_level): take_action(i, idx, 'down') src_level -= 1 while (self.env.env.sims[idx].getState().viewIndex != trg_point): take_action(i, idx, 'right') assert (select_candidate['viewpointId'] == self.env.env.sims[idx].getState().navigableLocations[select_candidate['idx']].viewpointId) take_action(i, idx, select_candidate['idx'])
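The pointId arithmetic in make_equiv_action assumes the Matterport3D discretisation of 36 views: 12 headings per elevation level, so pointId // 12 is the level and pointId % 12 is the heading slot. A simulator-free sketch of the step planning (the always-turn-right convention mirrors the while loop above):

def steps_to_view(src_point, trg_point):
    """(vertical, right) moves between two of the 36 discrete views
    (12 headings x 3 elevation levels)."""
    vertical = trg_point // 12 - src_point // 12    # +1 per 'up', -1 per 'down'
    right = (trg_point % 12 - src_point % 12) % 12  # right turns only, as above
    return vertical, right

# From view 14 (level 1, heading 2) to view 29 (level 2, heading 5):
print(steps_to_view(14, 29))  # (1, 3): one 'up', three 'right' turns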
def rollout(self, train_ml=None, train_rl=True, reset=True, speaker=None): '\n :param train_ml: The weight to train with maximum likelihood\n :param train_rl: whether use RL in training\n :param reset: Reset the environment\n :param speaker: Speaker used in back translation.\n If the speaker is not None, use back translation.\n O.w., normal training\n :return:\n ' if ((self.feedback == 'teacher') or (self.feedback == 'argmax')): train_rl = False if reset: obs = np.array(self.env.reset()) else: obs = np.array(self.env._get_obs()) batch_size = len(obs) if (speaker is not None): noise = self.decoder.drop_env(torch.ones(self.feature_size).cuda()) batch = self.env.batch.copy() speaker.env = self.env insts = speaker.infer_batch(featdropmask=noise) boss = (np.ones((batch_size, 1), np.int64) * self.tok.word_to_index['<BOS>']) insts = np.concatenate((boss, insts), 1) for (i, (datum, inst)) in enumerate(zip(batch, insts)): if (inst[(- 1)] != self.tok.word_to_index['<PAD>']): inst[(- 1)] = self.tok.word_to_index['<EOS>'] datum.pop('instructions') datum.pop('instr_encoding') datum['instructions'] = self.tok.decode_sentence(inst) datum['instr_encoding'] = inst obs = np.array(self.env.reset(batch)) (seq, seq_mask, seq_lengths, perm_idx) = self._sort_batch(obs) perm_obs = obs[perm_idx] (ctx, h_t, c_t) = self.encoder(seq, seq_lengths) ctx_mask = seq_mask last_dist = np.zeros(batch_size, np.float32) for (i, ob) in enumerate(perm_obs): last_dist[i] = ob['distance'] traj = [{'instr_id': ob['instr_id'], 'path': [(ob['viewpoint'], ob['heading'], ob['elevation'])]} for ob in perm_obs] visited = [set() for _ in perm_obs] ended = np.array(([False] * batch_size)) rewards = [] hidden_states = [] policy_log_probs = [] masks = [] entropys = [] ml_loss = 0.0 h1 = h_t for t in range(self.episode_len): (input_a_t, f_t, candidate_feat, candidate_leng) = self.get_input_feat(perm_obs) if (speaker is not None): candidate_feat[..., :(- args.angle_feat_size)] *= noise f_t[..., :(- args.angle_feat_size)] *= noise (h_t, c_t, logit, h1) = self.decoder(input_a_t, f_t, candidate_feat, h_t, h1, c_t, ctx, ctx_mask, already_dropfeat=(speaker is not None)) hidden_states.append(h_t) candidate_mask = utils.length2mask(candidate_leng) if args.submit: for (ob_id, ob) in enumerate(perm_obs): visited[ob_id].add(ob['viewpoint']) for (c_id, c) in enumerate(ob['candidate']): if (c['viewpointId'] in visited[ob_id]): candidate_mask[ob_id][c_id] = 1 logit.masked_fill_(candidate_mask, (- float('inf'))) target = self._teacher_action(perm_obs, ended) ml_loss += self.criterion(logit, target) if (self.feedback == 'teacher'): a_t = target elif (self.feedback == 'argmax'): (_, a_t) = logit.max(1) a_t = a_t.detach() log_probs = F.log_softmax(logit, 1) policy_log_probs.append(log_probs.gather(1, a_t.unsqueeze(1))) elif (self.feedback == 'sample'): probs = F.softmax(logit, 1) c = torch.distributions.Categorical(probs) self.logs['entropy'].append(c.entropy().sum().item()) entropys.append(c.entropy()) a_t = c.sample().detach() policy_log_probs.append(c.log_prob(a_t)) else: print(self.feedback) sys.exit('Invalid feedback option') cpu_a_t = a_t.cpu().numpy() for (i, next_id) in enumerate(cpu_a_t): if ((next_id == (candidate_leng[i] - 1)) or (next_id == args.ignoreid) or ended[i]): cpu_a_t[i] = (- 1) self.make_equiv_action(cpu_a_t, perm_obs, perm_idx, traj) obs = np.array(self.env._get_obs()) perm_obs = obs[perm_idx] dist = np.zeros(batch_size, np.float32) reward = np.zeros(batch_size, np.float32) mask = np.ones(batch_size, np.float32) for (i, ob) in 
enumerate(perm_obs): dist[i] = ob['distance'] if ended[i]: reward[i] = 0.0 mask[i] = 0.0 else: action_idx = cpu_a_t[i] if (action_idx == (- 1)): if (dist[i] < 3): reward[i] = 2.0 else: reward[i] = (- 2.0) else: reward[i] = (- (dist[i] - last_dist[i])) if (reward[i] > 0): reward[i] = 1 elif (reward[i] < 0): reward[i] = (- 1) else: raise NameError("The action doesn't change the move") rewards.append(reward) masks.append(mask) last_dist[:] = dist ended[:] = np.logical_or(ended, (cpu_a_t == (- 1))) if ended.all(): break if train_rl: (input_a_t, f_t, candidate_feat, candidate_leng) = self.get_input_feat(perm_obs) if (speaker is not None): candidate_feat[..., :(- args.angle_feat_size)] *= noise f_t[..., :(- args.angle_feat_size)] *= noise (last_h_, _, _, _) = self.decoder(input_a_t, f_t, candidate_feat, h_t, h1, c_t, ctx, ctx_mask, (speaker is not None)) rl_loss = 0.0 last_value__ = self.critic(last_h_).detach() discount_reward = np.zeros(batch_size, np.float32) for i in range(batch_size): if (not ended[i]): discount_reward[i] = last_value__[i] length = len(rewards) total = 0 for t in range((length - 1), (- 1), (- 1)): discount_reward = ((discount_reward * args.gamma) + rewards[t]) mask_ = Variable(torch.from_numpy(masks[t]), requires_grad=False).cuda() clip_reward = discount_reward.copy() r_ = Variable(torch.from_numpy(clip_reward), requires_grad=False).cuda() v_ = self.critic(hidden_states[t]) a_ = (r_ - v_).detach() rl_loss += (((- policy_log_probs[t]) * a_) * mask_).sum() rl_loss += ((((r_ - v_) ** 2) * mask_).sum() * 0.5) if (self.feedback == 'sample'): rl_loss += (((- 0.01) * entropys[t]) * mask_).sum() self.logs['critic_loss'].append((((r_ - v_) ** 2) * mask_).sum().item()) total = (total + np.sum(masks[t])) self.logs['total'].append(total) if (args.normalize_loss == 'total'): rl_loss /= total elif (args.normalize_loss == 'batch'): rl_loss /= batch_size else: assert (args.normalize_loss == 'none') self.loss += rl_loss if (train_ml is not None): self.loss += ((ml_loss * train_ml) / batch_size) if (type(self.loss) is int): self.losses.append(0.0) else: self.losses.append((self.loss.item() / self.episode_len)) return traj
-5,757,432,292,980,751,000
:param train_ml: The weight to train with maximum likelihood :param train_rl: Whether to use RL in training :param reset: Reset the environment :param speaker: Speaker used in back translation. If the speaker is not None, use back translation. Otherwise, normal training. :return:
r2r_src/agent.py
rollout
rcorona/R2R-EnvDrop
python
def rollout(self, train_ml=None, train_rl=True, reset=True, speaker=None): '\n :param train_ml: The weight to train with maximum likelihood\n :param train_rl: whether use RL in training\n :param reset: Reset the environment\n :param speaker: Speaker used in back translation.\n If the speaker is not None, use back translation.\n O.w., normal training\n :return:\n ' if ((self.feedback == 'teacher') or (self.feedback == 'argmax')): train_rl = False if reset: obs = np.array(self.env.reset()) else: obs = np.array(self.env._get_obs()) batch_size = len(obs) if (speaker is not None): noise = self.decoder.drop_env(torch.ones(self.feature_size).cuda()) batch = self.env.batch.copy() speaker.env = self.env insts = speaker.infer_batch(featdropmask=noise) boss = (np.ones((batch_size, 1), np.int64) * self.tok.word_to_index['<BOS>']) insts = np.concatenate((boss, insts), 1) for (i, (datum, inst)) in enumerate(zip(batch, insts)): if (inst[(- 1)] != self.tok.word_to_index['<PAD>']): inst[(- 1)] = self.tok.word_to_index['<EOS>'] datum.pop('instructions') datum.pop('instr_encoding') datum['instructions'] = self.tok.decode_sentence(inst) datum['instr_encoding'] = inst obs = np.array(self.env.reset(batch)) (seq, seq_mask, seq_lengths, perm_idx) = self._sort_batch(obs) perm_obs = obs[perm_idx] (ctx, h_t, c_t) = self.encoder(seq, seq_lengths) ctx_mask = seq_mask last_dist = np.zeros(batch_size, np.float32) for (i, ob) in enumerate(perm_obs): last_dist[i] = ob['distance'] traj = [{'instr_id': ob['instr_id'], 'path': [(ob['viewpoint'], ob['heading'], ob['elevation'])]} for ob in perm_obs] visited = [set() for _ in perm_obs] ended = np.array(([False] * batch_size)) rewards = [] hidden_states = [] policy_log_probs = [] masks = [] entropys = [] ml_loss = 0.0 h1 = h_t for t in range(self.episode_len): (input_a_t, f_t, candidate_feat, candidate_leng) = self.get_input_feat(perm_obs) if (speaker is not None): candidate_feat[..., :(- args.angle_feat_size)] *= noise f_t[..., :(- args.angle_feat_size)] *= noise (h_t, c_t, logit, h1) = self.decoder(input_a_t, f_t, candidate_feat, h_t, h1, c_t, ctx, ctx_mask, already_dropfeat=(speaker is not None)) hidden_states.append(h_t) candidate_mask = utils.length2mask(candidate_leng) if args.submit: for (ob_id, ob) in enumerate(perm_obs): visited[ob_id].add(ob['viewpoint']) for (c_id, c) in enumerate(ob['candidate']): if (c['viewpointId'] in visited[ob_id]): candidate_mask[ob_id][c_id] = 1 logit.masked_fill_(candidate_mask, (- float('inf'))) target = self._teacher_action(perm_obs, ended) ml_loss += self.criterion(logit, target) if (self.feedback == 'teacher'): a_t = target elif (self.feedback == 'argmax'): (_, a_t) = logit.max(1) a_t = a_t.detach() log_probs = F.log_softmax(logit, 1) policy_log_probs.append(log_probs.gather(1, a_t.unsqueeze(1))) elif (self.feedback == 'sample'): probs = F.softmax(logit, 1) c = torch.distributions.Categorical(probs) self.logs['entropy'].append(c.entropy().sum().item()) entropys.append(c.entropy()) a_t = c.sample().detach() policy_log_probs.append(c.log_prob(a_t)) else: print(self.feedback) sys.exit('Invalid feedback option') cpu_a_t = a_t.cpu().numpy() for (i, next_id) in enumerate(cpu_a_t): if ((next_id == (candidate_leng[i] - 1)) or (next_id == args.ignoreid) or ended[i]): cpu_a_t[i] = (- 1) self.make_equiv_action(cpu_a_t, perm_obs, perm_idx, traj) obs = np.array(self.env._get_obs()) perm_obs = obs[perm_idx] dist = np.zeros(batch_size, np.float32) reward = np.zeros(batch_size, np.float32) mask = np.ones(batch_size, np.float32) for (i, ob) in 
enumerate(perm_obs): dist[i] = ob['distance'] if ended[i]: reward[i] = 0.0 mask[i] = 0.0 else: action_idx = cpu_a_t[i] if (action_idx == (- 1)): if (dist[i] < 3): reward[i] = 2.0 else: reward[i] = (- 2.0) else: reward[i] = (- (dist[i] - last_dist[i])) if (reward[i] > 0): reward[i] = 1 elif (reward[i] < 0): reward[i] = (- 1) else: raise NameError("The action doesn't change the move") rewards.append(reward) masks.append(mask) last_dist[:] = dist ended[:] = np.logical_or(ended, (cpu_a_t == (- 1))) if ended.all(): break if train_rl: (input_a_t, f_t, candidate_feat, candidate_leng) = self.get_input_feat(perm_obs) if (speaker is not None): candidate_feat[..., :(- args.angle_feat_size)] *= noise f_t[..., :(- args.angle_feat_size)] *= noise (last_h_, _, _, _) = self.decoder(input_a_t, f_t, candidate_feat, h_t, h1, c_t, ctx, ctx_mask, (speaker is not None)) rl_loss = 0.0 last_value__ = self.critic(last_h_).detach() discount_reward = np.zeros(batch_size, np.float32) for i in range(batch_size): if (not ended[i]): discount_reward[i] = last_value__[i] length = len(rewards) total = 0 for t in range((length - 1), (- 1), (- 1)): discount_reward = ((discount_reward * args.gamma) + rewards[t]) mask_ = Variable(torch.from_numpy(masks[t]), requires_grad=False).cuda() clip_reward = discount_reward.copy() r_ = Variable(torch.from_numpy(clip_reward), requires_grad=False).cuda() v_ = self.critic(hidden_states[t]) a_ = (r_ - v_).detach() rl_loss += (((- policy_log_probs[t]) * a_) * mask_).sum() rl_loss += ((((r_ - v_) ** 2) * mask_).sum() * 0.5) if (self.feedback == 'sample'): rl_loss += (((- 0.01) * entropys[t]) * mask_).sum() self.logs['critic_loss'].append((((r_ - v_) ** 2) * mask_).sum().item()) total = (total + np.sum(masks[t])) self.logs['total'].append(total) if (args.normalize_loss == 'total'): rl_loss /= total elif (args.normalize_loss == 'batch'): rl_loss /= batch_size else: assert (args.normalize_loss == 'none') self.loss += rl_loss if (train_ml is not None): self.loss += ((ml_loss * train_ml) / batch_size) if (type(self.loss) is int): self.losses.append(0.0) else: self.losses.append((self.loss.item() / self.episode_len)) return traj
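The reward shaping buried in rollout factors out cleanly. A standalone restatement that can be sanity-checked without the simulator (the 3 m success radius matches the R2R evaluation; the rest is the same logic as above):

def shaped_reward(stopped, dist, last_dist):
    """Per-step reward: terminal +-2 by the 3 m success radius,
    otherwise the sign of the progress toward the goal."""
    if stopped:  # the agent chose the 'stop' action
        return 2.0 if dist < 3 else -2.0
    delta = -(dist - last_dist)  # positive if the agent moved closer
    if delta > 0:
        return 1.0
    if delta < 0:
        return -1.0
    raise ValueError("The action doesn't change the distance")

assert shaped_reward(True, 2.5, 4.0) == 2.0   # stopped within 3 m
assert shaped_reward(False, 4.0, 5.0) == 1.0  # moved 1 m closer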
def _dijkstra(self): '\n The dijkstra algorithm.\n Was called beam search to be consistent with existing work.\n But it actually finds the Exact K paths with smallest listener log_prob.\n :return:\n [{\n "scan": XXX\n "instr_id":XXX,\n \'instr_encoding": XXX\n \'dijk_path\': [v1, v2, ..., vn] (The path used for find all the candidates)\n "paths": {\n "trajectory": [viewpoint_id1, viewpoint_id2, ..., ],\n "action": [act_1, act_2, ..., ],\n "listener_scores": [log_prob_act1, log_prob_act2, ..., ],\n "visual_feature": [(f1_step1, f2_step2, ...), (f1_step2, f2_step2, ...)\n }\n }]\n ' def make_state_id(viewpoint, action): return ('%s_%s' % (viewpoint, str(action))) def decompose_state_id(state_id): (viewpoint, action) = state_id.split('_') action = int(action) return (viewpoint, action) obs = self.env._get_obs() batch_size = len(obs) results = [{'scan': ob['scan'], 'instr_id': ob['instr_id'], 'instr_encoding': ob['instr_encoding'], 'dijk_path': [ob['viewpoint']], 'paths': []} for ob in obs] (seq, seq_mask, seq_lengths, perm_idx) = self._sort_batch(obs) recover_idx = np.zeros_like(perm_idx) for (i, idx) in enumerate(perm_idx): recover_idx[idx] = i (ctx, h_t, c_t) = self.encoder(seq, seq_lengths) (ctx, h_t, c_t, ctx_mask) = (ctx[recover_idx], h_t[recover_idx], c_t[recover_idx], seq_mask[recover_idx]) id2state = [{make_state_id(ob['viewpoint'], (- 95)): {'next_viewpoint': ob['viewpoint'], 'running_state': (h_t[i], h_t[i], c_t[i]), 'location': (ob['viewpoint'], ob['heading'], ob['elevation']), 'feature': None, 'from_state_id': None, 'score': 0, 'scores': [], 'actions': []}} for (i, ob) in enumerate(obs)] visited = [set() for _ in range(batch_size)] finished = [set() for _ in range(batch_size)] graphs = [utils.FloydGraph() for _ in range(batch_size)] ended = np.array(([False] * batch_size)) for _ in range(300): smallest_idXstate = [(max(((state_id, state) for (state_id, state) in id2state[i].items() if (state_id not in visited[i])), key=(lambda item: item[1]['score'])) if (not ended[i]) else next(iter(id2state[i].items()))) for i in range(batch_size)] for (i, (state_id, state)) in enumerate(smallest_idXstate): assert (ended[i] or (state_id not in visited[i])) if (not ended[i]): (viewpoint, action) = decompose_state_id(state_id) visited[i].add(state_id) if (action == (- 1)): finished[i].add(state_id) if (len(finished[i]) >= args.candidates): ended[i] = True (h_ts, h1s, c_ts) = zip(*(idXstate[1]['running_state'] for idXstate in smallest_idXstate)) (h_t, h1, c_t) = (torch.stack(h_ts), torch.stack(h1s), torch.stack(c_ts)) for (i, (state_id, state)) in enumerate(smallest_idXstate): next_viewpoint = state['next_viewpoint'] scan = results[i]['scan'] (from_viewpoint, heading, elevation) = state['location'] self.env.env.sims[i].newEpisode(scan, next_viewpoint, heading, elevation) obs = self.env._get_obs() for (i, ob) in enumerate(obs): viewpoint = ob['viewpoint'] if (not graphs[i].visited(viewpoint)): for c in ob['candidate']: next_viewpoint = c['viewpointId'] dis = self.env.distances[ob['scan']][viewpoint][next_viewpoint] graphs[i].add_edge(viewpoint, next_viewpoint, dis) graphs[i].update(viewpoint) results[i]['dijk_path'].extend(graphs[i].path(results[i]['dijk_path'][(- 1)], viewpoint)) (input_a_t, f_t, candidate_feat, candidate_leng) = self.get_input_feat(obs) (h_t, c_t, alpha, logit, h1) = self.decoder(input_a_t, f_t, candidate_feat, h_t, h1, c_t, ctx, ctx_mask, False) candidate_mask = utils.length2mask(candidate_leng) logit.masked_fill_(candidate_mask, (- float('inf'))) log_probs = F.log_softmax(logit, 
1) (_, max_act) = log_probs.max(1) for (i, ob) in enumerate(obs): current_viewpoint = ob['viewpoint'] candidate = ob['candidate'] (current_state_id, current_state) = smallest_idXstate[i] (old_viewpoint, from_action) = decompose_state_id(current_state_id) assert (ob['viewpoint'] == current_state['next_viewpoint']) if ((from_action == (- 1)) or ended[i]): continue for j in range((len(ob['candidate']) + 1)): modified_log_prob = log_probs[i][j].detach().cpu().item() new_score = (current_state['score'] + modified_log_prob) if (j < len(candidate)): next_id = make_state_id(current_viewpoint, j) next_viewpoint = candidate[j]['viewpointId'] trg_point = candidate[j]['pointId'] heading = (((trg_point % 12) * math.pi) / 6) elevation = ((((trg_point // 12) - 1) * math.pi) / 6) location = (next_viewpoint, heading, elevation) else: next_id = make_state_id(current_viewpoint, (- 1)) next_viewpoint = current_viewpoint location = (current_viewpoint, ob['heading'], ob['elevation']) if ((next_id not in id2state[i]) or (new_score > id2state[i][next_id]['score'])): id2state[i][next_id] = {'next_viewpoint': next_viewpoint, 'location': location, 'running_state': (h_t[i], h1[i], c_t[i]), 'from_state_id': current_state_id, 'feature': (f_t[i].detach().cpu(), candidate_feat[i][j].detach().cpu()), 'score': new_score, 'scores': (current_state['scores'] + [modified_log_prob]), 'actions': (current_state['actions'] + [(len(candidate) + 1)])} for i in range(batch_size): if (len(visited[i]) == len(id2state[i])): ended[i] = True if ended.all(): break for i in range(batch_size): results[i]['dijk_path'].extend(graphs[i].path(results[i]['dijk_path'][(- 1)], results[i]['dijk_path'][0])) '\n "paths": {\n "trajectory": [viewpoint_id1, viewpoint_id2, ..., ],\n "action": [act_1, act_2, ..., ],\n "listener_scores": [log_prob_act1, log_prob_act2, ..., ],\n "visual_feature": [(f1_step1, f2_step2, ...), (f1_step2, f2_step2, ...)\n }\n ' for (i, result) in enumerate(results): assert (len(finished[i]) <= args.candidates) for state_id in finished[i]: path_info = {'trajectory': [], 'action': [], 'listener_scores': id2state[i][state_id]['scores'], 'listener_actions': id2state[i][state_id]['actions'], 'visual_feature': []} (viewpoint, action) = decompose_state_id(state_id) while (action != (- 95)): state = id2state[i][state_id] path_info['trajectory'].append(state['location']) path_info['action'].append(action) path_info['visual_feature'].append(state['feature']) state_id = id2state[i][state_id]['from_state_id'] (viewpoint, action) = decompose_state_id(state_id) state = id2state[i][state_id] path_info['trajectory'].append(state['location']) for need_reverse_key in ['trajectory', 'action', 'visual_feature']: path_info[need_reverse_key] = path_info[need_reverse_key][::(- 1)] result['paths'].append(path_info) return results
-1,167,941,217,106,676,200
The Dijkstra algorithm. Called beam search to be consistent with existing work, but it actually finds the exact K paths with the smallest listener log_prob. :return: [{ "scan": XXX "instr_id": XXX, "instr_encoding": XXX "dijk_path": [v1, v2, ..., vn] (The path used to find all the candidates) "paths": { "trajectory": [viewpoint_id1, viewpoint_id2, ..., ], "action": [act_1, act_2, ..., ], "listener_scores": [log_prob_act1, log_prob_act2, ..., ], "visual_feature": [(f1_step1, f2_step1, ...), (f1_step2, f2_step2, ...)] } }]
r2r_src/agent.py
_dijkstra
rcorona/R2R-EnvDrop
python
def _dijkstra(self): '\n The dijkstra algorithm.\n Was called beam search to be consistent with existing work.\n But it actually finds the Exact K paths with smallest listener log_prob.\n :return:\n [{\n "scan": XXX\n "instr_id":XXX,\n \'instr_encoding": XXX\n \'dijk_path\': [v1, v2, ..., vn] (The path used for find all the candidates)\n "paths": {\n "trajectory": [viewpoint_id1, viewpoint_id2, ..., ],\n "action": [act_1, act_2, ..., ],\n "listener_scores": [log_prob_act1, log_prob_act2, ..., ],\n "visual_feature": [(f1_step1, f2_step2, ...), (f1_step2, f2_step2, ...)\n }\n }]\n ' def make_state_id(viewpoint, action): return ('%s_%s' % (viewpoint, str(action))) def decompose_state_id(state_id): (viewpoint, action) = state_id.split('_') action = int(action) return (viewpoint, action) obs = self.env._get_obs() batch_size = len(obs) results = [{'scan': ob['scan'], 'instr_id': ob['instr_id'], 'instr_encoding': ob['instr_encoding'], 'dijk_path': [ob['viewpoint']], 'paths': []} for ob in obs] (seq, seq_mask, seq_lengths, perm_idx) = self._sort_batch(obs) recover_idx = np.zeros_like(perm_idx) for (i, idx) in enumerate(perm_idx): recover_idx[idx] = i (ctx, h_t, c_t) = self.encoder(seq, seq_lengths) (ctx, h_t, c_t, ctx_mask) = (ctx[recover_idx], h_t[recover_idx], c_t[recover_idx], seq_mask[recover_idx]) id2state = [{make_state_id(ob['viewpoint'], (- 95)): {'next_viewpoint': ob['viewpoint'], 'running_state': (h_t[i], h_t[i], c_t[i]), 'location': (ob['viewpoint'], ob['heading'], ob['elevation']), 'feature': None, 'from_state_id': None, 'score': 0, 'scores': [], 'actions': []}} for (i, ob) in enumerate(obs)] visited = [set() for _ in range(batch_size)] finished = [set() for _ in range(batch_size)] graphs = [utils.FloydGraph() for _ in range(batch_size)] ended = np.array(([False] * batch_size)) for _ in range(300): smallest_idXstate = [(max(((state_id, state) for (state_id, state) in id2state[i].items() if (state_id not in visited[i])), key=(lambda item: item[1]['score'])) if (not ended[i]) else next(iter(id2state[i].items()))) for i in range(batch_size)] for (i, (state_id, state)) in enumerate(smallest_idXstate): assert (ended[i] or (state_id not in visited[i])) if (not ended[i]): (viewpoint, action) = decompose_state_id(state_id) visited[i].add(state_id) if (action == (- 1)): finished[i].add(state_id) if (len(finished[i]) >= args.candidates): ended[i] = True (h_ts, h1s, c_ts) = zip(*(idXstate[1]['running_state'] for idXstate in smallest_idXstate)) (h_t, h1, c_t) = (torch.stack(h_ts), torch.stack(h1s), torch.stack(c_ts)) for (i, (state_id, state)) in enumerate(smallest_idXstate): next_viewpoint = state['next_viewpoint'] scan = results[i]['scan'] (from_viewpoint, heading, elevation) = state['location'] self.env.env.sims[i].newEpisode(scan, next_viewpoint, heading, elevation) obs = self.env._get_obs() for (i, ob) in enumerate(obs): viewpoint = ob['viewpoint'] if (not graphs[i].visited(viewpoint)): for c in ob['candidate']: next_viewpoint = c['viewpointId'] dis = self.env.distances[ob['scan']][viewpoint][next_viewpoint] graphs[i].add_edge(viewpoint, next_viewpoint, dis) graphs[i].update(viewpoint) results[i]['dijk_path'].extend(graphs[i].path(results[i]['dijk_path'][(- 1)], viewpoint)) (input_a_t, f_t, candidate_feat, candidate_leng) = self.get_input_feat(obs) (h_t, c_t, alpha, logit, h1) = self.decoder(input_a_t, f_t, candidate_feat, h_t, h1, c_t, ctx, ctx_mask, False) candidate_mask = utils.length2mask(candidate_leng) logit.masked_fill_(candidate_mask, (- float('inf'))) log_probs = F.log_softmax(logit, 
1) (_, max_act) = log_probs.max(1) for (i, ob) in enumerate(obs): current_viewpoint = ob['viewpoint'] candidate = ob['candidate'] (current_state_id, current_state) = smallest_idXstate[i] (old_viewpoint, from_action) = decompose_state_id(current_state_id) assert (ob['viewpoint'] == current_state['next_viewpoint']) if ((from_action == (- 1)) or ended[i]): continue for j in range((len(ob['candidate']) + 1)): modified_log_prob = log_probs[i][j].detach().cpu().item() new_score = (current_state['score'] + modified_log_prob) if (j < len(candidate)): next_id = make_state_id(current_viewpoint, j) next_viewpoint = candidate[j]['viewpointId'] trg_point = candidate[j]['pointId'] heading = (((trg_point % 12) * math.pi) / 6) elevation = ((((trg_point // 12) - 1) * math.pi) / 6) location = (next_viewpoint, heading, elevation) else: next_id = make_state_id(current_viewpoint, (- 1)) next_viewpoint = current_viewpoint location = (current_viewpoint, ob['heading'], ob['elevation']) if ((next_id not in id2state[i]) or (new_score > id2state[i][next_id]['score'])): id2state[i][next_id] = {'next_viewpoint': next_viewpoint, 'location': location, 'running_state': (h_t[i], h1[i], c_t[i]), 'from_state_id': current_state_id, 'feature': (f_t[i].detach().cpu(), candidate_feat[i][j].detach().cpu()), 'score': new_score, 'scores': (current_state['scores'] + [modified_log_prob]), 'actions': (current_state['actions'] + [(len(candidate) + 1)])} for i in range(batch_size): if (len(visited[i]) == len(id2state[i])): ended[i] = True if ended.all(): break for i in range(batch_size): results[i]['dijk_path'].extend(graphs[i].path(results[i]['dijk_path'][(- 1)], results[i]['dijk_path'][0])) '\n "paths": {\n "trajectory": [viewpoint_id1, viewpoint_id2, ..., ],\n "action": [act_1, act_2, ..., ],\n "listener_scores": [log_prob_act1, log_prob_act2, ..., ],\n "visual_feature": [(f1_step1, f2_step2, ...), (f1_step2, f2_step2, ...)\n }\n ' for (i, result) in enumerate(results): assert (len(finished[i]) <= args.candidates) for state_id in finished[i]: path_info = {'trajectory': [], 'action': [], 'listener_scores': id2state[i][state_id]['scores'], 'listener_actions': id2state[i][state_id]['actions'], 'visual_feature': []} (viewpoint, action) = decompose_state_id(state_id) while (action != (- 95)): state = id2state[i][state_id] path_info['trajectory'].append(state['location']) path_info['action'].append(action) path_info['visual_feature'].append(state['feature']) state_id = id2state[i][state_id]['from_state_id'] (viewpoint, action) = decompose_state_id(state_id) state = id2state[i][state_id] path_info['trajectory'].append(state['location']) for need_reverse_key in ['trajectory', 'action', 'visual_feature']: path_info[need_reverse_key] = path_info[need_reverse_key][::(- 1)] result['paths'].append(path_info) return results
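Despite the name, _dijkstra is best-first expansion over accumulated log-probabilities: it repeatedly takes the unvisited state with the highest score. A toy max-heap version on a hand-written graph (node names and edge log-probs are invented):

import heapq

# Log-prob of taking each edge; closer to 0 is better.
edges = {
    'start': [('a', -0.1), ('b', -0.7)],
    'a':     [('goal', -0.2)],
    'b':     [('goal', -0.1)],
}

best = {'start': 0.0}
heap = [(0.0, 'start')]  # max-score search via negated scores
while heap:
    neg_score, node = heapq.heappop(heap)
    score = -neg_score
    if score < best.get(node, float('-inf')):
        continue  # stale heap entry
    for nxt, logp in edges.get(node, []):
        new = score + logp
        if new > best.get(nxt, float('-inf')):
            best[nxt] = new
            heapq.heappush(heap, (-new, nxt))

print(best['goal'])  # about -0.3, via start -> a -> goal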
def beam_search(self, speaker): '\n :param speaker: The speaker to be used in searching.\n :return:\n {\n "scan": XXX\n "instr_id":XXX,\n "instr_encoding": XXX\n "dijk_path": [v1, v2, ...., vn]\n "paths": [{\n "trajectory": [viewoint_id0, viewpoint_id1, viewpoint_id2, ..., ],\n "action": [act_1, act_2, ..., ],\n "listener_scores": [log_prob_act1, log_prob_act2, ..., ],\n "speaker_scores": [log_prob_word1, log_prob_word2, ..., ],\n }]\n }\n ' self.env.reset() results = self._dijkstra() '\n return from self._dijkstra()\n [{\n "scan": XXX\n "instr_id":XXX,\n "instr_encoding": XXX\n "dijk_path": [v1, v2, ...., vn]\n "paths": [{\n "trajectory": [viewoint_id0, viewpoint_id1, viewpoint_id2, ..., ],\n "action": [act_1, act_2, ..., ],\n "listener_scores": [log_prob_act1, log_prob_act2, ..., ],\n "visual_feature": [(f1_step1, f2_step2, ...), (f1_step2, f2_step2, ...)\n }]\n }]\n ' for result in results: lengths = [] num_paths = len(result['paths']) for path in result['paths']: assert (len(path['trajectory']) == (len(path['visual_feature']) + 1)) lengths.append(len(path['visual_feature'])) max_len = max(lengths) img_feats = torch.zeros(num_paths, max_len, 36, (self.feature_size + args.angle_feat_size)) can_feats = torch.zeros(num_paths, max_len, (self.feature_size + args.angle_feat_size)) for (j, path) in enumerate(result['paths']): for (k, feat) in enumerate(path['visual_feature']): (img_feat, can_feat) = feat img_feats[j][k] = img_feat can_feats[j][k] = can_feat (img_feats, can_feats) = (img_feats.cuda(), can_feats.cuda()) features = ((img_feats, can_feats), lengths) insts = np.array([result['instr_encoding'] for _ in range(num_paths)]) seq_lengths = np.argmax((insts == self.tok.word_to_index['<EOS>']), axis=1) insts = torch.from_numpy(insts).cuda() speaker_scores = speaker.teacher_forcing(train=True, features=features, insts=insts, for_listener=True) for (j, path) in enumerate(result['paths']): path.pop('visual_feature') path['speaker_scores'] = (- speaker_scores[j].detach().cpu().numpy()[:seq_lengths[j]]) return results
-8,871,821,870,732,488,000
:param speaker: The speaker to be used in searching. :return: { "scan": XXX "instr_id": XXX, "instr_encoding": XXX "dijk_path": [v1, v2, ..., vn] "paths": [{ "trajectory": [viewpoint_id0, viewpoint_id1, viewpoint_id2, ..., ], "action": [act_1, act_2, ..., ], "listener_scores": [log_prob_act1, log_prob_act2, ..., ], "speaker_scores": [log_prob_word1, log_prob_word2, ..., ], }] }
r2r_src/agent.py
beam_search
rcorona/R2R-EnvDrop
python
def beam_search(self, speaker): '\n :param speaker: The speaker to be used in searching.\n :return:\n {\n "scan": XXX\n "instr_id":XXX,\n "instr_encoding": XXX\n "dijk_path": [v1, v2, ...., vn]\n "paths": [{\n "trajectory": [viewoint_id0, viewpoint_id1, viewpoint_id2, ..., ],\n "action": [act_1, act_2, ..., ],\n "listener_scores": [log_prob_act1, log_prob_act2, ..., ],\n "speaker_scores": [log_prob_word1, log_prob_word2, ..., ],\n }]\n }\n ' self.env.reset() results = self._dijkstra() '\n return from self._dijkstra()\n [{\n "scan": XXX\n "instr_id":XXX,\n "instr_encoding": XXX\n "dijk_path": [v1, v2, ...., vn]\n "paths": [{\n "trajectory": [viewoint_id0, viewpoint_id1, viewpoint_id2, ..., ],\n "action": [act_1, act_2, ..., ],\n "listener_scores": [log_prob_act1, log_prob_act2, ..., ],\n "visual_feature": [(f1_step1, f2_step2, ...), (f1_step2, f2_step2, ...)\n }]\n }]\n ' for result in results: lengths = [] num_paths = len(result['paths']) for path in result['paths']: assert (len(path['trajectory']) == (len(path['visual_feature']) + 1)) lengths.append(len(path['visual_feature'])) max_len = max(lengths) img_feats = torch.zeros(num_paths, max_len, 36, (self.feature_size + args.angle_feat_size)) can_feats = torch.zeros(num_paths, max_len, (self.feature_size + args.angle_feat_size)) for (j, path) in enumerate(result['paths']): for (k, feat) in enumerate(path['visual_feature']): (img_feat, can_feat) = feat img_feats[j][k] = img_feat can_feats[j][k] = can_feat (img_feats, can_feats) = (img_feats.cuda(), can_feats.cuda()) features = ((img_feats, can_feats), lengths) insts = np.array([result['instr_encoding'] for _ in range(num_paths)]) seq_lengths = np.argmax((insts == self.tok.word_to_index['<EOS>']), axis=1) insts = torch.from_numpy(insts).cuda() speaker_scores = speaker.teacher_forcing(train=True, features=features, insts=insts, for_listener=True) for (j, path) in enumerate(result['paths']): path.pop('visual_feature') path['speaker_scores'] = (- speaker_scores[j].detach().cpu().numpy()[:seq_lengths[j]]) return results
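beam_search only attaches speaker scores to each candidate path; the re-ranking itself happens elsewhere in the codebase. A hedged sketch of the usual recipe, a convex combination of mean listener and speaker log-probs, where the 0.5 weight and the sign convention of the stored scores are assumptions rather than the repo's tuned values:

import numpy as np

def rerank(paths, weight=0.5):
    """Pick the path maximising a mix of listener and speaker log-probs."""
    def score(p):
        return (weight * np.mean(p['listener_scores'])
                + (1 - weight) * np.mean(p['speaker_scores']))
    return max(paths, key=score)

paths = [{'listener_scores': [-0.2, -0.3], 'speaker_scores': [-0.4, -0.5]},
         {'listener_scores': [-0.1, -0.9], 'speaker_scores': [-0.2, -0.3]}]
print(rerank(paths))  # the first path wins: -0.35 > -0.375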
def test(self, use_dropout=False, feedback='argmax', allow_cheat=False, iters=None): ' Evaluate once on each instruction in the current environment ' self.feedback = feedback if use_dropout: self.encoder.train() self.decoder.train() self.critic.train() else: self.encoder.eval() self.decoder.eval() self.critic.eval() super(Seq2SeqAgent, self).test(iters)
4,978,732,549,777,399,000
Evaluate once on each instruction in the current environment
r2r_src/agent.py
test
rcorona/R2R-EnvDrop
python
def test(self, use_dropout=False, feedback='argmax', allow_cheat=False, iters=None): ' ' self.feedback = feedback if use_dropout: self.encoder.train() self.decoder.train() self.critic.train() else: self.encoder.eval() self.decoder.eval() self.critic.eval() super(Seq2SeqAgent, self).test(iters)
def train(self, n_iters, feedback='teacher', **kwargs): ' Train for a given number of iterations ' self.feedback = feedback self.encoder.train() self.decoder.train() self.critic.train() self.losses = [] for iter in tqdm(range(1, (n_iters + 1))): self.encoder_optimizer.zero_grad() self.decoder_optimizer.zero_grad() self.critic_optimizer.zero_grad() self.loss = 0 if (feedback == 'teacher'): self.feedback = 'teacher' self.rollout(train_ml=args.teacher_weight, train_rl=False, **kwargs) elif (feedback == 'sample'): if (args.ml_weight != 0): self.feedback = 'teacher' self.rollout(train_ml=args.ml_weight, train_rl=False, **kwargs) self.feedback = 'sample' self.rollout(train_ml=None, train_rl=True, **kwargs) else: assert False self.loss.backward() torch.nn.utils.clip_grad_norm(self.encoder.parameters(), 40.0) torch.nn.utils.clip_grad_norm(self.decoder.parameters(), 40.0) self.encoder_optimizer.step() self.decoder_optimizer.step() self.critic_optimizer.step()
8,232,530,218,817,938,000
Train for a given number of iterations
r2r_src/agent.py
train
rcorona/R2R-EnvDrop
python
def train(self, n_iters, feedback='teacher', **kwargs): ' ' self.feedback = feedback self.encoder.train() self.decoder.train() self.critic.train() self.losses = [] for iter in tqdm(range(1, (n_iters + 1))): self.encoder_optimizer.zero_grad() self.decoder_optimizer.zero_grad() self.critic_optimizer.zero_grad() self.loss = 0 if (feedback == 'teacher'): self.feedback = 'teacher' self.rollout(train_ml=args.teacher_weight, train_rl=False, **kwargs) elif (feedback == 'sample'): if (args.ml_weight != 0): self.feedback = 'teacher' self.rollout(train_ml=args.ml_weight, train_rl=False, **kwargs) self.feedback = 'sample' self.rollout(train_ml=None, train_rl=True, **kwargs) else: assert False self.loss.backward() torch.nn.utils.clip_grad_norm(self.encoder.parameters(), 40.0) torch.nn.utils.clip_grad_norm(self.decoder.parameters(), 40.0) self.encoder_optimizer.step() self.decoder_optimizer.step() self.critic_optimizer.step()
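torch.nn.utils.clip_grad_norm as called above was renamed clip_grad_norm_ in PyTorch 0.4; the unsuffixed name is deprecated. A minimal modern equivalent of one optimisation step, with a stand-in model instead of the encoder/decoder/critic triple:

import torch
import torch.nn as nn

model = nn.Linear(8, 2)  # stand-in for the real networks
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

x, y = torch.randn(4, 8), torch.randn(4, 2)
optimizer.zero_grad()
loss = nn.functional.mse_loss(model(x), y)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=40.0)  # same 40.0 as above
optimizer.step()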
def save(self, epoch, path): ' Snapshot models ' (the_dir, _) = os.path.split(path) os.makedirs(the_dir, exist_ok=True) states = {} def create_state(name, model, optimizer): states[name] = {'epoch': (epoch + 1), 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()} all_tuple = [('encoder', self.encoder, self.encoder_optimizer), ('decoder', self.decoder, self.decoder_optimizer), ('critic', self.critic, self.critic_optimizer)] for param in all_tuple: create_state(*param) torch.save(states, path)
-2,901,870,798,852,050,400
Snapshot models
r2r_src/agent.py
save
rcorona/R2R-EnvDrop
python
def save(self, epoch, path): ' ' (the_dir, _) = os.path.split(path) os.makedirs(the_dir, exist_ok=True) states = {} def create_state(name, model, optimizer): states[name] = {'epoch': (epoch + 1), 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()} all_tuple = [('encoder', self.encoder, self.encoder_optimizer), ('decoder', self.decoder, self.decoder_optimizer), ('critic', self.critic, self.critic_optimizer)] for param in all_tuple: create_state(*param) torch.save(states, path)
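The checkpoint layout above is one nested dict per (name, model, optimizer) triple. A round-trip sketch with a dummy module (the file name is arbitrary):

import torch
import torch.nn as nn

model = nn.Linear(4, 4)  # stand-in for the encoder
optimizer = torch.optim.Adam(model.parameters())

states = {'encoder': {'epoch': 1,
                      'state_dict': model.state_dict(),
                      'optimizer': optimizer.state_dict()}}
torch.save(states, 'checkpoint.pth')  # hypothetical path

loaded = torch.load('checkpoint.pth')
model.load_state_dict(loaded['encoder']['state_dict'])
start_epoch = loaded['encoder']['epoch'] - 1  # same convention as load() below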
def load(self, path): ' Loads parameters (but not training state) ' states = torch.load(path) def recover_state(name, model, optimizer): state = model.state_dict() model_keys = set(state.keys()) load_keys = set(states[name]['state_dict'].keys()) if (model_keys != load_keys): print('NOTICE: DIFFERENT KEYS IN THE LISTENER') state.update(states[name]['state_dict']) model.load_state_dict(state) if args.loadOptim: optimizer.load_state_dict(states[name]['optimizer']) all_tuple = [('encoder', self.encoder, self.encoder_optimizer), ('decoder', self.decoder, self.decoder_optimizer), ('critic', self.critic, self.critic_optimizer)] for param in all_tuple: recover_state(*param) return (states['encoder']['epoch'] - 1)
3,282,851,448,400,201,700
Loads parameters (but not training state)
r2r_src/agent.py
load
rcorona/R2R-EnvDrop
python
def load(self, path): ' ' states = torch.load(path) def recover_state(name, model, optimizer): state = model.state_dict() model_keys = set(state.keys()) load_keys = set(states[name]['state_dict'].keys()) if (model_keys != load_keys): print('NOTICE: DIFFERENT KEYS IN THE LISTENER') state.update(states[name]['state_dict']) model.load_state_dict(state) if args.loadOptim: optimizer.load_state_dict(states[name]['optimizer']) all_tuple = [('encoder', self.encoder, self.encoder_optimizer), ('decoder', self.decoder, self.decoder_optimizer), ('critic', self.critic, self.critic_optimizer)] for param in all_tuple: recover_state(*param) return (states['encoder']['epoch'] - 1)
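load tolerates checkpoint/model key mismatches by updating the model's own state_dict with whatever the checkpoint provides, so parameters missing from the checkpoint keep their fresh initialisation. A compact illustration with invented modules:

import torch.nn as nn

class Old(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(4, 4)

class New(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(4, 4)
        self.extra = nn.Linear(4, 2)  # parameter the checkpoint lacks

ckpt = Old().state_dict()
model = New()
state = model.state_dict()
if set(state) != set(ckpt):
    print('NOTICE: DIFFERENT KEYS IN THE LISTENER')
state.update(ckpt)            # overwrite only the shared keys
model.load_state_dict(state)  # 'extra' keeps its fresh initialisation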
def _get_local_ip_address(self): '\n Gets the local ip address of this computer\n @returns str Local IP address\n ' s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(('8.8.8.8', 80)) local_ip_address = s.getsockname()[0] s.close() return local_ip_address
7,471,134,649,321,523,000
Gets the local IP address of this computer @returns str Local IP address
openbci/wifi.py
_get_local_ip_address
daniellasry/OpenBCI_Python
python
def _get_local_ip_address(self): '\n Gets the local ip address of this computer\n @returns str Local IP address\n ' s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(('8.8.8.8', 80)) local_ip_address = s.getsockname()[0] s.close() return local_ip_address
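The UDP "connect" above never sends a packet: connecting a datagram socket only asks the OS routing table which interface would be used to reach 8.8.8.8, and getsockname() then reports that interface's address. A variant with a context manager and an offline fallback (the loopback default is a choice made here, not OpenBCI behaviour):

import socket

def local_ip(probe=('8.8.8.8', 80)):
    """Best-effort local IP; falls back to loopback with no route out."""
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        try:
            s.connect(probe)  # no traffic: this only selects a route
            return s.getsockname()[0]
        except OSError:
            return '127.0.0.1'

print(local_ip())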
def getBoardType(self): ' Returns the version of the board ' return self.board_type
-4,757,551,775,959,316,000
Returns the version of the board
openbci/wifi.py
getBoardType
daniellasry/OpenBCI_Python
python
def getBoardType(self): ' ' return self.board_type
def setImpedance(self, flag): ' Enable/disable impedance measure ' self.impedance = bool(flag)
-5,800,124,032,592,154,000
Enable/disable impedance measure
openbci/wifi.py
setImpedance
daniellasry/OpenBCI_Python
python
def setImpedance(self, flag): ' ' self.impedance = bool(flag)
def connect(self): ' Connect to the board and configure it. Note: recreates various objects upon call. ' if (self.ip_address is None): raise ValueError('self.ip_address cannot be None') if self.log: print(('Init WiFi connection with IP: ' + self.ip_address)) '\n Docs on these HTTP requests and more are found:\n https://app.swaggerhub.com/apis/pushtheworld/openbci-wifi-server/1.3.0\n ' res_board = requests.get(('http://%s/board' % self.ip_address)) if (res_board.status_code == 200): board_info = res_board.json() if (not board_info['board_connected']): raise RuntimeError('No board connected to WiFi Shield. To learn how to connect to a Cyton or Ganglion visit http://docs.openbci.com/Tutorials/03-Wifi_Getting_Started_Guide') self.board_type = board_info['board_type'] self.eeg_channels_per_sample = board_info['num_channels'] if self.log: print(('Connected to %s with %s channels' % (self.board_type, self.eeg_channels_per_sample))) self.gains = None if (self.board_type == k.BOARD_CYTON): self.gains = [24, 24, 24, 24, 24, 24, 24, 24] self.daisy = False elif (self.board_type == k.BOARD_DAISY): self.gains = [24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24] self.daisy = True elif (self.board_type == k.BOARD_GANGLION): self.gains = [51, 51, 51, 51] self.daisy = False self.local_wifi_server.set_daisy(daisy=self.daisy) self.local_wifi_server.set_parser(ParseRaw(gains=self.gains, board_type=self.board_type)) if self.high_speed: output_style = 'raw' else: output_style = 'json' res_tcp_post = requests.post(('http://%s/tcp' % self.ip_address), json={'ip': self.local_ip_address, 'port': self.local_wifi_server_port, 'output': output_style, 'delimiter': True, 'latency': self.latency}) if (res_tcp_post.status_code == 200): tcp_status = res_tcp_post.json() if tcp_status['connected']: if self.log: print('WiFi Shield to Python TCP Socket Established') else: raise RuntimeWarning('WiFi Shield is not able to connect to local server. Please open an issue.')
-1,779,810,696,293,935,000
Connect to the board and configure it. Note: recreates various objects upon call.
openbci/wifi.py
connect
daniellasry/OpenBCI_Python
python
def connect(self): ' ' if (self.ip_address is None): raise ValueError('self.ip_address cannot be None') if self.log: print(('Init WiFi connection with IP: ' + self.ip_address)) '\n Docs on these HTTP requests and more are found:\n https://app.swaggerhub.com/apis/pushtheworld/openbci-wifi-server/1.3.0\n ' res_board = requests.get(('http://%s/board' % self.ip_address)) if (res_board.status_code == 200): board_info = res_board.json() if (not board_info['board_connected']): raise RuntimeError('No board connected to WiFi Shield. To learn how to connect to a Cyton or Ganglion visit http://docs.openbci.com/Tutorials/03-Wifi_Getting_Started_Guide') self.board_type = board_info['board_type'] self.eeg_channels_per_sample = board_info['num_channels'] if self.log: print(('Connected to %s with %s channels' % (self.board_type, self.eeg_channels_per_sample))) self.gains = None if (self.board_type == k.BOARD_CYTON): self.gains = [24, 24, 24, 24, 24, 24, 24, 24] self.daisy = False elif (self.board_type == k.BOARD_DAISY): self.gains = [24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24] self.daisy = True elif (self.board_type == k.BOARD_GANGLION): self.gains = [51, 51, 51, 51] self.daisy = False self.local_wifi_server.set_daisy(daisy=self.daisy) self.local_wifi_server.set_parser(ParseRaw(gains=self.gains, board_type=self.board_type)) if self.high_speed: output_style = 'raw' else: output_style = 'json' res_tcp_post = requests.post(('http://%s/tcp' % self.ip_address), json={'ip': self.local_ip_address, 'port': self.local_wifi_server_port, 'output': output_style, 'delimiter': True, 'latency': self.latency}) if (res_tcp_post.status_code == 200): tcp_status = res_tcp_post.json() if tcp_status['connected']: if self.log: print('WiFi Shield to Python TCP Socket Established') else: raise RuntimeWarning('WiFi Shield is not able to connect to local server. Please open an issue.')
def init_streaming(self): ' Tell the board to record like crazy. ' res_stream_start = requests.get(('http://%s/stream/start' % self.ip_address)) if (res_stream_start.status_code == 200): self.streaming = True self.packets_dropped = 0 self.time_last_packet = timeit.default_timer() else: raise EnvironmentError(('Unable to start streaming. Check API for status code %d on /stream/start' % res_stream_start.status_code))
-482,764,792,112,558,660
Tell the board to record like crazy.
openbci/wifi.py
init_streaming
daniellasry/OpenBCI_Python
python
def init_streaming(self): ' ' res_stream_start = requests.get(('http://%s/stream/start' % self.ip_address)) if (res_stream_start.status_code == 200): self.streaming = True self.packets_dropped = 0 self.time_last_packet = timeit.default_timer() else: raise EnvironmentError(('Unable to start streaming. Check API for status code %d on /stream/start' % res_stream_start.status_code))
def find_wifi_shield(self, shield_name=None, wifi_shield_cb=None): 'Detects Ganglion board MAC address -- if more than 1 around, will select first. Needs root privilege.' if self.log: print('Try to find WiFi shields on your local wireless network') print(('Scanning for %d seconds nearby devices...' % self.timeout)) list_ip = [] list_id = [] found_shield = False def wifi_shield_found(response): res = requests.get(response.location, verify=False).text device_description = xmltodict.parse(res) cur_shield_name = str(device_description['root']['device']['serialNumber']) cur_base_url = str(device_description['root']['URLBase']) cur_ip_address = re.findall('[0-9]+(?:\\.[0-9]+){3}', cur_base_url)[0] list_id.append(cur_shield_name) list_ip.append(cur_ip_address) found_shield = True if (shield_name is None): print(('Found WiFi Shield %s with IP Address %s' % (cur_shield_name, cur_ip_address))) if (wifi_shield_cb is not None): wifi_shield_cb(cur_ip_address) elif (shield_name == cur_shield_name): if (wifi_shield_cb is not None): wifi_shield_cb(cur_ip_address) ssdp_hits = ssdp.discover('urn:schemas-upnp-org:device:Basic:1', timeout=self.timeout, wifi_found_cb=wifi_shield_found) nb_wifi_shields = len(list_id) if (nb_wifi_shields < 1): print('No WiFi Shields found ;(') raise OSError('Cannot find OpenBCI WiFi Shield with local name') if (nb_wifi_shields > 1): print(((((('Found ' + str(nb_wifi_shields)) + ', selecting first named: ') + list_id[0]) + ' with IPV4: ') + list_ip[0])) return list_ip[0]
9,049,252,316,412,233,000
Discovers OpenBCI WiFi Shields on the local wireless network via SSDP -- if more than one is found, the first is selected.
openbci/wifi.py
find_wifi_shield
daniellasry/OpenBCI_Python
python
def find_wifi_shield(self, shield_name=None, wifi_shield_cb=None): if self.log: print('Try to find WiFi shields on your local wireless network') print(('Scanning for %d seconds nearby devices...' % self.timeout)) list_ip = [] list_id = [] found_shield = False def wifi_shield_found(response): res = requests.get(response.location, verify=False).text device_description = xmltodict.parse(res) cur_shield_name = str(device_description['root']['device']['serialNumber']) cur_base_url = str(device_description['root']['URLBase']) cur_ip_address = re.findall('[0-9]+(?:\\.[0-9]+){3}', cur_base_url)[0] list_id.append(cur_shield_name) list_ip.append(cur_ip_address) found_shield = True if (shield_name is None): print(('Found WiFi Shield %s with IP Address %s' % (cur_shield_name, cur_ip_address))) if (wifi_shield_cb is not None): wifi_shield_cb(cur_ip_address) elif (shield_name == cur_shield_name): if (wifi_shield_cb is not None): wifi_shield_cb(cur_ip_address) ssdp_hits = ssdp.discover('urn:schemas-upnp-org:device:Basic:1', timeout=self.timeout, wifi_found_cb=wifi_shield_found) nb_wifi_shields = len(list_id) if (nb_wifi_shields < 1): print('No WiFi Shields found ;(') raise OSError('Cannot find OpenBCI WiFi Shield with local name') if (nb_wifi_shields > 1): print(((((('Found ' + str(nb_wifi_shields)) + ', selecting first named: ') + list_id[0]) + ' with IPV4: ') + list_ip[0])) return list_ip[0]
def wifi_write(self, output): '\n Pass through commands from the WiFi Shield to the Carrier board\n :param output:\n :return:\n ' res_command_post = requests.post(('http://%s/command' % self.ip_address), json={'command': output}) if (res_command_post.status_code == 200): ret_val = res_command_post.text if self.log: print(ret_val) return ret_val else: if self.log: print(('Error code: %d %s' % (res_command_post.status_code, res_command_post.text))) raise RuntimeError(('Error code: %d %s' % (res_command_post.status_code, res_command_post.text)))
-9,185,579,094,860,822,000
Pass through commands from the WiFi Shield to the Carrier board :param output: :return:
openbci/wifi.py
wifi_write
daniellasry/OpenBCI_Python
python
def wifi_write(self, output): '\n Pass through commands from the WiFi Shield to the Carrier board\n :param output:\n :return:\n ' res_command_post = requests.post(('http://%s/command' % self.ip_address), json={'command': output}) if (res_command_post.status_code == 200): ret_val = res_command_post.text if self.log: print(ret_val) return ret_val else: if self.log: print(('Error code: %d %s' % (res_command_post.status_code, res_command_post.text))) raise RuntimeError(('Error code: %d %s' % (res_command_post.status_code, res_command_post.text)))
def getNbEEGChannels(self): 'Will not get new data on impedance check.' return self.eeg_channels_per_sample
-2,454,272,197,531,395,600
Returns the number of EEG channels per sample; will not get new data during an impedance check.
openbci/wifi.py
getNbEEGChannels
daniellasry/OpenBCI_Python
python
def getNbEEGChannels(self): return self.eeg_channels_per_sample
def start_streaming(self, callback, lapse=(- 1)): '\n Start handling streaming data from the board. Call a provided callback\n for every single sample that is processed\n\n Args:\n callback: A callback function -- or a list of functions -- that will receive a single argument of the\n OpenBCISample object captured.\n ' start_time = timeit.default_timer() if (not isinstance(callback, list)): self.local_wifi_server.set_callback(callback) else: self.local_wifi_server.set_callback(callback[0]) if (not self.streaming): self.init_streaming()
4,880,693,691,809,626,000
Start handling streaming data from the board. Call a provided callback for every single sample that is processed Args: callback: A callback function -- or a list of functions -- that will receive a single argument of the OpenBCISample object captured.
openbci/wifi.py
start_streaming
daniellasry/OpenBCI_Python
python
def start_streaming(self, callback, lapse=(- 1)): '\n Start handling streaming data from the board. Call a provided callback\n for every single sample that is processed\n\n Args:\n callback: A callback function -- or a list of functions -- that will receive a single argument of the\n OpenBCISample object captured.\n ' start_time = timeit.default_timer() if (not isinstance(callback, list)): self.local_wifi_server.set_callback(callback) else: self.local_wifi_server.set_callback(callback[0]) if (not self.streaming): self.init_streaming()
def test_signal(self, signal): ' Enable / disable test signal ' if (signal == 0): self.warn('Disabling synthetic square wave') try: self.wifi_write(']') except Exception as e: print(('Something went wrong while setting signal: ' + str(e))) elif (signal == 1): self.warn('Enabling synthetic square wave') try: self.wifi_write('[') except Exception as e: print(('Something went wrong while setting signal: ' + str(e))) else: self.warn(('%s is not a known test signal. Valid signal is 0-1' % signal))
7,795,635,748,368,347,000
Enable / disable test signal
openbci/wifi.py
test_signal
daniellasry/OpenBCI_Python
python
def test_signal(self, signal): ' ' if (signal == 0): self.warn('Disabling synthetic square wave') try: self.wifi_write(']') except Exception as e: print(('Something went wrong while setting signal: ' + str(e))) elif (signal == 1): self.warn('Enabling synthetic square wave') try: self.wifi_write('[') except Exception as e: print(('Something went wrong while setting signal: ' + str(e))) else: self.warn(('%s is not a known test signal. Valid signal is 0-1' % signal))
def set_channel(self, channel, toggle_position): ' Enable / disable channels ' try: if (channel > self.num_channels): raise ValueError('Cannot set non-existant channel') if (toggle_position == 1): if (channel is 1): self.wifi_write('!') if (channel is 2): self.wifi_write('@') if (channel is 3): self.wifi_write('#') if (channel is 4): self.wifi_write('$') if (channel is 5): self.wifi_write('%') if (channel is 6): self.wifi_write('^') if (channel is 7): self.wifi_write('&') if (channel is 8): self.wifi_write('*') if (channel is 9): self.wifi_write('Q') if (channel is 10): self.wifi_write('W') if (channel is 11): self.wifi_write('E') if (channel is 12): self.wifi_write('R') if (channel is 13): self.wifi_write('T') if (channel is 14): self.wifi_write('Y') if (channel is 15): self.wifi_write('U') if (channel is 16): self.wifi_write('I') elif (toggle_position == 0): if (channel is 1): self.wifi_write('1') if (channel is 2): self.wifi_write('2') if (channel is 3): self.wifi_write('3') if (channel is 4): self.wifi_write('4') if (channel is 5): self.wifi_write('5') if (channel is 6): self.wifi_write('6') if (channel is 7): self.wifi_write('7') if (channel is 8): self.wifi_write('8') if (channel is 9): self.wifi_write('q') if (channel is 10): self.wifi_write('w') if (channel is 11): self.wifi_write('e') if (channel is 12): self.wifi_write('r') if (channel is 13): self.wifi_write('t') if (channel is 14): self.wifi_write('y') if (channel is 15): self.wifi_write('u') if (channel is 16): self.wifi_write('i') except Exception as e: print(('Something went wrong while setting channels: ' + str(e)))
-451,990,832,647,603,140
Enable / disable channels
openbci/wifi.py
set_channel
daniellasry/OpenBCI_Python
python
def set_channel(self, channel, toggle_position): ' ' try: if (channel > self.num_channels): raise ValueError('Cannot set non-existant channel') if (toggle_position == 1): if (channel is 1): self.wifi_write('!') if (channel is 2): self.wifi_write('@') if (channel is 3): self.wifi_write('#') if (channel is 4): self.wifi_write('$') if (channel is 5): self.wifi_write('%') if (channel is 6): self.wifi_write('^') if (channel is 7): self.wifi_write('&') if (channel is 8): self.wifi_write('*') if (channel is 9): self.wifi_write('Q') if (channel is 10): self.wifi_write('W') if (channel is 11): self.wifi_write('E') if (channel is 12): self.wifi_write('R') if (channel is 13): self.wifi_write('T') if (channel is 14): self.wifi_write('Y') if (channel is 15): self.wifi_write('U') if (channel is 16): self.wifi_write('I') elif (toggle_position == 0): if (channel is 1): self.wifi_write('1') if (channel is 2): self.wifi_write('2') if (channel is 3): self.wifi_write('3') if (channel is 4): self.wifi_write('4') if (channel is 5): self.wifi_write('5') if (channel is 6): self.wifi_write('6') if (channel is 7): self.wifi_write('7') if (channel is 8): self.wifi_write('8') if (channel is 9): self.wifi_write('q') if (channel is 10): self.wifi_write('w') if (channel is 11): self.wifi_write('e') if (channel is 12): self.wifi_write('r') if (channel is 13): self.wifi_write('t') if (channel is 14): self.wifi_write('y') if (channel is 15): self.wifi_write('u') if (channel is 16): self.wifi_write('i') except Exception as e: print(('Something went wrong while setting channels: ' + str(e)))
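Two notes on set_channel: it compares integers with `is`, which happens to work only because CPython interns small ints (`==` is the correct operator), and the whole dispatch collapses to a lookup table. A sketch with the command characters copied from the code above:

ON  = ['!', '@', '#', '$', '%', '^', '&', '*',
       'Q', 'W', 'E', 'R', 'T', 'Y', 'U', 'I']
OFF = ['1', '2', '3', '4', '5', '6', '7', '8',
       'q', 'w', 'e', 'r', 't', 'y', 'u', 'i']

def channel_command(channel, toggle_position):
    """Command character for enabling/disabling a 1-based channel."""
    if not 1 <= channel <= len(ON):
        raise ValueError('Cannot set non-existent channel')
    table = ON if toggle_position == 1 else OFF
    return table[channel - 1]

assert channel_command(3, 1) == '#'
assert channel_command(9, 0) == 'q'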
def set_sample_rate(self, sample_rate): ' Change sample rate ' try: if ((self.board_type == k.BOARD_CYTON) or (self.board_type == k.BOARD_DAISY)): if (sample_rate == 250): self.wifi_write('~6') elif (sample_rate == 500): self.wifi_write('~5') elif (sample_rate == 1000): self.wifi_write('~4') elif (sample_rate == 2000): self.wifi_write('~3') elif (sample_rate == 4000): self.wifi_write('~2') elif (sample_rate == 8000): self.wifi_write('~1') elif (sample_rate == 16000): self.wifi_write('~0') else: print(('Sample rate not supported: ' + str(sample_rate))) elif (self.board_type == k.BOARD_GANGLION): if (sample_rate == 200): self.wifi_write('~7') elif (sample_rate == 400): self.wifi_write('~6') elif (sample_rate == 800): self.wifi_write('~5') elif (sample_rate == 1600): self.wifi_write('~4') elif (sample_rate == 3200): self.wifi_write('~3') elif (sample_rate == 6400): self.wifi_write('~2') elif (sample_rate == 12800): self.wifi_write('~1') elif (sample_rate == 25600): self.wifi_write('~0') else: print(('Sample rate not supported: ' + str(sample_rate))) else: print('Board type not supported for setting sample rate') except Exception as e: print(('Something went wrong while setting sample rate: ' + str(e)))
-6,714,558,995,829,764,000
Change sample rate
openbci/wifi.py
set_sample_rate
daniellasry/OpenBCI_Python
python
def set_sample_rate(self, sample_rate): ' ' try: if ((self.board_type == k.BOARD_CYTON) or (self.board_type == k.BOARD_DAISY)): if (sample_rate == 250): self.wifi_write('~6') elif (sample_rate == 500): self.wifi_write('~5') elif (sample_rate == 1000): self.wifi_write('~4') elif (sample_rate == 2000): self.wifi_write('~3') elif (sample_rate == 4000): self.wifi_write('~2') elif (sample_rate == 8000): self.wifi_write('~1') elif (sample_rate == 16000): self.wifi_write('~0') else: print(('Sample rate not supported: ' + str(sample_rate))) elif (self.board_type == k.BOARD_GANGLION): if (sample_rate == 200): self.wifi_write('~7') elif (sample_rate == 400): self.wifi_write('~6') elif (sample_rate == 800): self.wifi_write('~5') elif (sample_rate == 1600): self.wifi_write('~4') elif (sample_rate == 3200): self.wifi_write('~3') elif (sample_rate == 6400): self.wifi_write('~2') elif (sample_rate == 12800): self.wifi_write('~1') elif (sample_rate == 25600): self.wifi_write('~0') else: print(('Sample rate not supported: ' + str(sample_rate))) else: print('Board type not supported for setting sample rate') except Exception as e: print(('Something went wrong while setting sample rate: ' + str(e)))
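The sample-rate dispatch likewise reduces to two lookup tables, one per board family. A sketch where the 'cyton' and 'ganglion' strings stand in for the k.BOARD_* constants, whose actual values are not shown in this file:

CYTON_RATES    = {250: '~6', 500: '~5', 1000: '~4', 2000: '~3',
                  4000: '~2', 8000: '~1', 16000: '~0'}
GANGLION_RATES = {200: '~7', 400: '~6', 800: '~5', 1600: '~4',
                  3200: '~3', 6400: '~2', 12800: '~1', 25600: '~0'}

def sample_rate_command(board_type, sample_rate):
    """Command string for a supported (board, rate) pair, else None."""
    table = GANGLION_RATES if board_type == 'ganglion' else CYTON_RATES
    return table.get(sample_rate)

assert sample_rate_command('cyton', 1000) == '~4'
assert sample_rate_command('ganglion', 1600) == '~4'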
def set_accelerometer(self, toggle_position): ' Enable / disable accelerometer ' try: if (self.board_type == k.BOARD_GANGLION): if (toggle_position == 1): self.wifi_write('n') elif (toggle_position == 0): self.wifi_write('N') else: print('Board type not supported for setting accelerometer') except Exception as e: print(('Something went wrong while setting accelerometer: ' + str(e)))
-4,555,134,248,605,669,400
Enable / disable accelerometer
openbci/wifi.py
set_accelerometer
daniellasry/OpenBCI_Python
python
def set_accelerometer(self, toggle_position): ' ' try: if (self.board_type == k.BOARD_GANGLION): if (toggle_position == 1): self.wifi_write('n') elif (toggle_position == 0): self.wifi_write('N') else: print('Board type not supported for setting accelerometer') except Exception as e: print(('Something went wrong while setting accelerometer: ' + str(e)))
def check_connection(self): ' Check connection quality in term of lag and number of packets drop. Reinit connection if necessary. FIXME: parameters given to the board will be lost.' if (not self.streaming): return if (self.packets_dropped > self.max_packets_to_skip): self.warn('Too many packets dropped, attempt to reconnect') self.reconnect() elif ((self.timeout > 0) and ((timeit.default_timer() - self.time_last_packet) > self.timeout)): self.warn('Too long since got new data, attempt to reconnect') self.reconnect()
-7,697,840,872,393,702,000
Check connection quality in terms of lag and number of dropped packets. Reinitialize the connection if necessary. FIXME: parameters given to the board will be lost.
openbci/wifi.py
check_connection
daniellasry/OpenBCI_Python
python
def check_connection(self): ' ' if (not self.streaming): return if (self.packets_dropped > self.max_packets_to_skip): self.warn('Too many packets dropped, attempt to reconnect') self.reconnect() elif ((self.timeout > 0) and ((timeit.default_timer() - self.time_last_packet) > self.timeout)): self.warn('Too long since got new data, attempt to reconnect') self.reconnect()
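The watchdog condition above combines a drop counter with a staleness clock built on timeit.default_timer(). A self-contained sketch of just that condition (class and attribute names mirror the record; nothing here talks to a board):

import timeit

class ConnectionWatchdog:
    def __init__(self, timeout, max_packets_to_skip):
        self.timeout = timeout                      # seconds; <= 0 disables the check
        self.max_packets_to_skip = max_packets_to_skip
        self.packets_dropped = 0
        self.time_last_packet = timeit.default_timer()

    def packet_received(self):
        # Reset both signals whenever a fresh packet arrives.
        self.packets_dropped = 0
        self.time_last_packet = timeit.default_timer()

    def needs_reconnect(self):
        if self.packets_dropped > self.max_packets_to_skip:
            return True
        return (self.timeout > 0
                and (timeit.default_timer() - self.time_last_packet) > self.timeout)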
def reconnect(self): ' In case of poor connection, will shut down and relaunch everything. FIXME: parameters given to the board will be lost.' self.warn('Reconnecting') self.stop() self.disconnect() self.connect() self.init_streaming()
4,734,633,004,977,587,000
In case of poor connection, will shut down and relaunch everything. FIXME: parameters given to the board will be lost.
openbci/wifi.py
reconnect
daniellasry/OpenBCI_Python
python
def reconnect(self): ' ' self.warn('Reconnecting') self.stop() self.disconnect() self.connect() self.init_streaming()
@used def type_from_ast(ast_node: ast.AST, visitor: Optional['NameCheckVisitor']=None, ctx: Optional[Context]=None) -> Value: 'Given an AST node representing an annotation, return a\n :class:`Value <pyanalyze.value.Value>`.\n\n :param ast_node: AST node to evaluate.\n\n :param visitor: Visitor class to use. This is used in the default\n :class:`Context` to resolve names and show errors.\n This is ignored if `ctx` is given.\n\n :param ctx: :class:`Context` to use for evaluation.\n\n ' if (ctx is None): ctx = _DefaultContext(visitor, ast_node) return _type_from_ast(ast_node, ctx)
6,403,837,150,855,721,000
Given an AST node representing an annotation, return a :class:`Value <pyanalyze.value.Value>`. :param ast_node: AST node to evaluate. :param visitor: Visitor class to use. This is used in the default :class:`Context` to resolve names and show errors. This is ignored if `ctx` is given. :param ctx: :class:`Context` to use for evaluation.
pyanalyze/annotations.py
type_from_ast
nbdaaron/pyanalyze
python
@used def type_from_ast(ast_node: ast.AST, visitor: Optional['NameCheckVisitor']=None, ctx: Optional[Context]=None) -> Value: 'Given an AST node representing an annotation, return a\n :class:`Value <pyanalyze.value.Value>`.\n\n :param ast_node: AST node to evaluate.\n\n :param visitor: Visitor class to use. This is used in the default\n :class:`Context` to resolve names and show errors.\n This is ignored if `ctx` is given.\n\n :param ctx: :class:`Context` to use for evaluation.\n\n ' if (ctx is None): ctx = _DefaultContext(visitor, ast_node) return _type_from_ast(ast_node, ctx)
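A hypothetical call, assuming pyanalyze is installed and exports type_from_ast as shown above. Without a visitor the default context may resolve names only partially; pyanalyze itself passes its NameCheckVisitor:

import ast
from pyanalyze.annotations import type_from_ast  # assumes pyanalyze is installed

# An annotation source such as 'int' parses (in eval mode) to an ast.Name node.
annotation_node = ast.parse('int', mode='eval').body
print(type_from_ast(annotation_node))  # a pyanalyze Value describing the annotation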
def type_from_runtime(val: object, visitor: Optional['NameCheckVisitor']=None, node: Optional[ast.AST]=None, globals: Optional[Mapping[(str, object)]]=None, ctx: Optional[Context]=None) -> Value: 'Given a runtime annotation object, return a\n :class:`Value <pyanalyze.value.Value>`.\n\n :param val: Object to evaluate. This will usually come from an\n ``__annotations__`` dictionary.\n\n :param visitor: Visitor class to use. This is used in the default\n :class:`Context` to resolve names and show errors.\n This is ignored if `ctx` is given.\n\n :param node: AST node that the annotation derives from. This is\n used for showing errors. Ignored if `ctx` is given.\n\n :param globals: Dictionary of global variables that can be used\n to resolve names. Ignored if `ctx` is given.\n\n :param ctx: :class:`Context` to use for evaluation.\n\n ' if (ctx is None): ctx = _DefaultContext(visitor, node, globals) return _type_from_runtime(val, ctx)
-3,828,119,079,951,672,300
Given a runtime annotation object, return a :class:`Value <pyanalyze.value.Value>`. :param val: Object to evaluate. This will usually come from an ``__annotations__`` dictionary. :param visitor: Visitor class to use. This is used in the default :class:`Context` to resolve names and show errors. This is ignored if `ctx` is given. :param node: AST node that the annotation derives from. This is used for showing errors. Ignored if `ctx` is given. :param globals: Dictionary of global variables that can be used to resolve names. Ignored if `ctx` is given. :param ctx: :class:`Context` to use for evaluation.
pyanalyze/annotations.py
type_from_runtime
nbdaaron/pyanalyze
python
def type_from_runtime(val: object, visitor: Optional['NameCheckVisitor']=None, node: Optional[ast.AST]=None, globals: Optional[Mapping[(str, object)]]=None, ctx: Optional[Context]=None) -> Value: 'Given a runtime annotation object, return a\n :class:`Value <pyanalyze.value.Value>`.\n\n :param val: Object to evaluate. This will usually come from an\n ``__annotations__`` dictionary.\n\n :param visitor: Visitor class to use. This is used in the default\n :class:`Context` to resolve names and show errors.\n This is ignored if `ctx` is given.\n\n :param node: AST node that the annotation derives from. This is\n used for showing errors. Ignored if `ctx` is given.\n\n :param globals: Dictionary of global variables that can be used\n to resolve names. Ignored if `ctx` is given.\n\n :param ctx: :class:`Context` to use for evaluation.\n\n ' if (ctx is None): ctx = _DefaultContext(visitor, node, globals) return _type_from_runtime(val, ctx)
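Runtime annotation objects usually come from an __annotations__ mapping; a hypothetical use, again assuming pyanalyze is installed:

from typing import Optional
from pyanalyze.annotations import type_from_runtime  # assumes pyanalyze is installed

def handler(x: Optional[int]) -> None:
    pass

for name, annotation in handler.__annotations__.items():
    # Passing globals() lets string annotations resolve names from this module.
    print(name, type_from_runtime(annotation, globals=globals()))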
def type_from_value(value: Value, visitor: Optional['NameCheckVisitor']=None, node: Optional[ast.AST]=None, ctx: Optional[Context]=None, is_typeddict: bool=False) -> Value: 'Given a :class:`Value <pyanalyze.value.Value>` representing an annotation,\n return a :class:`Value <pyanalyze.value.Value>` representing the type.\n\n The input value represents an expression, the output value represents\n a type. For example, the :term:`impl` of ``typing.cast(typ, val)``\n calls :func:`type_from_value` on the value it receives for its\n `typ` argument and returns the result.\n\n :param value: :class:`Value <pyanalyze.value.Value>` to evaluate.\n\n :param visitor: Visitor class to use. This is used in the default\n :class:`Context` to resolve names and show errors.\n This is ignored if `ctx` is given.\n\n :param node: AST node that the annotation derives from. This is\n used for showing errors. Ignored if `ctx` is given.\n\n :param ctx: :class:`Context` to use for evaluation.\n\n :param is_typeddict: Whether we are at the top level of a `TypedDict`\n definition.\n\n ' if (ctx is None): ctx = _DefaultContext(visitor, node) return _type_from_value(value, ctx, is_typeddict=is_typeddict)
4,292,624,891,465,387,000
Given a :class:`Value <pyanalyze.value.Value>` representing an annotation, return a :class:`Value <pyanalyze.value.Value>` representing the type. The input value represents an expression, the output value represents a type. For example, the :term:`impl` of ``typing.cast(typ, val)`` calls :func:`type_from_value` on the value it receives for its `typ` argument and returns the result. :param value: :class:`Value <pyanalyze.value.Value>` to evaluate. :param visitor: Visitor class to use. This is used in the default :class:`Context` to resolve names and show errors. This is ignored if `ctx` is given. :param node: AST node that the annotation derives from. This is used for showing errors. Ignored if `ctx` is given. :param ctx: :class:`Context` to use for evaluation. :param is_typeddict: Whether we are at the top level of a `TypedDict` definition.
pyanalyze/annotations.py
type_from_value
nbdaaron/pyanalyze
python
def type_from_value(value: Value, visitor: Optional['NameCheckVisitor']=None, node: Optional[ast.AST]=None, ctx: Optional[Context]=None, is_typeddict: bool=False) -> Value: 'Given a :class:`Value <pyanalyze.value.Value>` representing an annotation,\n return a :class:`Value <pyanalyze.value.Value>` representing the type.\n\n The input value represents an expression, the output value represents\n a type. For example, the :term:`impl` of ``typing.cast(typ, val)``\n calls :func:`type_from_value` on the value it receives for its\n `typ` argument and returns the result.\n\n :param value: :class:`Value <pyanalyze.value.Value>` to evaluate.\n\n :param visitor: Visitor class to use. This is used in the default\n :class:`Context` to resolve names and show errors.\n This is ignored if `ctx` is given.\n\n :param node: AST node that the annotation derives from. This is\n used for showing errors. Ignored if `ctx` is given.\n\n :param ctx: :class:`Context` to use for evaluation.\n\n :param is_typeddict: Whether we are at the top level of a `TypedDict`\n definition.\n\n ' if (ctx is None): ctx = _DefaultContext(visitor, node) return _type_from_value(value, ctx, is_typeddict=is_typeddict)
def suppress_undefined_names(self) -> ContextManager[None]: 'Temporarily suppress errors about undefined names.' return qcore.override(self, 'should_suppress_undefined_names', True)
-5,153,936,227,865,077,000
Temporarily suppress errors about undefined names.
pyanalyze/annotations.py
suppress_undefined_names
nbdaaron/pyanalyze
python
def suppress_undefined_names(self) -> ContextManager[None]: return qcore.override(self, 'should_suppress_undefined_names', True)
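qcore.override is a small context manager that swaps an attribute and restores it on exit, which is all the suppression above needs; its signature is confirmed by the call in the record. A self-contained illustration of the pattern (assuming qcore is installed; the Ctx class below is made up):

import qcore

class Ctx:
    should_suppress_undefined_names = False

ctx = Ctx()
with qcore.override(ctx, 'should_suppress_undefined_names', True):
    assert ctx.should_suppress_undefined_names       # flag is on inside the block
assert not ctx.should_suppress_undefined_names       # restored on exit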
def show_error(self, message: str, error_code: ErrorCode=ErrorCode.invalid_annotation, node: Optional[ast.AST]=None) -> None: 'Show an error found while evaluating an annotation.' pass
8,157,879,884,960,985,000
Show an error found while evaluating an annotation.
pyanalyze/annotations.py
show_error
nbdaaron/pyanalyze
python
def show_error(self, message: str, error_code: ErrorCode=ErrorCode.invalid_annotation, node: Optional[ast.AST]=None) -> None: pass
def get_name(self, node: ast.Name) -> Value: 'Return the :class:`Value <pyanalyze.value.Value>` corresponding to a name.' return AnyValue(AnySource.inference)
-6,227,226,071,517,584,000
Return the :class:`Value <pyanalyze.value.Value>` corresponding to a name.
pyanalyze/annotations.py
get_name
nbdaaron/pyanalyze
python
def get_name(self, node: ast.Name) -> Value: return AnyValue(AnySource.inference)
def get_name(self, node: ast.Name) -> Value: 'Return the :class:`Value <pyanalyze.value.Value>` corresponding to a name.' return self.get_name_from_globals(node.id, self.globals)
4,904,100,106,146,036,000
Return the :class:`Value <pyanalyze.value.Value>` corresponding to a name.
pyanalyze/annotations.py
get_name
nbdaaron/pyanalyze
python
def get_name(self, node: ast.Name) -> Value: return self.get_name_from_globals(node.id, self.globals)
def get_name(self, node: ast.Name) -> Value: 'Return the :class:`Value <pyanalyze.value.Value>` corresponding to a name.' return self.annotations_context.get_name(node)
7,946,218,256,446,995,000
Return the :class:`Value <pyanalyze.value.Value>` corresponding to a name.
pyanalyze/annotations.py
get_name
nbdaaron/pyanalyze
python
def get_name(self, node: ast.Name) -> Value: return self.annotations_context.get_name(node)
def __virtual__(): '\n Only return if python-etcd is installed\n ' return (__virtualname__ if HAS_LIBS else False)
337,201,942,222,086,850
Only return if python-etcd is installed
salt/pillar/vmware_pillar.py
__virtual__
aaannz/salt-1
python
def __virtual__(): '\n \n ' return (__virtualname__ if HAS_LIBS else False)
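For context, the Salt convention exercised above: a module's __virtual__ returns its __virtualname__ only when its dependencies imported cleanly, otherwise False so the loader skips it. A generic sketch of the idiom (module and names illustrative, not this pillar's actual dependency check):

try:
    import pyVmomi  # noqa: F401  # the client library this module would need
    HAS_LIBS = True
except ImportError:
    HAS_LIBS = False

__virtualname__ = 'vmware'

def __virtual__():
    # Advertise the module only when its client library is importable.
    return __virtualname__ if HAS_LIBS else False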
def ext_pillar(minion_id, pillar, **kwargs): '\n Check vmware/vcenter for all data\n ' vmware_pillar = {} host = None username = None password = None property_types = [] property_name = 'name' protocol = None port = None pillar_key = 'vmware' replace_default_attributes = False type_specific_pillar_attributes = {'VirtualMachine': [{'config': ['version', 'guestId', 'files', 'tools', 'flags', 'memoryHotAddEnabled', 'cpuHotAddEnabled', 'cpuHotRemoveEnabled', 'datastoreUrl', 'swapPlacement', 'bootOptions', 'scheduledHardwareUpgradeInfo', 'memoryAllocation', 'cpuAllocation']}, {'summary': [{'runtime': [{'host': ['name', {'parent': 'name'}]}, 'bootTime']}, {'guest': ['toolsStatus', 'toolsVersionStatus', 'toolsVersionStatus2', 'toolsRunningStatus']}, {'config': ['cpuReservation', 'memoryReservation']}, {'storage': ['committed', 'uncommitted', 'unshared']}, {'dasVmProtection': ['dasProtected']}]}, {'storage': [{'perDatastoreUsage': [{'datastore': 'name'}, 'committed', 'uncommitted', 'unshared']}]}], 'HostSystem': [{'datastore': ['name', 'overallStatus', {'summary': ['url', 'freeSpace', 'maxFileSize', 'maxVirtualDiskCapacity', 'maxPhysicalRDMFileSize', 'maxVirtualRDMFileSize', {'vmfs': ['capacity', 'blockSizeMb', 'maxBlocks', 'majorVersion', 'version', 'uuid', {'extent': ['diskName', 'partition']}, 'vmfsUpgradeable', 'ssd', 'local']}]}, {'vm': 'name'}]}, {'vm': ['name', 'overallStatus', {'summary': [{'runtime': 'powerState'}]}]}]} pillar_attributes = [{'summary': ['overallStatus']}, {'network': ['name', {'config': {'distributedVirtualSwitch': 'name'}}]}, {'datastore': ['name']}, {'parent': ['name']}] if ('pillar_key' in kwargs): pillar_key = kwargs['pillar_key'] vmware_pillar[pillar_key] = {} if ('host' not in kwargs): log.error('VMWare external pillar configured but host is not specified in ext_pillar configuration.') return vmware_pillar else: host = kwargs['host'] log.debug('vmware_pillar -- host = %s', host) if ('username' not in kwargs): log.error('VMWare external pillar requested but username is not specified in ext_pillar configuration.') return vmware_pillar else: username = kwargs['username'] log.debug('vmware_pillar -- username = %s', username) if ('password' not in kwargs): log.error('VMWare external pillar requested but password is not specified in ext_pillar configuration.') return vmware_pillar else: password = kwargs['password'] log.debug('vmware_pillar -- password = %s', password) if ('replace_default_attributes' in kwargs): replace_default_attributes = kwargs['replace_default_attributes'] if replace_default_attributes: pillar_attributes = [] type_specific_pillar_attributes = {} if ('property_types' in kwargs): for prop_type in kwargs['property_types']: if isinstance(prop_type, dict): property_types.append(getattr(vim, prop_type.keys()[0])) if isinstance(prop_type[prop_type.keys()[0]], list): pillar_attributes = (pillar_attributes + prop_type[prop_type.keys()[0]]) else: log.warning('A property_type dict was specified, but its value is not a list') else: property_types.append(getattr(vim, prop_type)) else: property_types = [vim.VirtualMachine] log.debug('vmware_pillar -- property_types = %s', property_types) if ('property_name' in kwargs): property_name = kwargs['property_name'] else: property_name = 'name' log.debug('vmware_pillar -- property_name = %s', property_name) if ('protocol' in kwargs): protocol = kwargs['protocol'] log.debug('vmware_pillar -- protocol = %s', protocol) if ('port' in kwargs): port = kwargs['port'] log.debug('vmware_pillar -- port = %s', port) virtualgrain = None osgrain = None if ('virtual' in __grains__): virtualgrain = __grains__['virtual'].lower() if ('os' in __grains__): osgrain = __grains__['os'].lower() if ((virtualgrain == 'vmware') or (osgrain == 'vmware esxi') or (osgrain == 'esxi')): vmware_pillar[pillar_key] = {} try: _conn = salt.utils.vmware.get_service_instance(host, username, password, protocol, port, verify_ssl=kwargs.get('verify_ssl', True)) if _conn: data = None for prop_type in property_types: data = salt.utils.vmware.get_mor_by_property(_conn, prop_type, minion_id, property_name=property_name) if data: type_name = type(data).__name__.replace('vim.', '') if hasattr(data, 'availableField'): vmware_pillar[pillar_key]['annotations'] = {} for availableField in data.availableField: for customValue in data.customValue: if (availableField.key == customValue.key): vmware_pillar[pillar_key]['annotations'][availableField.name] = customValue.value type_specific_pillar_attribute = [] if (type_name in type_specific_pillar_attributes): type_specific_pillar_attribute = type_specific_pillar_attributes[type_name] vmware_pillar[pillar_key] = dictupdate.update(vmware_pillar[pillar_key], _crawl_attribute(data, (pillar_attributes + type_specific_pillar_attribute))) break Disconnect(_conn) else: log.error('Unable to obtain a connection with %s, please verify your vmware ext_pillar configuration', host) except RuntimeError: log.error('A runtime error occurred in the vmware_pillar, this is likely caused by an infinite recursion in a requested attribute. Verify your requested attributes and reconfigure the pillar.') return vmware_pillar else: return {}
-1,612,344,056,009,843,000
Check vmware/vcenter for all data
salt/pillar/vmware_pillar.py
ext_pillar
aaannz/salt-1
python
def ext_pillar(minion_id, pillar, **kwargs): '\n \n ' vmware_pillar = {} host = None username = None password = None property_types = [] property_name = 'name' protocol = None port = None pillar_key = 'vmware' replace_default_attributes = False type_specific_pillar_attributes = {'VirtualMachine': [{'config': ['version', 'guestId', 'files', 'tools', 'flags', 'memoryHotAddEnabled', 'cpuHotAddEnabled', 'cpuHotRemoveEnabled', 'datastoreUrl', 'swapPlacement', 'bootOptions', 'scheduledHardwareUpgradeInfo', 'memoryAllocation', 'cpuAllocation']}, {'summary': [{'runtime': [{'host': ['name', {'parent': 'name'}]}, 'bootTime']}, {'guest': ['toolsStatus', 'toolsVersionStatus', 'toolsVersionStatus2', 'toolsRunningStatus']}, {'config': ['cpuReservation', 'memoryReservation']}, {'storage': ['committed', 'uncommitted', 'unshared']}, {'dasVmProtection': ['dasProtected']}]}, {'storage': [{'perDatastoreUsage': [{'datastore': 'name'}, 'committed', 'uncommitted', 'unshared']}]}], 'HostSystem': [{'datastore': ['name', 'overallStatus', {'summary': ['url', 'freeSpace', 'maxFileSize', 'maxVirtualDiskCapacity', 'maxPhysicalRDMFileSize', 'maxVirtualRDMFileSize', {'vmfs': ['capacity', 'blockSizeMb', 'maxBlocks', 'majorVersion', 'version', 'uuid', {'extent': ['diskName', 'partition']}, 'vmfsUpgradeable', 'ssd', 'local']}]}, {'vm': 'name'}]}, {'vm': ['name', 'overallStatus', {'summary': [{'runtime': 'powerState'}]}]}]} pillar_attributes = [{'summary': ['overallStatus']}, {'network': ['name', {'config': {'distributedVirtualSwitch': 'name'}}]}, {'datastore': ['name']}, {'parent': ['name']}] if ('pillar_key' in kwargs): pillar_key = kwargs['pillar_key'] vmware_pillar[pillar_key] = {} if ('host' not in kwargs): log.error('VMWare external pillar configured but host is not specified in ext_pillar configuration.') return vmware_pillar else: host = kwargs['host'] log.debug('vmware_pillar -- host = %s', host) if ('username' not in kwargs): log.error('VMWare external pillar requested but username is not specified in ext_pillar configuration.') return vmware_pillar else: username = kwargs['username'] log.debug('vmware_pillar -- username = %s', username) if ('password' not in kwargs): log.error('VMWare external pillar requested but password is not specified in ext_pillar configuration.') return vmware_pillar else: password = kwargs['password'] log.debug('vmware_pillar -- password = %s', password) if ('replace_default_attributes' in kwargs): replace_default_attributes = kwargs['replace_default_attributes'] if replace_default_attributes: pillar_attributes = [] type_specific_pillar_attributes = {} if ('property_types' in kwargs): for prop_type in kwargs['property_types']: if isinstance(prop_type, dict): property_types.append(getattr(vim, prop_type.keys()[0])) if isinstance(prop_type[prop_type.keys()[0]], list): pillar_attributes = (pillar_attributes + prop_type[prop_type.keys()[0]]) else: log.warning('A property_type dict was specified, but its value is not a list') else: property_types.append(getattr(vim, prop_type)) else: property_types = [vim.VirtualMachine] log.debug('vmware_pillar -- property_types = %s', property_types) if ('property_name' in kwargs): property_name = kwargs['property_name'] else: property_name = 'name' log.debug('vmware_pillar -- property_name = %s', property_name) if ('protocol' in kwargs): protocol = kwargs['protocol'] log.debug('vmware_pillar -- protocol = %s', protocol) if ('port' in kwargs): port = kwargs['port'] log.debug('vmware_pillar -- port = %s', port) virtualgrain = None osgrain = None if ('virtual' in __grains__): virtualgrain = __grains__['virtual'].lower() if ('os' in __grains__): osgrain = __grains__['os'].lower() if ((virtualgrain == 'vmware') or (osgrain == 'vmware esxi') or (osgrain == 'esxi')): vmware_pillar[pillar_key] = {} try: _conn = salt.utils.vmware.get_service_instance(host, username, password, protocol, port, verify_ssl=kwargs.get('verify_ssl', True)) if _conn: data = None for prop_type in property_types: data = salt.utils.vmware.get_mor_by_property(_conn, prop_type, minion_id, property_name=property_name) if data: type_name = type(data).__name__.replace('vim.', '') if hasattr(data, 'availableField'): vmware_pillar[pillar_key]['annotations'] = {} for availableField in data.availableField: for customValue in data.customValue: if (availableField.key == customValue.key): vmware_pillar[pillar_key]['annotations'][availableField.name] = customValue.value type_specific_pillar_attribute = [] if (type_name in type_specific_pillar_attributes): type_specific_pillar_attribute = type_specific_pillar_attributes[type_name] vmware_pillar[pillar_key] = dictupdate.update(vmware_pillar[pillar_key], _crawl_attribute(data, (pillar_attributes + type_specific_pillar_attribute))) break Disconnect(_conn) else: log.error('Unable to obtain a connection with %s, please verify your vmware ext_pillar configuration', host) except RuntimeError: log.error('A runtime error occurred in the vmware_pillar, this is likely caused by an infinite recursion in a requested attribute. Verify your requested attributes and reconfigure the pillar.') return vmware_pillar else: return {}
def _recurse_config_to_dict(t_data): '\n helper function to recurse through a vim object and attempt to return all child objects\n ' if (not isinstance(t_data, type(None))): if isinstance(t_data, list): t_list = [] for i in t_data: t_list.append(_recurse_config_to_dict(i)) return t_list elif isinstance(t_data, dict): t_dict = {} for (k, v) in six.iteritems(t_data): t_dict[k] = _recurse_config_to_dict(v) return t_dict elif hasattr(t_data, '__dict__'): return _recurse_config_to_dict(t_data.__dict__) else: return _serializer(t_data)
7,516,975,427,656,124,000
helper function to recurse through a vim object and attempt to return all child objects
salt/pillar/vmware_pillar.py
_recurse_config_to_dict
aaannz/salt-1
python
def _recurse_config_to_dict(t_data): '\n \n ' if (not isinstance(t_data, type(None))): if isinstance(t_data, list): t_list = [] for i in t_data: t_list.append(_recurse_config_to_dict(i)) return t_list elif isinstance(t_data, dict): t_dict = {} for (k, v) in six.iteritems(t_data): t_dict[k] = _recurse_config_to_dict(v) return t_dict elif hasattr(t_data, '__dict__'): return _recurse_config_to_dict(t_data.__dict__) else: return _serializer(t_data)
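To see what the recursion does, types.SimpleNamespace can stand in for a pyVmomi managed object, since the helper unwraps anything with a __dict__. This sketch assumes _recurse_config_to_dict and its six/_serializer dependencies from this file are importable:

from types import SimpleNamespace

# A two-level object tree; strings have no instance __dict__, so they
# fall through to _serializer and come back unchanged.
vm = SimpleNamespace(name='vm01',
                     runtime=SimpleNamespace(powerState='poweredOn'))
print(_recurse_config_to_dict(vm))
# -> {'name': 'vm01', 'runtime': {'powerState': 'poweredOn'}}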
def _crawl_attribute(this_data, this_attr): '\n helper function to crawl an attribute specified for retrieval\n ' if isinstance(this_data, list): t_list = [] for d in this_data: t_list.append(_crawl_attribute(d, this_attr)) return t_list elif isinstance(this_attr, dict): t_dict = {} for k in this_attr: if hasattr(this_data, k): t_dict[k] = _crawl_attribute(getattr(this_data, k, None), this_attr[k]) return t_dict elif isinstance(this_attr, list): this_dict = {} for l in this_attr: this_dict = dictupdate.update(this_dict, _crawl_attribute(this_data, l)) return this_dict else: return {this_attr: _recurse_config_to_dict(getattr(this_data, this_attr, None))}
-6,944,335,053,640,380,000
helper function to crawl an attribute specified for retrieval
salt/pillar/vmware_pillar.py
_crawl_attribute
aaannz/salt-1
python
def _crawl_attribute(this_data, this_attr): '\n \n ' if isinstance(this_data, list): t_list = [] for d in this_data: t_list.append(_crawl_attribute(d, this_attr)) return t_list elif isinstance(this_attr, dict): t_dict = {} for k in this_attr: if hasattr(this_data, k): t_dict[k] = _crawl_attribute(getattr(this_data, k, None), this_attr[k]) return t_dict elif isinstance(this_attr, list): this_dict = {} for l in this_attr: this_dict = dictupdate.update(this_dict, _crawl_attribute(this_data, l)) return this_dict else: return {this_attr: _recurse_config_to_dict(getattr(this_data, this_attr, None))}
def _serializer(obj): '\n helper function to serialize some objects for prettier return\n ' import datetime if isinstance(obj, datetime.datetime): if (obj.utcoffset() is not None): obj = (obj - obj.utcoffset()) return obj.__str__() return obj
-3,098,185,855,057,940,000
helper function to serialize some objects for prettier return
salt/pillar/vmware_pillar.py
_serializer
aaannz/salt-1
python
def _serializer(obj): '\n \n ' import datetime if isinstance(obj, datetime.datetime): if (obj.utcoffset() is not None): obj = (obj - obj.utcoffset()) return obj.__str__() return obj
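Concretely, for an aware datetime the helper above subtracts the UTC offset (the tzinfo itself is kept) and stringifies; everything else passes through unchanged. Assuming _serializer from this record is in scope:

import datetime

tz = datetime.timezone(datetime.timedelta(hours=2))
aware = datetime.datetime(2024, 1, 1, 12, 0, tzinfo=tz)
print(_serializer(aware))    # '2024-01-01 10:00:00+02:00' (wall time shifted back by the offset)
print(_serializer('plain'))  # non-datetimes are returned as-is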
def sjoin(left_df, right_df, how='inner', op='intersects', lsuffix='left', rsuffix='right'): "Spatial join of two GeoDataFrames.\n\n Parameters\n ----------\n left_df, right_df : GeoDataFrames\n how : string, default 'inner'\n The type of join:\n\n * 'left': use keys from left_df; retain only left_df geometry column\n * 'right': use keys from right_df; retain only right_df geometry column\n * 'inner': use intersection of keys from both dfs; retain only\n left_df geometry column\n op : string, default 'intersects'\n Binary predicate, one of {'intersects', 'contains', 'within'}.\n See http://shapely.readthedocs.io/en/latest/manual.html#binary-predicates.\n lsuffix : string, default 'left'\n Suffix to apply to overlapping column names (left GeoDataFrame).\n rsuffix : string, default 'right'\n Suffix to apply to overlapping column names (right GeoDataFrame).\n\n " if (not isinstance(left_df, GeoDataFrame)): raise ValueError("'left_df' should be GeoDataFrame, got {}".format(type(left_df))) if (not isinstance(right_df, GeoDataFrame)): raise ValueError("'right_df' should be GeoDataFrame, got {}".format(type(right_df))) allowed_hows = ['left', 'right', 'inner'] if (how not in allowed_hows): raise ValueError(('`how` was "%s" but is expected to be in %s' % (how, allowed_hows))) allowed_ops = ['contains', 'within', 'intersects'] if (op not in allowed_ops): raise ValueError(('`op` was "%s" but is expected to be in %s' % (op, allowed_ops))) if (not _check_crs(left_df, right_df)): _crs_mismatch_warn(left_df, right_df, stacklevel=3) index_left = ('index_%s' % lsuffix) index_right = ('index_%s' % rsuffix) if (any(left_df.columns.isin([index_left, index_right])) or any(right_df.columns.isin([index_left, index_right]))): raise ValueError("'{0}' and '{1}' cannot be names in the frames being joined".format(index_left, index_right)) if (right_df._sindex_generated or ((not left_df._sindex_generated) and (right_df.shape[0] > left_df.shape[0]))): tree_idx = (right_df.sindex if (len(left_df) > 0) else None) tree_idx_right = True else: tree_idx = (left_df.sindex if (len(right_df) > 0) else None) tree_idx_right = False left_df = left_df.copy(deep=True) try: left_index_name = left_df.index.name left_df.index = left_df.index.rename(index_left) except TypeError: index_left = [(('index_%s' % lsuffix) + str(pos)) for (pos, ix) in enumerate(left_df.index.names)] left_index_name = left_df.index.names left_df.index = left_df.index.rename(index_left) left_df = left_df.reset_index() right_df = right_df.copy(deep=True) try: right_index_name = right_df.index.name right_df.index = right_df.index.rename(index_right) except TypeError: index_right = [(('index_%s' % rsuffix) + str(pos)) for (pos, ix) in enumerate(right_df.index.names)] right_index_name = right_df.index.names right_df.index = right_df.index.rename(index_right) right_df = right_df.reset_index() if (op == 'within'): (left_df, right_df) = (right_df, left_df) tree_idx_right = (not tree_idx_right) r_idx = np.empty((0, 0)) l_idx = np.empty((0, 0)) if (tree_idx_right and tree_idx): idxmatch = left_df.geometry.apply((lambda x: x.bounds)).apply((lambda x: (list(tree_idx.intersection(x)) if (not (x == ())) else []))) idxmatch = idxmatch[(idxmatch.apply(len) > 0)] if (idxmatch.shape[0] > 0): r_idx = np.concatenate(idxmatch.values) l_idx = np.concatenate([([i] * len(v)) for (i, v) in idxmatch.iteritems()]) elif ((not tree_idx_right) and tree_idx): idxmatch = right_df.geometry.apply((lambda x: x.bounds)).apply((lambda x: (list(tree_idx.intersection(x)) if (not (x == ())) else []))) idxmatch = idxmatch[(idxmatch.apply(len) > 0)] if (idxmatch.shape[0] > 0): l_idx = np.concatenate(idxmatch.values) r_idx = np.concatenate([([i] * len(v)) for (i, v) in idxmatch.iteritems()]) if ((len(r_idx) > 0) and (len(l_idx) > 0)): if compat.USE_PYGEOS: import pygeos predicate_d = {'intersects': pygeos.intersects, 'contains': pygeos.contains, 'within': pygeos.contains} check_predicates = predicate_d[op] else: def find_intersects(a1, a2): return a1.intersects(a2) def find_contains(a1, a2): return a1.contains(a2) predicate_d = {'intersects': find_intersects, 'contains': find_contains, 'within': find_contains} check_predicates = np.vectorize(predicate_d[op]) if compat.USE_PYGEOS: res = check_predicates(left_df.geometry[l_idx].values.data, right_df[right_df.geometry.name][r_idx].values.data) else: res = check_predicates(left_df.geometry.apply((lambda x: prepared.prep(x)))[l_idx], right_df[right_df.geometry.name][r_idx]) result = pd.DataFrame(np.column_stack([l_idx, r_idx, res])) result.columns = ['_key_left', '_key_right', 'match_bool'] result = pd.DataFrame(result[(result['match_bool'] == 1)]).drop('match_bool', axis=1) else: result = pd.DataFrame(columns=['_key_left', '_key_right'], dtype=float) if (op == 'within'): (left_df, right_df) = (right_df, left_df) result = result.rename(columns={'_key_left': '_key_right', '_key_right': '_key_left'}) if (how == 'inner'): result = result.set_index('_key_left') joined = left_df.merge(result, left_index=True, right_index=True).merge(right_df.drop(right_df.geometry.name, axis=1), left_on='_key_right', right_index=True, suffixes=(('_%s' % lsuffix), ('_%s' % rsuffix))).set_index(index_left).drop(['_key_right'], axis=1) if isinstance(index_left, list): joined.index.names = left_index_name else: joined.index.name = left_index_name elif (how == 'left'): result = result.set_index('_key_left') joined = left_df.merge(result, left_index=True, right_index=True, how='left').merge(right_df.drop(right_df.geometry.name, axis=1), how='left', left_on='_key_right', right_index=True, suffixes=(('_%s' % lsuffix), ('_%s' % rsuffix))).set_index(index_left).drop(['_key_right'], axis=1) if isinstance(index_left, list): joined.index.names = left_index_name else: joined.index.name = left_index_name else: joined = left_df.drop(left_df.geometry.name, axis=1).merge(result.merge(right_df, left_on='_key_right', right_index=True, how='right'), left_index=True, right_on='_key_left', how='right').set_index(index_right).drop(['_key_left', '_key_right'], axis=1) if isinstance(index_right, list): joined.index.names = right_index_name else: joined.index.name = right_index_name return joined
-7,132,081,164,258,639,000
Spatial join of two GeoDataFrames. Parameters ---------- left_df, right_df : GeoDataFrames how : string, default 'inner' The type of join: * 'left': use keys from left_df; retain only left_df geometry column * 'right': use keys from right_df; retain only right_df geometry column * 'inner': use intersection of keys from both dfs; retain only left_df geometry column op : string, default 'intersects' Binary predicate, one of {'intersects', 'contains', 'within'}. See http://shapely.readthedocs.io/en/latest/manual.html#binary-predicates. lsuffix : string, default 'left' Suffix to apply to overlapping column names (left GeoDataFrame). rsuffix : string, default 'right' Suffix to apply to overlapping column names (right GeoDataFrame).
geopandas/tools/sjoin.py
sjoin
anathnathphy67/geopandas
python
def sjoin(left_df, right_df, how='inner', op='intersects', lsuffix='left', rsuffix='right'): "Spatial join of two GeoDataFrames.\n\n Parameters\n ----------\n left_df, right_df : GeoDataFrames\n how : string, default 'inner'\n The type of join:\n\n * 'left': use keys from left_df; retain only left_df geometry column\n * 'right': use keys from right_df; retain only right_df geometry column\n * 'inner': use intersection of keys from both dfs; retain only\n left_df geometry column\n op : string, default 'intersects'\n Binary predicate, one of {'intersects', 'contains', 'within'}.\n See http://shapely.readthedocs.io/en/latest/manual.html#binary-predicates.\n lsuffix : string, default 'left'\n Suffix to apply to overlapping column names (left GeoDataFrame).\n rsuffix : string, default 'right'\n Suffix to apply to overlapping column names (right GeoDataFrame).\n\n " if (not isinstance(left_df, GeoDataFrame)): raise ValueError("'left_df' should be GeoDataFrame, got {}".format(type(left_df))) if (not isinstance(right_df, GeoDataFrame)): raise ValueError("'right_df' should be GeoDataFrame, got {}".format(type(right_df))) allowed_hows = ['left', 'right', 'inner'] if (how not in allowed_hows): raise ValueError(('`how` was "%s" but is expected to be in %s' % (how, allowed_hows))) allowed_ops = ['contains', 'within', 'intersects'] if (op not in allowed_ops): raise ValueError(('`op` was "%s" but is expected to be in %s' % (op, allowed_ops))) if (not _check_crs(left_df, right_df)): _crs_mismatch_warn(left_df, right_df, stacklevel=3) index_left = ('index_%s' % lsuffix) index_right = ('index_%s' % rsuffix) if (any(left_df.columns.isin([index_left, index_right])) or any(right_df.columns.isin([index_left, index_right]))): raise ValueError("'{0}' and '{1}' cannot be names in the frames being joined".format(index_left, index_right)) if (right_df._sindex_generated or ((not left_df._sindex_generated) and (right_df.shape[0] > left_df.shape[0]))): tree_idx = (right_df.sindex if (len(left_df) > 0) else None) tree_idx_right = True else: tree_idx = (left_df.sindex if (len(right_df) > 0) else None) tree_idx_right = False left_df = left_df.copy(deep=True) try: left_index_name = left_df.index.name left_df.index = left_df.index.rename(index_left) except TypeError: index_left = [(('index_%s' % lsuffix) + str(pos)) for (pos, ix) in enumerate(left_df.index.names)] left_index_name = left_df.index.names left_df.index = left_df.index.rename(index_left) left_df = left_df.reset_index() right_df = right_df.copy(deep=True) try: right_index_name = right_df.index.name right_df.index = right_df.index.rename(index_right) except TypeError: index_right = [(('index_%s' % rsuffix) + str(pos)) for (pos, ix) in enumerate(right_df.index.names)] right_index_name = right_df.index.names right_df.index = right_df.index.rename(index_right) right_df = right_df.reset_index() if (op == 'within'): (left_df, right_df) = (right_df, left_df) tree_idx_right = (not tree_idx_right) r_idx = np.empty((0, 0)) l_idx = np.empty((0, 0)) if (tree_idx_right and tree_idx): idxmatch = left_df.geometry.apply((lambda x: x.bounds)).apply((lambda x: (list(tree_idx.intersection(x)) if (not (x == ())) else []))) idxmatch = idxmatch[(idxmatch.apply(len) > 0)] if (idxmatch.shape[0] > 0): r_idx = np.concatenate(idxmatch.values) l_idx = np.concatenate([([i] * len(v)) for (i, v) in idxmatch.iteritems()]) elif ((not tree_idx_right) and tree_idx): idxmatch = right_df.geometry.apply((lambda x: x.bounds)).apply((lambda x: (list(tree_idx.intersection(x)) if (not (x == ())) else []))) idxmatch = idxmatch[(idxmatch.apply(len) > 0)] if (idxmatch.shape[0] > 0): l_idx = np.concatenate(idxmatch.values) r_idx = np.concatenate([([i] * len(v)) for (i, v) in idxmatch.iteritems()]) if ((len(r_idx) > 0) and (len(l_idx) > 0)): if compat.USE_PYGEOS: import pygeos predicate_d = {'intersects': pygeos.intersects, 'contains': pygeos.contains, 'within': pygeos.contains} check_predicates = predicate_d[op] else: def find_intersects(a1, a2): return a1.intersects(a2) def find_contains(a1, a2): return a1.contains(a2) predicate_d = {'intersects': find_intersects, 'contains': find_contains, 'within': find_contains} check_predicates = np.vectorize(predicate_d[op]) if compat.USE_PYGEOS: res = check_predicates(left_df.geometry[l_idx].values.data, right_df[right_df.geometry.name][r_idx].values.data) else: res = check_predicates(left_df.geometry.apply((lambda x: prepared.prep(x)))[l_idx], right_df[right_df.geometry.name][r_idx]) result = pd.DataFrame(np.column_stack([l_idx, r_idx, res])) result.columns = ['_key_left', '_key_right', 'match_bool'] result = pd.DataFrame(result[(result['match_bool'] == 1)]).drop('match_bool', axis=1) else: result = pd.DataFrame(columns=['_key_left', '_key_right'], dtype=float) if (op == 'within'): (left_df, right_df) = (right_df, left_df) result = result.rename(columns={'_key_left': '_key_right', '_key_right': '_key_left'}) if (how == 'inner'): result = result.set_index('_key_left') joined = left_df.merge(result, left_index=True, right_index=True).merge(right_df.drop(right_df.geometry.name, axis=1), left_on='_key_right', right_index=True, suffixes=(('_%s' % lsuffix), ('_%s' % rsuffix))).set_index(index_left).drop(['_key_right'], axis=1) if isinstance(index_left, list): joined.index.names = left_index_name else: joined.index.name = left_index_name elif (how == 'left'): result = result.set_index('_key_left') joined = left_df.merge(result, left_index=True, right_index=True, how='left').merge(right_df.drop(right_df.geometry.name, axis=1), how='left', left_on='_key_right', right_index=True, suffixes=(('_%s' % lsuffix), ('_%s' % rsuffix))).set_index(index_left).drop(['_key_right'], axis=1) if isinstance(index_left, list): joined.index.names = left_index_name else: joined.index.name = left_index_name else: joined = left_df.drop(left_df.geometry.name, axis=1).merge(result.merge(right_df, left_on='_key_right', right_index=True, how='right'), left_index=True, right_on='_key_left', how='right').set_index(index_right).drop(['_key_left', '_key_right'], axis=1) if isinstance(index_right, list): joined.index.names = right_index_name else: joined.index.name = right_index_name return joined
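A hypothetical end-to-end call of the sjoin above, assuming the module's imports are available and a geopandas version matching this code (it relies on the old _sindex_generated attribute); the tiny frames are made up for the example:

import geopandas
from shapely.geometry import Point, Polygon

zones = geopandas.GeoDataFrame(
    {'zone': ['a']},
    geometry=[Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])])
points = geopandas.GeoDataFrame(
    {'pid': [1, 2]},
    geometry=[Point(1, 1), Point(5, 5)])

# pid 1 lands inside zone 'a'; pid 2 matches nothing and keeps NaN under 'left'.
joined = sjoin(points, zones, how='left', op='within')
print(joined[['pid', 'zone']])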
def test_minify(self): 'Tests _minify with an invalid filepath.' with self.assertRaises(subprocess.CalledProcessError) as called_process: build._minify(INVALID_INPUT_FILEPATH, INVALID_OUTPUT_FILEPATH) self.assertEqual(called_process.exception.returncode, 1)
-1,228,251,134,857,554,400
Tests _minify with an invalid filepath.
scripts/build_test.py
test_minify
muarachmann/oppia
python
def test_minify(self): with self.assertRaises(subprocess.CalledProcessError) as called_process: build._minify(INVALID_INPUT_FILEPATH, INVALID_OUTPUT_FILEPATH) self.assertEqual(called_process.exception.returncode, 1)
def test_minify_and_create_sourcemap(self): 'Tests _minify_and_create_sourcemap with an invalid filepath.' with self.assertRaises(subprocess.CalledProcessError) as called_process: build._minify_and_create_sourcemap(INVALID_INPUT_FILEPATH, INVALID_OUTPUT_FILEPATH) self.assertEqual(called_process.exception.returncode, 1)
-7,904,411,719,584,090,000
Tests _minify_and_create_sourcemap with an invalid filepath.
scripts/build_test.py
test_minify_and_create_sourcemap
muarachmann/oppia
python
def test_minify_and_create_sourcemap(self): with self.assertRaises(subprocess.CalledProcessError) as called_process: build._minify_and_create_sourcemap(INVALID_INPUT_FILEPATH, INVALID_OUTPUT_FILEPATH) self.assertEqual(called_process.exception.returncode, 1)
def test_ensure_files_exist(self): 'Test _ensure_files_exist raises exception with a non-existent\n filepath.\n ' non_existent_filepaths = [INVALID_INPUT_FILEPATH] with self.assertRaisesRegexp(OSError, ('File %s does not exist.' % non_existent_filepaths[0])): build._ensure_files_exist(non_existent_filepaths)
1,052,954,398,160,072,000
Test _ensure_files_exist raises exception with a non-existent filepath.
scripts/build_test.py
test_ensure_files_exist
muarachmann/oppia
python
def test_ensure_files_exist(self): 'Test _ensure_files_exist raises exception with a non-existent\n filepath.\n ' non_existent_filepaths = [INVALID_INPUT_FILEPATH] with self.assertRaisesRegexp(OSError, ('File %s does not exist.' % non_existent_filepaths[0])): build._ensure_files_exist(non_existent_filepaths)
def test_join_files(self): 'Determine third_party.js contains the content of the first 10 JS\n files in /third_party/static.\n ' third_party_js_stream = StringIO.StringIO() dependency_filepaths = build.get_dependencies_filepaths() build._join_files(dependency_filepaths['js'], third_party_js_stream) counter = 0 JS_FILE_COUNT = 10 for js_filepath in dependency_filepaths['js']: if (counter == JS_FILE_COUNT): break with open(js_filepath, 'r') as js_file: for line in js_file: self.assertIn(line, third_party_js_stream.getvalue()) counter += 1
5,106,008,982,036,936,000
Determine third_party.js contains the content of the first 10 JS files in /third_party/static.
scripts/build_test.py
test_join_files
muarachmann/oppia
python
def test_join_files(self): 'Determine third_party.js contains the content of the first 10 JS\n files in /third_party/static.\n ' third_party_js_stream = StringIO.StringIO() dependency_filepaths = build.get_dependencies_filepaths() build._join_files(dependency_filepaths['js'], third_party_js_stream) counter = 0 JS_FILE_COUNT = 10 for js_filepath in dependency_filepaths['js']: if (counter == JS_FILE_COUNT): break with open(js_filepath, 'r') as js_file: for line in js_file: self.assertIn(line, third_party_js_stream.getvalue()) counter += 1
def test_generate_copy_tasks_for_fonts(self): 'Test _generate_copy_tasks_for_fonts ensures that the number of copy\n tasks matches the number of font files.\n ' copy_tasks = collections.deque() dependency_filepaths = build.get_dependencies_filepaths() test_target = os.path.join('target', 'fonts', '') self.assertEqual(len(copy_tasks), 0) copy_tasks += build._generate_copy_tasks_for_fonts(dependency_filepaths['fonts'], test_target) self.assertEqual(len(copy_tasks), len(dependency_filepaths['fonts']))
-4,733,900,273,529,137,000
Test _generate_copy_tasks_for_fonts ensures that the number of copy tasks matches the number of font files.
scripts/build_test.py
test_generate_copy_tasks_for_fonts
muarachmann/oppia
python
def test_generate_copy_tasks_for_fonts(self): 'Test _generate_copy_tasks_for_fonts ensures that the number of copy\n tasks matches the number of font files.\n ' copy_tasks = collections.deque() dependency_filepaths = build.get_dependencies_filepaths() test_target = os.path.join('target', 'fonts', '') self.assertEqual(len(copy_tasks), 0) copy_tasks += build._generate_copy_tasks_for_fonts(dependency_filepaths['fonts'], test_target) self.assertEqual(len(copy_tasks), len(dependency_filepaths['fonts']))
def test_insert_hash(self): 'Test _insert_hash returns correct filenames with provided hashes.' self.assertEqual(build._insert_hash('file.js', '123456'), 'file.123456.js') self.assertEqual(build._insert_hash('path/to/file.js', '654321'), 'path/to/file.654321.js') self.assertEqual(build._insert_hash('file.min.js', 'abcdef'), 'file.min.abcdef.js') self.assertEqual(build._insert_hash('path/to/file.min.js', 'fedcba'), 'path/to/file.min.fedcba.js')
713,936,822,209,947,500
Test _insert_hash returns correct filenames with provided hashes.
scripts/build_test.py
test_insert_hash
muarachmann/oppia
python
def test_insert_hash(self): self.assertEqual(build._insert_hash('file.js', '123456'), 'file.123456.js') self.assertEqual(build._insert_hash('path/to/file.js', '654321'), 'path/to/file.654321.js') self.assertEqual(build._insert_hash('file.min.js', 'abcdef'), 'file.min.abcdef.js') self.assertEqual(build._insert_hash('path/to/file.min.js', 'fedcba'), 'path/to/file.min.fedcba.js')
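The assertions pin down _insert_hash's contract: the hash is spliced in front of the final extension only. A sketch consistent with those assertions (not necessarily build's actual implementation):

import os

def insert_hash(filepath, file_hash):
    # splitext keeps only the last extension: 'file.min.js' -> ('file.min', '.js').
    root, ext = os.path.splitext(filepath)
    return '%s.%s%s' % (root, file_hash, ext)

assert insert_hash('path/to/file.js', '654321') == 'path/to/file.654321.js'
assert insert_hash('file.min.js', 'abcdef') == 'file.min.abcdef.js'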
def test_get_file_count(self): 'Test get_file_count returns the correct number of files, excluding\n files with extensions in FILE_EXTENSIONS_TO_IGNORE and files that should\n not be built.\n ' all_inclusive_file_count = 0 for (_, _, files) in os.walk(MOCK_EXTENSIONS_DEV_DIR): all_inclusive_file_count += len(files) ignored_file_count = 0 for (_, _, files) in os.walk(MOCK_EXTENSIONS_DEV_DIR): for filename in files: if ((not build.should_file_be_built(filename)) or any((filename.endswith(p) for p in build.FILE_EXTENSIONS_TO_IGNORE))): ignored_file_count += 1 self.assertEqual((all_inclusive_file_count - ignored_file_count), build.get_file_count(MOCK_EXTENSIONS_DEV_DIR))
-4,047,248,619,172,851,000
Test get_file_count returns the correct number of files, excluding files with extensions in FILE_EXTENSIONS_TO_IGNORE and files that should not be built.
scripts/build_test.py
test_get_file_count
muarachmann/oppia
python
def test_get_file_count(self): 'Test get_file_count returns the correct number of files, excluding\n files with extensions in FILE_EXTENSIONS_TO_IGNORE and files that should\n not be built.\n ' all_inclusive_file_count = 0 for (_, _, files) in os.walk(MOCK_EXTENSIONS_DEV_DIR): all_inclusive_file_count += len(files) ignored_file_count = 0 for (_, _, files) in os.walk(MOCK_EXTENSIONS_DEV_DIR): for filename in files: if ((not build.should_file_be_built(filename)) or any((filename.endswith(p) for p in build.FILE_EXTENSIONS_TO_IGNORE))): ignored_file_count += 1 self.assertEqual((all_inclusive_file_count - ignored_file_count), build.get_file_count(MOCK_EXTENSIONS_DEV_DIR))
def test_compare_file_count(self): 'Test _compare_file_count raises exception when there is a\n mismatched file count between 2 dirs list.\n ' build.ensure_directory_exists(EMPTY_DIR) source_dir_file_count = build.get_file_count(EMPTY_DIR) assert (source_dir_file_count == 0) target_dir_file_count = build.get_file_count(MOCK_ASSETS_DEV_DIR) assert (target_dir_file_count > 0) with self.assertRaisesRegexp(ValueError, ('%s files in first dir list != %s files in second dir list' % (source_dir_file_count, target_dir_file_count))): build._compare_file_count([EMPTY_DIR], [MOCK_ASSETS_DEV_DIR]) MOCK_EXTENSIONS_DIR_LIST = [MOCK_EXTENSIONS_DEV_DIR, MOCK_EXTENSIONS_COMPILED_JS_DIR] target_dir_file_count = (build.get_file_count(MOCK_EXTENSIONS_DEV_DIR) + build.get_file_count(MOCK_EXTENSIONS_COMPILED_JS_DIR)) assert (target_dir_file_count > 0) with self.assertRaisesRegexp(ValueError, ('%s files in first dir list != %s files in second dir list' % (source_dir_file_count, target_dir_file_count))): build._compare_file_count([EMPTY_DIR], MOCK_EXTENSIONS_DIR_LIST) build.safe_delete_directory_tree(EMPTY_DIR)
7,889,284,212,739,964,000
Test _compare_file_count raises exception when there is a mismatched file count between 2 dirs list.
scripts/build_test.py
test_compare_file_count
muarachmann/oppia
python
def test_compare_file_count(self): 'Test _compare_file_count raises exception when there is a\n mismatched file count between 2 dirs list.\n ' build.ensure_directory_exists(EMPTY_DIR) source_dir_file_count = build.get_file_count(EMPTY_DIR) assert (source_dir_file_count == 0) target_dir_file_count = build.get_file_count(MOCK_ASSETS_DEV_DIR) assert (target_dir_file_count > 0) with self.assertRaisesRegexp(ValueError, ('%s files in first dir list != %s files in second dir list' % (source_dir_file_count, target_dir_file_count))): build._compare_file_count([EMPTY_DIR], [MOCK_ASSETS_DEV_DIR]) MOCK_EXTENSIONS_DIR_LIST = [MOCK_EXTENSIONS_DEV_DIR, MOCK_EXTENSIONS_COMPILED_JS_DIR] target_dir_file_count = (build.get_file_count(MOCK_EXTENSIONS_DEV_DIR) + build.get_file_count(MOCK_EXTENSIONS_COMPILED_JS_DIR)) assert (target_dir_file_count > 0) with self.assertRaisesRegexp(ValueError, ('%s files in first dir list != %s files in second dir list' % (source_dir_file_count, target_dir_file_count))): build._compare_file_count([EMPTY_DIR], MOCK_EXTENSIONS_DIR_LIST) build.safe_delete_directory_tree(EMPTY_DIR)
def test_verify_filepath_hash(self): 'Test _verify_filepath_hash raises exception:\n 1) When there is an empty hash dict.\n 2) When a filename is expected to contain hash but does not.\n 3) When there is a hash in filename that cannot be found in\n hash dict.\n ' file_hashes = dict() base_filename = 'base.html' with self.assertRaisesRegexp(ValueError, 'Hash dict is empty'): build._verify_filepath_hash(base_filename, file_hashes) file_hashes = {base_filename: random.getrandbits(128)} with self.assertRaisesRegexp(ValueError, ('%s is expected to contain MD5 hash' % base_filename)): build._verify_filepath_hash(base_filename, file_hashes) bad_filepath = 'README' with self.assertRaisesRegexp(ValueError, 'Filepath has less than 2 partitions after splitting'): build._verify_filepath_hash(bad_filepath, file_hashes) hashed_base_filename = build._insert_hash(base_filename, random.getrandbits(128)) with self.assertRaisesRegexp(KeyError, ('Hash from file named %s does not match hash dict values' % hashed_base_filename)): build._verify_filepath_hash(hashed_base_filename, file_hashes)
-1,598,340,757,166,783,200
Test _verify_filepath_hash raises exception: 1) When there is an empty hash dict. 2) When a filename is expected to contain hash but does not. 3) When there is a hash in filename that cannot be found in hash dict.
scripts/build_test.py
test_verify_filepath_hash
muarachmann/oppia
python
def test_verify_filepath_hash(self): 'Test _verify_filepath_hash raises exception:\n 1) When there is an empty hash dict.\n 2) When a filename is expected to contain hash but does not.\n 3) When there is a hash in filename that cannot be found in\n hash dict.\n ' file_hashes = dict() base_filename = 'base.html' with self.assertRaisesRegexp(ValueError, 'Hash dict is empty'): build._verify_filepath_hash(base_filename, file_hashes) file_hashes = {base_filename: random.getrandbits(128)} with self.assertRaisesRegexp(ValueError, ('%s is expected to contain MD5 hash' % base_filename)): build._verify_filepath_hash(base_filename, file_hashes) bad_filepath = 'README' with self.assertRaisesRegexp(ValueError, 'Filepath has less than 2 partitions after splitting'): build._verify_filepath_hash(bad_filepath, file_hashes) hashed_base_filename = build._insert_hash(base_filename, random.getrandbits(128)) with self.assertRaisesRegexp(KeyError, ('Hash from file named %s does not match hash dict values' % hashed_base_filename)): build._verify_filepath_hash(hashed_base_filename, file_hashes)
def test_process_html(self): 'Test process_html removes whitespaces and adds hash to filepaths.' BASE_HTML_SOURCE_PATH = os.path.join(MOCK_TEMPLATES_DEV_DIR, 'base.html') BASE_JS_RELATIVE_PATH = os.path.join('pages', 'Base.js') BASE_JS_SOURCE_PATH = os.path.join(MOCK_TEMPLATES_COMPILED_JS_DIR, BASE_JS_RELATIVE_PATH) build._ensure_files_exist([BASE_HTML_SOURCE_PATH, BASE_JS_SOURCE_PATH]) minified_html_file_stream = StringIO.StringIO() with self.swap(build, 'FILE_EXTENSIONS_TO_IGNORE', ('.html',)): file_hashes = build.get_file_hashes(MOCK_TEMPLATES_DEV_DIR) file_hashes.update(build.get_file_hashes(MOCK_TEMPLATES_COMPILED_JS_DIR)) with open(BASE_HTML_SOURCE_PATH, 'r') as source_base_file: source_base_file_content = source_base_file.read() self.assertRegexpMatches(source_base_file_content, '\\s{2,}', msg=('No white spaces detected in %s unexpectedly' % BASE_HTML_SOURCE_PATH)) self.assertIn(BASE_JS_RELATIVE_PATH, source_base_file_content) with open(BASE_HTML_SOURCE_PATH, 'r') as source_base_file: build.process_html(source_base_file, minified_html_file_stream, file_hashes) minified_html_file_content = minified_html_file_stream.getvalue() self.assertNotRegexpMatches(minified_html_file_content, '\\s{2,}', msg=('All white spaces must be removed from %s' % BASE_HTML_SOURCE_PATH)) final_filename = build._insert_hash(BASE_JS_RELATIVE_PATH, file_hashes[BASE_JS_RELATIVE_PATH]) self.assertIn(final_filename, minified_html_file_content)
-218,790,766,903,421,200
Test process_html removes whitespaces and adds hash to filepaths.
scripts/build_test.py
test_process_html
muarachmann/oppia
python
def test_process_html(self): BASE_HTML_SOURCE_PATH = os.path.join(MOCK_TEMPLATES_DEV_DIR, 'base.html') BASE_JS_RELATIVE_PATH = os.path.join('pages', 'Base.js') BASE_JS_SOURCE_PATH = os.path.join(MOCK_TEMPLATES_COMPILED_JS_DIR, BASE_JS_RELATIVE_PATH) build._ensure_files_exist([BASE_HTML_SOURCE_PATH, BASE_JS_SOURCE_PATH]) minified_html_file_stream = StringIO.StringIO() with self.swap(build, 'FILE_EXTENSIONS_TO_IGNORE', ('.html',)): file_hashes = build.get_file_hashes(MOCK_TEMPLATES_DEV_DIR) file_hashes.update(build.get_file_hashes(MOCK_TEMPLATES_COMPILED_JS_DIR)) with open(BASE_HTML_SOURCE_PATH, 'r') as source_base_file: source_base_file_content = source_base_file.read() self.assertRegexpMatches(source_base_file_content, '\\s{2,}', msg=('No white spaces detected in %s unexpectedly' % BASE_HTML_SOURCE_PATH)) self.assertIn(BASE_JS_RELATIVE_PATH, source_base_file_content) with open(BASE_HTML_SOURCE_PATH, 'r') as source_base_file: build.process_html(source_base_file, minified_html_file_stream, file_hashes) minified_html_file_content = minified_html_file_stream.getvalue() self.assertNotRegexpMatches(minified_html_file_content, '\\s{2,}', msg=('All white spaces must be removed from %s' % BASE_HTML_SOURCE_PATH)) final_filename = build._insert_hash(BASE_JS_RELATIVE_PATH, file_hashes[BASE_JS_RELATIVE_PATH]) self.assertIn(final_filename, minified_html_file_content)
def test_should_file_be_built(self): 'Test should_file_be_built returns the correct boolean value for\n filepath that should be built.\n ' service_js_filepath = os.path.join('local_compiled_js', 'core', 'pages', 'AudioService.js') generated_parser_js_filepath = os.path.join('core', 'expressions', 'ExpressionParserService.js') compiled_generated_parser_js_filepath = os.path.join('local_compiled_js', 'core', 'expressions', 'ExpressionParserService.js') service_ts_filepath = os.path.join('core', 'pages', 'AudioService.ts') spec_js_filepath = os.path.join('core', 'pages', 'AudioServiceSpec.js') protractor_filepath = os.path.join('extensions', 'protractor.js') python_controller_filepath = os.path.join('base.py') pyc_test_filepath = os.path.join('core', 'controllers', 'base.pyc') python_test_filepath = os.path.join('core', 'tests', 'base_test.py') self.assertFalse(build.should_file_be_built(spec_js_filepath)) self.assertFalse(build.should_file_be_built(protractor_filepath)) self.assertTrue(build.should_file_be_built(service_js_filepath)) self.assertFalse(build.should_file_be_built(service_ts_filepath)) self.assertFalse(build.should_file_be_built(python_test_filepath)) self.assertFalse(build.should_file_be_built(pyc_test_filepath)) self.assertTrue(build.should_file_be_built(python_controller_filepath)) with self.swap(build, 'JS_FILENAME_SUFFIXES_TO_IGNORE', ('Service.js',)): self.assertFalse(build.should_file_be_built(service_js_filepath)) self.assertTrue(build.should_file_be_built(spec_js_filepath)) with self.swap(build, 'JS_FILEPATHS_NOT_TO_BUILD', ('core/expressions/ExpressionParserService.js',)): self.assertFalse(build.should_file_be_built(generated_parser_js_filepath)) self.assertTrue(build.should_file_be_built(compiled_generated_parser_js_filepath))
1,018,427,264,036,538,800
Test should_file_be_built returns the correct boolean value for filepath that should be built.
scripts/build_test.py
test_should_file_be_built
muarachmann/oppia
python
def test_should_file_be_built(self): 'Test should_file_be_built returns the correct boolean value for\n filepath that should be built.\n ' service_js_filepath = os.path.join('local_compiled_js', 'core', 'pages', 'AudioService.js') generated_parser_js_filepath = os.path.join('core', 'expressions', 'ExpressionParserService.js') compiled_generated_parser_js_filepath = os.path.join('local_compiled_js', 'core', 'expressions', 'ExpressionParserService.js') service_ts_filepath = os.path.join('core', 'pages', 'AudioService.ts') spec_js_filepath = os.path.join('core', 'pages', 'AudioServiceSpec.js') protractor_filepath = os.path.join('extensions', 'protractor.js') python_controller_filepath = os.path.join('base.py') pyc_test_filepath = os.path.join('core', 'controllers', 'base.pyc') python_test_filepath = os.path.join('core', 'tests', 'base_test.py') self.assertFalse(build.should_file_be_built(spec_js_filepath)) self.assertFalse(build.should_file_be_built(protractor_filepath)) self.assertTrue(build.should_file_be_built(service_js_filepath)) self.assertFalse(build.should_file_be_built(service_ts_filepath)) self.assertFalse(build.should_file_be_built(python_test_filepath)) self.assertFalse(build.should_file_be_built(pyc_test_filepath)) self.assertTrue(build.should_file_be_built(python_controller_filepath)) with self.swap(build, 'JS_FILENAME_SUFFIXES_TO_IGNORE', ('Service.js',)): self.assertFalse(build.should_file_be_built(service_js_filepath)) self.assertTrue(build.should_file_be_built(spec_js_filepath)) with self.swap(build, 'JS_FILEPATHS_NOT_TO_BUILD', ('core/expressions/ExpressionParserService.js',)): self.assertFalse(build.should_file_be_built(generated_parser_js_filepath)) self.assertTrue(build.should_file_be_built(compiled_generated_parser_js_filepath))
def test_hash_should_be_inserted(self): 'Test hash_should_be_inserted returns the correct boolean value\n for filepath that should be hashed.\n ' with self.swap(build, 'FILEPATHS_NOT_TO_RENAME', ('*.py', 'path/to/fonts/*', 'path/to/third_party.min.js.map', 'path/to/third_party.min.css.map')): self.assertFalse(build.hash_should_be_inserted('path/to/fonts/fontawesome-webfont.svg')) self.assertFalse(build.hash_should_be_inserted('path/to/third_party.min.css.map')) self.assertFalse(build.hash_should_be_inserted('path/to/third_party.min.js.map')) self.assertTrue(build.hash_should_be_inserted('path/to/wrongFonts/fonta.eot')) self.assertTrue(build.hash_should_be_inserted('rich_text_components/Video/protractor.js')) self.assertFalse(build.hash_should_be_inserted('main.py')) self.assertFalse(build.hash_should_be_inserted('extensions/domain.py'))
-5,054,451,901,803,749,000
Test hash_should_be_inserted returns the correct boolean value for a filepath that should be hashed.
scripts/build_test.py
test_hash_should_be_inserted
muarachmann/oppia
python
def test_hash_should_be_inserted(self):
    with self.swap(
        build, 'FILEPATHS_NOT_TO_RENAME',
        ('*.py', 'path/to/fonts/*', 'path/to/third_party.min.js.map',
         'path/to/third_party.min.css.map')):
        self.assertFalse(build.hash_should_be_inserted(
            'path/to/fonts/fontawesome-webfont.svg'))
        self.assertFalse(build.hash_should_be_inserted(
            'path/to/third_party.min.css.map'))
        self.assertFalse(build.hash_should_be_inserted(
            'path/to/third_party.min.js.map'))
        self.assertTrue(build.hash_should_be_inserted(
            'path/to/wrongFonts/fonta.eot'))
        self.assertTrue(build.hash_should_be_inserted(
            'rich_text_components/Video/protractor.js'))
        self.assertFalse(build.hash_should_be_inserted('main.py'))
        self.assertFalse(build.hash_should_be_inserted('extensions/domain.py'))
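The exclusion list the test swaps in contains glob patterns ('*.py', 'path/to/fonts/*'), so one plausible shape for the predicate is a fnmatch scan over those patterns. A sketch under that assumption; note fnmatch's '*' also matches across '/', which is exactly why 'extensions/domain.py' is excluded by '*.py' in the assertions above:

import fnmatch

# Hypothetical default; the test swaps in its own tuple of glob patterns.
FILEPATHS_NOT_TO_RENAME = ('*.py', 'path/to/fonts/*')

def hash_should_be_inserted(filepath):
    # A file gets a hash inserted into its name unless it matches
    # any exclusion glob (e.g. fonts, source maps, Python files).
    return not any(
        fnmatch.fnmatch(filepath, pattern)
        for pattern in FILEPATHS_NOT_TO_RENAME)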
def test_generate_copy_tasks_to_copy_from_source_to_target(self):
    """Test generate_copy_tasks_to_copy_from_source_to_target queues up
    the same number of copy tasks as the number of files in the directory.
    """
    assets_hashes = build.get_file_hashes(MOCK_ASSETS_DEV_DIR)
    total_file_count = build.get_file_count(MOCK_ASSETS_DEV_DIR)
    copy_tasks = collections.deque()

    self.assertEqual(len(copy_tasks), 0)
    copy_tasks += build.generate_copy_tasks_to_copy_from_source_to_target(
        MOCK_ASSETS_DEV_DIR, MOCK_ASSETS_OUT_DIR, assets_hashes)
    self.assertEqual(len(copy_tasks), total_file_count)
8,098,721,213,208,466,000
Test generate_copy_tasks_to_copy_from_source_to_target queues up the same number of copy tasks as the number of files in the directory.
scripts/build_test.py
test_generate_copy_tasks_to_copy_from_source_to_target
muarachmann/oppia
python
def test_generate_copy_tasks_to_copy_from_source_to_target(self):
    assets_hashes = build.get_file_hashes(MOCK_ASSETS_DEV_DIR)
    total_file_count = build.get_file_count(MOCK_ASSETS_DEV_DIR)
    copy_tasks = collections.deque()

    self.assertEqual(len(copy_tasks), 0)
    copy_tasks += build.generate_copy_tasks_to_copy_from_source_to_target(
        MOCK_ASSETS_DEV_DIR, MOCK_ASSETS_OUT_DIR, assets_hashes)
    self.assertEqual(len(copy_tasks), total_file_count)
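The generator is asserted only to produce one task per file in the source tree. A minimal sketch of a generator with that property, assuming each task is an unstarted thread wrapping a plain file copy; hash-based renaming of the target paths is omitted here for brevity, and the real version in scripts/build.py may differ:

import collections
import os
import shutil
import threading

def generate_copy_tasks_to_copy_from_source_to_target(
        source_dir, target_dir, file_hashes):
    # One unstarted copy task per file under source_dir; the caller
    # decides when to start and join them. file_hashes would drive
    # hash-renamed target filenames, omitted in this sketch.
    copy_tasks = collections.deque()
    for root, _, filenames in os.walk(source_dir):
        for filename in filenames:
            source_path = os.path.join(root, filename)
            relative_path = os.path.relpath(source_path, source_dir)
            target_path = os.path.join(target_dir, relative_path)
            copy_tasks.append(threading.Thread(
                target=shutil.copyfile, args=(source_path, target_path)))
    return copy_tasks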
def test_is_file_hash_provided_to_frontend(self):
    """Test is_file_hash_provided_to_frontend returns the correct boolean
    value for a filepath that should be provided to the frontend.
    """
    with self.swap(
        build, 'FILEPATHS_PROVIDED_TO_FRONTEND',
        ('path/to/file.js', 'path/to/file.html', 'file.js')):
        self.assertTrue(
            build.is_file_hash_provided_to_frontend('path/to/file.js'))
        self.assertTrue(
            build.is_file_hash_provided_to_frontend('path/to/file.html'))
        self.assertTrue(build.is_file_hash_provided_to_frontend('file.js'))

    with self.swap(
        build, 'FILEPATHS_PROVIDED_TO_FRONTEND',
        ('path/to/*', '*.js', '*_end.html')):
        self.assertTrue(
            build.is_file_hash_provided_to_frontend('path/to/file.js'))
        self.assertTrue(
            build.is_file_hash_provided_to_frontend('path/to/file.html'))
        self.assertTrue(build.is_file_hash_provided_to_frontend('file.js'))
        self.assertFalse(
            build.is_file_hash_provided_to_frontend('path/file.css'))
        self.assertTrue(
            build.is_file_hash_provided_to_frontend('good_end.html'))
        self.assertFalse(
            build.is_file_hash_provided_to_frontend('bad_end.css'))
-9,103,280,922,856,293,000
Test is_file_hash_provided_to_frontend returns the correct boolean value for a filepath that should be provided to the frontend.
scripts/build_test.py
test_is_file_hash_provided_to_frontend
muarachmann/oppia
python
def test_is_file_hash_provided_to_frontend(self):
    with self.swap(
        build, 'FILEPATHS_PROVIDED_TO_FRONTEND',
        ('path/to/file.js', 'path/to/file.html', 'file.js')):
        self.assertTrue(
            build.is_file_hash_provided_to_frontend('path/to/file.js'))
        self.assertTrue(
            build.is_file_hash_provided_to_frontend('path/to/file.html'))
        self.assertTrue(build.is_file_hash_provided_to_frontend('file.js'))

    with self.swap(
        build, 'FILEPATHS_PROVIDED_TO_FRONTEND',
        ('path/to/*', '*.js', '*_end.html')):
        self.assertTrue(
            build.is_file_hash_provided_to_frontend('path/to/file.js'))
        self.assertTrue(
            build.is_file_hash_provided_to_frontend('path/to/file.html'))
        self.assertTrue(build.is_file_hash_provided_to_frontend('file.js'))
        self.assertFalse(
            build.is_file_hash_provided_to_frontend('path/file.css'))
        self.assertTrue(
            build.is_file_hash_provided_to_frontend('good_end.html'))
        self.assertFalse(
            build.is_file_hash_provided_to_frontend('bad_end.css'))
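The frontend predicate itself is plausibly the inclusion mirror of the exclusion check sketched earlier, i.e. any(fnmatch.fnmatch(filepath, p) for p in FILEPATHS_PROVIDED_TO_FRONTEND). Beyond that, all four tests above patch module constants through self.swap(...), a context manager from Oppia's core/tests/test_utils.py. A minimal sketch of what such a helper could look like, shown only to clarify the pattern (the real helper may differ):

import contextlib

@contextlib.contextmanager
def swap(obj, attr, newvalue):
    # Temporarily replace obj.attr, restoring the original value even
    # if the with-block raises.
    original = getattr(obj, attr)
    setattr(obj, attr, newvalue)
    try:
        yield
    finally:
        setattr(obj, attr, original)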