repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
ninuxorg/nodeshot | nodeshot/networking/connectors/models/device_connector.py | DeviceConnector._get_netengine_backend | def _get_netengine_backend(self):
"""
returns the netengine backend specified in self.backend
for internal use only
"""
# extract backend class name, eg: AirOS or OpenWRT
backend_class_name = self.backend.split('.')[-1]
# convert to lowercase to get the path
backend_path = self.backend.lower()
# import module by its path
module = import_module(backend_path)
# get netengine backend class
BackendClass = getattr(module, backend_class_name)
return BackendClass | python | def _get_netengine_backend(self):
"""
returns the netengine backend specified in self.backend
for internal use only
"""
# extract backend class name, eg: AirOS or OpenWRT
backend_class_name = self.backend.split('.')[-1]
# convert to lowercase to get the path
backend_path = self.backend.lower()
# import module by its path
module = import_module(backend_path)
# get netengine backend class
BackendClass = getattr(module, backend_class_name)
return BackendClass | [
"def",
"_get_netengine_backend",
"(",
"self",
")",
":",
"# extract backend class name, eg: AirOS or OpenWRT",
"backend_class_name",
"=",
"self",
".",
"backend",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"# convert to lowercase to get the path",
"backend_path",
... | returns the netengine backend specified in self.backend
for internal use only | [
"returns",
"the",
"netengine",
"backend",
"specified",
"in",
"self",
".",
"backend",
"for",
"internal",
"use",
"only"
] | 2466f0a55f522b2696026f196436ce7ba3f1e5c6 | https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/networking/connectors/models/device_connector.py#L218-L232 | train | 52,300 |
ninuxorg/nodeshot | nodeshot/networking/connectors/models/device_connector.py | DeviceConnector._build_netengine_arguments | def _build_netengine_arguments(self):
"""
returns a python dictionary representing arguments
that will be passed to a netengine backend
for internal use only
"""
arguments = {
"host": self.host
}
if self.config is not None:
for key, value in self.config.iteritems():
arguments[key] = value
if self.port:
arguments["port"] = self.port
return arguments | python | def _build_netengine_arguments(self):
"""
returns a python dictionary representing arguments
that will be passed to a netengine backend
for internal use only
"""
arguments = {
"host": self.host
}
if self.config is not None:
for key, value in self.config.iteritems():
arguments[key] = value
if self.port:
arguments["port"] = self.port
return arguments | [
"def",
"_build_netengine_arguments",
"(",
"self",
")",
":",
"arguments",
"=",
"{",
"\"host\"",
":",
"self",
".",
"host",
"}",
"if",
"self",
".",
"config",
"is",
"not",
"None",
":",
"for",
"key",
",",
"value",
"in",
"self",
".",
"config",
".",
"iteritem... | returns a python dictionary representing arguments
that will be passed to a netengine backend
for internal use only | [
"returns",
"a",
"python",
"dictionary",
"representing",
"arguments",
"that",
"will",
"be",
"passed",
"to",
"a",
"netengine",
"backend",
"for",
"internal",
"use",
"only"
] | 2466f0a55f522b2696026f196436ce7ba3f1e5c6 | https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/networking/connectors/models/device_connector.py#L234-L251 | train | 52,301 |
bcwaldon/warlock | warlock/core.py | model_factory | def model_factory(schema, resolver=None, base_class=model.Model, name=None):
"""Generate a model class based on the provided JSON Schema
:param schema: dict representing valid JSON schema
:param name: A name to give the class, if `name` is not in `schema`
"""
schema = copy.deepcopy(schema)
resolver = resolver
class Model(base_class):
def __init__(self, *args, **kwargs):
self.__dict__['schema'] = schema
self.__dict__['resolver'] = resolver
base_class.__init__(self, *args, **kwargs)
if resolver is not None:
Model.resolver = resolver
if name is not None:
Model.__name__ = name
elif 'name' in schema:
Model.__name__ = str(schema['name'])
return Model | python | def model_factory(schema, resolver=None, base_class=model.Model, name=None):
"""Generate a model class based on the provided JSON Schema
:param schema: dict representing valid JSON schema
:param name: A name to give the class, if `name` is not in `schema`
"""
schema = copy.deepcopy(schema)
resolver = resolver
class Model(base_class):
def __init__(self, *args, **kwargs):
self.__dict__['schema'] = schema
self.__dict__['resolver'] = resolver
base_class.__init__(self, *args, **kwargs)
if resolver is not None:
Model.resolver = resolver
if name is not None:
Model.__name__ = name
elif 'name' in schema:
Model.__name__ = str(schema['name'])
return Model | [
"def",
"model_factory",
"(",
"schema",
",",
"resolver",
"=",
"None",
",",
"base_class",
"=",
"model",
".",
"Model",
",",
"name",
"=",
"None",
")",
":",
"schema",
"=",
"copy",
".",
"deepcopy",
"(",
"schema",
")",
"resolver",
"=",
"resolver",
"class",
"M... | Generate a model class based on the provided JSON Schema
:param schema: dict representing valid JSON schema
:param name: A name to give the class, if `name` is not in `schema` | [
"Generate",
"a",
"model",
"class",
"based",
"on",
"the",
"provided",
"JSON",
"Schema"
] | 19b2b3e103ddd753bb5da5b5d96f801c267dad3b | https://github.com/bcwaldon/warlock/blob/19b2b3e103ddd753bb5da5b5d96f801c267dad3b/warlock/core.py#L22-L44 | train | 52,302 |
bcwaldon/warlock | warlock/model.py | Model.patch | def patch(self):
"""Return a jsonpatch object representing the delta"""
original = self.__dict__['__original__']
return jsonpatch.make_patch(original, dict(self)).to_string() | python | def patch(self):
"""Return a jsonpatch object representing the delta"""
original = self.__dict__['__original__']
return jsonpatch.make_patch(original, dict(self)).to_string() | [
"def",
"patch",
"(",
"self",
")",
":",
"original",
"=",
"self",
".",
"__dict__",
"[",
"'__original__'",
"]",
"return",
"jsonpatch",
".",
"make_patch",
"(",
"original",
",",
"dict",
"(",
"self",
")",
")",
".",
"to_string",
"(",
")"
] | Return a jsonpatch object representing the delta | [
"Return",
"a",
"jsonpatch",
"object",
"representing",
"the",
"delta"
] | 19b2b3e103ddd753bb5da5b5d96f801c267dad3b | https://github.com/bcwaldon/warlock/blob/19b2b3e103ddd753bb5da5b5d96f801c267dad3b/warlock/model.py#L125-L128 | train | 52,303 |
bcwaldon/warlock | warlock/model.py | Model.changes | def changes(self):
"""Dumber version of 'patch' method"""
deprecation_msg = 'Model.changes will be removed in warlock v2'
warnings.warn(deprecation_msg, DeprecationWarning, stacklevel=2)
return copy.deepcopy(self.__dict__['changes']) | python | def changes(self):
"""Dumber version of 'patch' method"""
deprecation_msg = 'Model.changes will be removed in warlock v2'
warnings.warn(deprecation_msg, DeprecationWarning, stacklevel=2)
return copy.deepcopy(self.__dict__['changes']) | [
"def",
"changes",
"(",
"self",
")",
":",
"deprecation_msg",
"=",
"'Model.changes will be removed in warlock v2'",
"warnings",
".",
"warn",
"(",
"deprecation_msg",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
")",
"return",
"copy",
".",
"deepcopy",
"(",
... | Dumber version of 'patch' method | [
"Dumber",
"version",
"of",
"patch",
"method"
] | 19b2b3e103ddd753bb5da5b5d96f801c267dad3b | https://github.com/bcwaldon/warlock/blob/19b2b3e103ddd753bb5da5b5d96f801c267dad3b/warlock/model.py#L131-L135 | train | 52,304 |
sashs/filebytes | filebytes/mach_o.py | MachO.isSupportedContent | def isSupportedContent(cls, fileContent):
"""Returns if the files are valid for this filetype"""
magic = bytearray(fileContent)[:4]
return magic == p('>I', 0xfeedface) or magic == p('>I', 0xfeedfacf) or magic == p('<I', 0xfeedface) or magic == p('<I', 0xfeedfacf) | python | def isSupportedContent(cls, fileContent):
"""Returns if the files are valid for this filetype"""
magic = bytearray(fileContent)[:4]
return magic == p('>I', 0xfeedface) or magic == p('>I', 0xfeedfacf) or magic == p('<I', 0xfeedface) or magic == p('<I', 0xfeedfacf) | [
"def",
"isSupportedContent",
"(",
"cls",
",",
"fileContent",
")",
":",
"magic",
"=",
"bytearray",
"(",
"fileContent",
")",
"[",
":",
"4",
"]",
"return",
"magic",
"==",
"p",
"(",
"'>I'",
",",
"0xfeedface",
")",
"or",
"magic",
"==",
"p",
"(",
"'>I'",
"... | Returns if the files are valid for this filetype | [
"Returns",
"if",
"the",
"files",
"are",
"valid",
"for",
"this",
"filetype"
] | 41ee009832aba19603f33d1fd3483b84d6684ebf | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/mach_o.py#L476-L479 | train | 52,305 |
sashs/filebytes | filebytes/oat.py | OAT._parseOatHeader | def _parseOatHeader(self, data):
"""Returns the OatHeader"""
header = OatHeader.from_buffer(data)
if header.magic != b'oat\n':
raise BinaryError('No valid OAT file')
key_value_store_bytes = (c_ubyte * header.keyValueStoreSize).from_buffer(data, sizeof(OatHeader))
key_value_store = self.__parseKeyValueStore(key_value_store_bytes)
return OatHeaderData(header=header, keyValueStoreRaw=key_value_store_bytes, keyValueStore=key_value_store) | python | def _parseOatHeader(self, data):
"""Returns the OatHeader"""
header = OatHeader.from_buffer(data)
if header.magic != b'oat\n':
raise BinaryError('No valid OAT file')
key_value_store_bytes = (c_ubyte * header.keyValueStoreSize).from_buffer(data, sizeof(OatHeader))
key_value_store = self.__parseKeyValueStore(key_value_store_bytes)
return OatHeaderData(header=header, keyValueStoreRaw=key_value_store_bytes, keyValueStore=key_value_store) | [
"def",
"_parseOatHeader",
"(",
"self",
",",
"data",
")",
":",
"header",
"=",
"OatHeader",
".",
"from_buffer",
"(",
"data",
")",
"if",
"header",
".",
"magic",
"!=",
"b'oat\\n'",
":",
"raise",
"BinaryError",
"(",
"'No valid OAT file'",
")",
"key_value_store_byte... | Returns the OatHeader | [
"Returns",
"the",
"OatHeader"
] | 41ee009832aba19603f33d1fd3483b84d6684ebf | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/oat.py#L169-L178 | train | 52,306 |
sashs/filebytes | filebytes/oat.py | OAT.__parseKeyValueStore | def __parseKeyValueStore(self, data):
"""Returns a dictionary filled with the keys and values of the key value store"""
offset = 0
key_value_store = {}
while offset != len(data):
key = get_str(data, offset)
offset += len(key)+1
value = get_str(data, offset)
offset += len(value)+1
key_value_store[key] = value
return key_value_store | python | def __parseKeyValueStore(self, data):
"""Returns a dictionary filled with the keys and values of the key value store"""
offset = 0
key_value_store = {}
while offset != len(data):
key = get_str(data, offset)
offset += len(key)+1
value = get_str(data, offset)
offset += len(value)+1
key_value_store[key] = value
return key_value_store | [
"def",
"__parseKeyValueStore",
"(",
"self",
",",
"data",
")",
":",
"offset",
"=",
"0",
"key_value_store",
"=",
"{",
"}",
"while",
"offset",
"!=",
"len",
"(",
"data",
")",
":",
"key",
"=",
"get_str",
"(",
"data",
",",
"offset",
")",
"offset",
"+=",
"l... | Returns a dictionary filled with the keys and values of the key value store | [
"Returns",
"a",
"dictionary",
"filled",
"with",
"the",
"keys",
"and",
"values",
"of",
"the",
"key",
"value",
"store"
] | 41ee009832aba19603f33d1fd3483b84d6684ebf | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/oat.py#L180-L193 | train | 52,307 |
sashs/filebytes | filebytes/pe.py | to_raw_address | def to_raw_address(addr, section):
"""Converts the addr from a rva to a pointer to raw data in the file"""
return addr - section.header.VirtualAddress + section.header.PointerToRawData | python | def to_raw_address(addr, section):
"""Converts the addr from a rva to a pointer to raw data in the file"""
return addr - section.header.VirtualAddress + section.header.PointerToRawData | [
"def",
"to_raw_address",
"(",
"addr",
",",
"section",
")",
":",
"return",
"addr",
"-",
"section",
".",
"header",
".",
"VirtualAddress",
"+",
"section",
".",
"header",
".",
"PointerToRawData"
] | Converts the addr from a rva to a pointer to raw data in the file | [
"Converts",
"the",
"addr",
"from",
"a",
"rva",
"to",
"a",
"pointer",
"to",
"raw",
"data",
"in",
"the",
"file"
] | 41ee009832aba19603f33d1fd3483b84d6684ebf | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/pe.py#L356-L358 | train | 52,308 |
sashs/filebytes | filebytes/pe.py | PE._parseImageDosHeader | def _parseImageDosHeader(self, data):
"""Returns the ImageDosHeader"""
ioh = IMAGE_DOS_HEADER.from_buffer(data)
if ioh.e_magic != b'MZ':
raise BinaryError('No valid PE/COFF file')
return ImageDosHeaderData(header=ioh) | python | def _parseImageDosHeader(self, data):
"""Returns the ImageDosHeader"""
ioh = IMAGE_DOS_HEADER.from_buffer(data)
if ioh.e_magic != b'MZ':
raise BinaryError('No valid PE/COFF file')
return ImageDosHeaderData(header=ioh) | [
"def",
"_parseImageDosHeader",
"(",
"self",
",",
"data",
")",
":",
"ioh",
"=",
"IMAGE_DOS_HEADER",
".",
"from_buffer",
"(",
"data",
")",
"if",
"ioh",
".",
"e_magic",
"!=",
"b'MZ'",
":",
"raise",
"BinaryError",
"(",
"'No valid PE/COFF file'",
")",
"return",
"... | Returns the ImageDosHeader | [
"Returns",
"the",
"ImageDosHeader"
] | 41ee009832aba19603f33d1fd3483b84d6684ebf | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/pe.py#L498-L504 | train | 52,309 |
sashs/filebytes | filebytes/pe.py | PE._parseImageNtHeaders | def _parseImageNtHeaders(self, data, imageDosHeader):
"""Returns the ImageNtHeaders"""
inth = self._classes.IMAGE_NT_HEADERS.from_buffer(data, imageDosHeader.header.e_lfanew)
if inth.Signature != b'PE':
raise BinaryError('No valid PE/COFF file')
return ImageNtHeaderData(header=inth) | python | def _parseImageNtHeaders(self, data, imageDosHeader):
"""Returns the ImageNtHeaders"""
inth = self._classes.IMAGE_NT_HEADERS.from_buffer(data, imageDosHeader.header.e_lfanew)
if inth.Signature != b'PE':
raise BinaryError('No valid PE/COFF file')
return ImageNtHeaderData(header=inth) | [
"def",
"_parseImageNtHeaders",
"(",
"self",
",",
"data",
",",
"imageDosHeader",
")",
":",
"inth",
"=",
"self",
".",
"_classes",
".",
"IMAGE_NT_HEADERS",
".",
"from_buffer",
"(",
"data",
",",
"imageDosHeader",
".",
"header",
".",
"e_lfanew",
")",
"if",
"inth"... | Returns the ImageNtHeaders | [
"Returns",
"the",
"ImageNtHeaders"
] | 41ee009832aba19603f33d1fd3483b84d6684ebf | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/pe.py#L506-L513 | train | 52,310 |
sashs/filebytes | filebytes/pe.py | PE._parseSections | def _parseSections(self, data, imageDosHeader, imageNtHeaders, parse_header_only=False):
"""Parses the sections in the memory and returns a list of them"""
sections = []
optional_header_offset = imageDosHeader.header.e_lfanew + 4 + sizeof(IMAGE_FILE_HEADER)
offset = optional_header_offset + imageNtHeaders.header.FileHeader.SizeOfOptionalHeader # start reading behind the dos- and ntheaders
image_section_header_size = sizeof(IMAGE_SECTION_HEADER)
for sectionNo in range(imageNtHeaders.header.FileHeader.NumberOfSections):
ishdr = IMAGE_SECTION_HEADER.from_buffer(data, offset)
if parse_header_only:
raw = None
bytes_ = bytearray()
else:
size = ishdr.SizeOfRawData
raw = (c_ubyte * size).from_buffer(data, ishdr.PointerToRawData)
bytes_ = bytearray(raw)
sections.append(SectionData(header=ishdr, name=ishdr.Name.decode('ASCII', errors='ignore'), bytes=bytes_, raw=raw))
offset += image_section_header_size
return sections | python | def _parseSections(self, data, imageDosHeader, imageNtHeaders, parse_header_only=False):
"""Parses the sections in the memory and returns a list of them"""
sections = []
optional_header_offset = imageDosHeader.header.e_lfanew + 4 + sizeof(IMAGE_FILE_HEADER)
offset = optional_header_offset + imageNtHeaders.header.FileHeader.SizeOfOptionalHeader # start reading behind the dos- and ntheaders
image_section_header_size = sizeof(IMAGE_SECTION_HEADER)
for sectionNo in range(imageNtHeaders.header.FileHeader.NumberOfSections):
ishdr = IMAGE_SECTION_HEADER.from_buffer(data, offset)
if parse_header_only:
raw = None
bytes_ = bytearray()
else:
size = ishdr.SizeOfRawData
raw = (c_ubyte * size).from_buffer(data, ishdr.PointerToRawData)
bytes_ = bytearray(raw)
sections.append(SectionData(header=ishdr, name=ishdr.Name.decode('ASCII', errors='ignore'), bytes=bytes_, raw=raw))
offset += image_section_header_size
return sections | [
"def",
"_parseSections",
"(",
"self",
",",
"data",
",",
"imageDosHeader",
",",
"imageNtHeaders",
",",
"parse_header_only",
"=",
"False",
")",
":",
"sections",
"=",
"[",
"]",
"optional_header_offset",
"=",
"imageDosHeader",
".",
"header",
".",
"e_lfanew",
"+",
... | Parses the sections in the memory and returns a list of them | [
"Parses",
"the",
"sections",
"in",
"the",
"memory",
"and",
"returns",
"a",
"list",
"of",
"them"
] | 41ee009832aba19603f33d1fd3483b84d6684ebf | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/pe.py#L515-L539 | train | 52,311 |
sashs/filebytes | filebytes/pe.py | PE._getSectionForDataDirectoryEntry | def _getSectionForDataDirectoryEntry(self, data_directory_entry, sections):
"""Returns the section which contains the data of DataDirectory"""
for section in sections:
if data_directory_entry.VirtualAddress >= section.header.VirtualAddress and \
data_directory_entry.VirtualAddress < section.header.VirtualAddress + section.header.SizeOfRawData :
return section | python | def _getSectionForDataDirectoryEntry(self, data_directory_entry, sections):
"""Returns the section which contains the data of DataDirectory"""
for section in sections:
if data_directory_entry.VirtualAddress >= section.header.VirtualAddress and \
data_directory_entry.VirtualAddress < section.header.VirtualAddress + section.header.SizeOfRawData :
return section | [
"def",
"_getSectionForDataDirectoryEntry",
"(",
"self",
",",
"data_directory_entry",
",",
"sections",
")",
":",
"for",
"section",
"in",
"sections",
":",
"if",
"data_directory_entry",
".",
"VirtualAddress",
">=",
"section",
".",
"header",
".",
"VirtualAddress",
"and"... | Returns the section which contains the data of DataDirectory | [
"Returns",
"the",
"section",
"which",
"contains",
"the",
"data",
"of",
"DataDirectory"
] | 41ee009832aba19603f33d1fd3483b84d6684ebf | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/pe.py#L541-L547 | train | 52,312 |
sashs/filebytes | filebytes/pe.py | PE._parseDataDirectory | def _parseDataDirectory(self, data, sections, imageNtHeaders):
"""Parses the entries of the DataDirectory and returns a list of the content"""
data_directory_data_list = [None for i in range(15)]
# parse DataDirectory[Export]
export_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.EXPORT]
export_section = self._getSectionForDataDirectoryEntry(export_data_directory, sections)
export_data_directory_data = self._parseDataDirectoryExport(data, export_data_directory, export_section)
data_directory_data_list[ImageDirectoryEntry.EXPORT] = export_data_directory_data
# parse DataDirectory[Import]
import_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.IMPORT]
import_section = self._getSectionForDataDirectoryEntry(import_data_directory, sections)
import_data_directory_data = self._parseDataDirectoryImport(import_data_directory, import_section)
data_directory_data_list[ImageDirectoryEntry.IMPORT] = import_data_directory_data
# parse DataDirectory[LOAD_CONFIG]
loadconfig_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.LOAD_CONFIG]
loadconfig_section = self._getSectionForDataDirectoryEntry(loadconfig_data_directory, sections)
loadconfig_data = self._parseLoadConfig(loadconfig_data_directory, loadconfig_section)
data_directory_data_list[ImageDirectoryEntry.LOAD_CONFIG] = loadconfig_data
return data_directory_data_list | python | def _parseDataDirectory(self, data, sections, imageNtHeaders):
"""Parses the entries of the DataDirectory and returns a list of the content"""
data_directory_data_list = [None for i in range(15)]
# parse DataDirectory[Export]
export_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.EXPORT]
export_section = self._getSectionForDataDirectoryEntry(export_data_directory, sections)
export_data_directory_data = self._parseDataDirectoryExport(data, export_data_directory, export_section)
data_directory_data_list[ImageDirectoryEntry.EXPORT] = export_data_directory_data
# parse DataDirectory[Import]
import_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.IMPORT]
import_section = self._getSectionForDataDirectoryEntry(import_data_directory, sections)
import_data_directory_data = self._parseDataDirectoryImport(import_data_directory, import_section)
data_directory_data_list[ImageDirectoryEntry.IMPORT] = import_data_directory_data
# parse DataDirectory[LOAD_CONFIG]
loadconfig_data_directory = imageNtHeaders.header.OptionalHeader.DataDirectory[ImageDirectoryEntry.LOAD_CONFIG]
loadconfig_section = self._getSectionForDataDirectoryEntry(loadconfig_data_directory, sections)
loadconfig_data = self._parseLoadConfig(loadconfig_data_directory, loadconfig_section)
data_directory_data_list[ImageDirectoryEntry.LOAD_CONFIG] = loadconfig_data
return data_directory_data_list | [
"def",
"_parseDataDirectory",
"(",
"self",
",",
"data",
",",
"sections",
",",
"imageNtHeaders",
")",
":",
"data_directory_data_list",
"=",
"[",
"None",
"for",
"i",
"in",
"range",
"(",
"15",
")",
"]",
"# parse DataDirectory[Export]",
"export_data_directory",
"=",
... | Parses the entries of the DataDirectory and returns a list of the content | [
"Parses",
"the",
"entries",
"of",
"the",
"DataDirectory",
"and",
"returns",
"a",
"list",
"of",
"the",
"content"
] | 41ee009832aba19603f33d1fd3483b84d6684ebf | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/pe.py#L549-L571 | train | 52,313 |
sashs/filebytes | filebytes/pe.py | PE._parseDataDirectoryExport | def _parseDataDirectoryExport(self, data, dataDirectoryEntry, exportSection):
"""Parses the EmportDataDirectory and returns an instance of ExportDirectoryData"""
if not exportSection:
return
functions = []
export_directory = IMAGE_EXPORT_DIRECTORY.from_buffer(exportSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, exportSection))
offset = to_offset(export_directory.Name, exportSection)
checkOffset(offset, exportSection)
name = get_str(exportSection.raw, offset)
offsetOfNames = to_offset(export_directory.AddressOfNames, exportSection)
offsetOfAddress = to_offset(export_directory.AddressOfFunctions, exportSection)
offsetOfNameOrdinals = to_offset(export_directory.AddressOfNameOrdinals, exportSection)
for i in range(export_directory.NumberOfNames):
name_address = c_uint.from_buffer(exportSection.raw, offsetOfNames).value
name_offset = to_offset(name_address, exportSection)
checkOffset(name_offset, exportSection)
func_name = get_str(exportSection.raw, name_offset)
ordinal = c_ushort.from_buffer(exportSection.raw, offsetOfNameOrdinals).value
func_addr = c_uint.from_buffer(exportSection.raw, offsetOfAddress).value
offsetOfNames += 4
offsetOfAddress += 4
offsetOfNameOrdinals += 2
functions.append(FunctionData(name=func_name, rva=func_addr, ordinal=ordinal))
return ExportDirectoryData(header=export_directory, name=name, functions=functions) | python | def _parseDataDirectoryExport(self, data, dataDirectoryEntry, exportSection):
"""Parses the EmportDataDirectory and returns an instance of ExportDirectoryData"""
if not exportSection:
return
functions = []
export_directory = IMAGE_EXPORT_DIRECTORY.from_buffer(exportSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, exportSection))
offset = to_offset(export_directory.Name, exportSection)
checkOffset(offset, exportSection)
name = get_str(exportSection.raw, offset)
offsetOfNames = to_offset(export_directory.AddressOfNames, exportSection)
offsetOfAddress = to_offset(export_directory.AddressOfFunctions, exportSection)
offsetOfNameOrdinals = to_offset(export_directory.AddressOfNameOrdinals, exportSection)
for i in range(export_directory.NumberOfNames):
name_address = c_uint.from_buffer(exportSection.raw, offsetOfNames).value
name_offset = to_offset(name_address, exportSection)
checkOffset(name_offset, exportSection)
func_name = get_str(exportSection.raw, name_offset)
ordinal = c_ushort.from_buffer(exportSection.raw, offsetOfNameOrdinals).value
func_addr = c_uint.from_buffer(exportSection.raw, offsetOfAddress).value
offsetOfNames += 4
offsetOfAddress += 4
offsetOfNameOrdinals += 2
functions.append(FunctionData(name=func_name, rva=func_addr, ordinal=ordinal))
return ExportDirectoryData(header=export_directory, name=name, functions=functions) | [
"def",
"_parseDataDirectoryExport",
"(",
"self",
",",
"data",
",",
"dataDirectoryEntry",
",",
"exportSection",
")",
":",
"if",
"not",
"exportSection",
":",
"return",
"functions",
"=",
"[",
"]",
"export_directory",
"=",
"IMAGE_EXPORT_DIRECTORY",
".",
"from_buffer",
... | Parses the EmportDataDirectory and returns an instance of ExportDirectoryData | [
"Parses",
"the",
"EmportDataDirectory",
"and",
"returns",
"an",
"instance",
"of",
"ExportDirectoryData"
] | 41ee009832aba19603f33d1fd3483b84d6684ebf | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/pe.py#L573-L601 | train | 52,314 |
sashs/filebytes | filebytes/pe.py | PE._parseDataDirectoryImport | def _parseDataDirectoryImport(self, dataDirectoryEntry, importSection):
"""Parses the ImportDataDirectory and returns a list of ImportDescriptorData"""
if not importSection:
return
raw_bytes = (c_ubyte * dataDirectoryEntry.Size).from_buffer(importSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, importSection))
offset = 0
import_descriptors = []
while True:
import_descriptor = IMAGE_IMPORT_DESCRIPTOR.from_buffer(raw_bytes, offset)
if import_descriptor.OriginalFirstThunk == 0:
break
else:
nameOffset = to_offset(import_descriptor.Name, importSection)
checkOffset(nameOffset, importSection)
dllName = get_str(importSection.raw, nameOffset)
import_name_table = self.__parseThunks(import_descriptor.OriginalFirstThunk, importSection)
import_address_table = self.__parseThunks(import_descriptor.FirstThunk, importSection)
import_descriptors.append(ImportDescriptorData(header=import_descriptor, dllName=dllName, importNameTable=import_name_table, importAddressTable=import_address_table))
offset += sizeof(IMAGE_IMPORT_DESCRIPTOR)
return import_descriptors | python | def _parseDataDirectoryImport(self, dataDirectoryEntry, importSection):
"""Parses the ImportDataDirectory and returns a list of ImportDescriptorData"""
if not importSection:
return
raw_bytes = (c_ubyte * dataDirectoryEntry.Size).from_buffer(importSection.raw, to_offset(dataDirectoryEntry.VirtualAddress, importSection))
offset = 0
import_descriptors = []
while True:
import_descriptor = IMAGE_IMPORT_DESCRIPTOR.from_buffer(raw_bytes, offset)
if import_descriptor.OriginalFirstThunk == 0:
break
else:
nameOffset = to_offset(import_descriptor.Name, importSection)
checkOffset(nameOffset, importSection)
dllName = get_str(importSection.raw, nameOffset)
import_name_table = self.__parseThunks(import_descriptor.OriginalFirstThunk, importSection)
import_address_table = self.__parseThunks(import_descriptor.FirstThunk, importSection)
import_descriptors.append(ImportDescriptorData(header=import_descriptor, dllName=dllName, importNameTable=import_name_table, importAddressTable=import_address_table))
offset += sizeof(IMAGE_IMPORT_DESCRIPTOR)
return import_descriptors | [
"def",
"_parseDataDirectoryImport",
"(",
"self",
",",
"dataDirectoryEntry",
",",
"importSection",
")",
":",
"if",
"not",
"importSection",
":",
"return",
"raw_bytes",
"=",
"(",
"c_ubyte",
"*",
"dataDirectoryEntry",
".",
"Size",
")",
".",
"from_buffer",
"(",
"impo... | Parses the ImportDataDirectory and returns a list of ImportDescriptorData | [
"Parses",
"the",
"ImportDataDirectory",
"and",
"returns",
"a",
"list",
"of",
"ImportDescriptorData"
] | 41ee009832aba19603f33d1fd3483b84d6684ebf | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/pe.py#L603-L629 | train | 52,315 |
sashs/filebytes | filebytes/pe.py | PE.__parseThunks | def __parseThunks(self, thunkRVA, importSection):
"""Parses the thunks and returns a list"""
offset = to_offset(thunkRVA, importSection)
table_offset = 0
thunks = []
while True:
thunk = IMAGE_THUNK_DATA.from_buffer(importSection.raw, offset)
offset += sizeof(IMAGE_THUNK_DATA)
if thunk.Ordinal == 0:
break
thunkData = ThunkData(header=thunk, rva=table_offset+thunkRVA,ordinal=None, importByName=None)
if to_offset(thunk.AddressOfData, importSection) > 0 and to_offset(thunk.AddressOfData, importSection) < len(self._bytes):
self.__parseThunkData(thunkData, importSection)
thunks.append(thunkData)
table_offset += 4
return thunks | python | def __parseThunks(self, thunkRVA, importSection):
"""Parses the thunks and returns a list"""
offset = to_offset(thunkRVA, importSection)
table_offset = 0
thunks = []
while True:
thunk = IMAGE_THUNK_DATA.from_buffer(importSection.raw, offset)
offset += sizeof(IMAGE_THUNK_DATA)
if thunk.Ordinal == 0:
break
thunkData = ThunkData(header=thunk, rva=table_offset+thunkRVA,ordinal=None, importByName=None)
if to_offset(thunk.AddressOfData, importSection) > 0 and to_offset(thunk.AddressOfData, importSection) < len(self._bytes):
self.__parseThunkData(thunkData, importSection)
thunks.append(thunkData)
table_offset += 4
return thunks | [
"def",
"__parseThunks",
"(",
"self",
",",
"thunkRVA",
",",
"importSection",
")",
":",
"offset",
"=",
"to_offset",
"(",
"thunkRVA",
",",
"importSection",
")",
"table_offset",
"=",
"0",
"thunks",
"=",
"[",
"]",
"while",
"True",
":",
"thunk",
"=",
"IMAGE_THUN... | Parses the thunks and returns a list | [
"Parses",
"the",
"thunks",
"and",
"returns",
"a",
"list"
] | 41ee009832aba19603f33d1fd3483b84d6684ebf | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/pe.py#L672-L687 | train | 52,316 |
sashs/filebytes | filebytes/pe.py | PE.__parseThunkData | def __parseThunkData(self, thunk,importSection):
"""Parses the data of a thunk and sets the data"""
offset = to_offset(thunk.header.AddressOfData, importSection)
if 0xf0000000 & thunk.header.AddressOfData == 0x80000000:
thunk.ordinal = thunk.header.AddressOfData & 0x0fffffff
else:
ibn = IMAGE_IMPORT_BY_NAME.from_buffer(importSection.raw, offset)
checkOffset(offset+2, importSection)
name = get_str(importSection.raw, offset+2)
thunk.importByName = ImportByNameData(header=ibn, hint=ibn.Hint, name=name) | python | def __parseThunkData(self, thunk,importSection):
"""Parses the data of a thunk and sets the data"""
offset = to_offset(thunk.header.AddressOfData, importSection)
if 0xf0000000 & thunk.header.AddressOfData == 0x80000000:
thunk.ordinal = thunk.header.AddressOfData & 0x0fffffff
else:
ibn = IMAGE_IMPORT_BY_NAME.from_buffer(importSection.raw, offset)
checkOffset(offset+2, importSection)
name = get_str(importSection.raw, offset+2)
thunk.importByName = ImportByNameData(header=ibn, hint=ibn.Hint, name=name) | [
"def",
"__parseThunkData",
"(",
"self",
",",
"thunk",
",",
"importSection",
")",
":",
"offset",
"=",
"to_offset",
"(",
"thunk",
".",
"header",
".",
"AddressOfData",
",",
"importSection",
")",
"if",
"0xf0000000",
"&",
"thunk",
".",
"header",
".",
"AddressOfDa... | Parses the data of a thunk and sets the data | [
"Parses",
"the",
"data",
"of",
"a",
"thunk",
"and",
"sets",
"the",
"data"
] | 41ee009832aba19603f33d1fd3483b84d6684ebf | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/pe.py#L689-L699 | train | 52,317 |
sashs/filebytes | filebytes/ctypes_helper.py | get_ptr | def get_ptr(data, offset=None, ptr_type=ctypes.c_void_p):
"""Returns a void pointer to the data"""
ptr = ctypes.cast(ctypes.pointer(data), ctypes.c_void_p)
if offset:
ptr = ctypes.c_void_p(ptr.value + offset)
if ptr_type != ctypes.c_void_p:
ptr = ctypes.cast(ptr, ptr_type)
return ptr | python | def get_ptr(data, offset=None, ptr_type=ctypes.c_void_p):
"""Returns a void pointer to the data"""
ptr = ctypes.cast(ctypes.pointer(data), ctypes.c_void_p)
if offset:
ptr = ctypes.c_void_p(ptr.value + offset)
if ptr_type != ctypes.c_void_p:
ptr = ctypes.cast(ptr, ptr_type)
return ptr | [
"def",
"get_ptr",
"(",
"data",
",",
"offset",
"=",
"None",
",",
"ptr_type",
"=",
"ctypes",
".",
"c_void_p",
")",
":",
"ptr",
"=",
"ctypes",
".",
"cast",
"(",
"ctypes",
".",
"pointer",
"(",
"data",
")",
",",
"ctypes",
".",
"c_void_p",
")",
"if",
"of... | Returns a void pointer to the data | [
"Returns",
"a",
"void",
"pointer",
"to",
"the",
"data"
] | 41ee009832aba19603f33d1fd3483b84d6684ebf | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/ctypes_helper.py#L33-L43 | train | 52,318 |
sashs/filebytes | filebytes/ctypes_helper.py | to_ubyte_array | def to_ubyte_array(barray):
"""Returns a c_ubyte_array filled with the given data of a bytearray or bytes"""
bs = (ctypes.c_ubyte * len(barray))()
pack_into('%ds' % len(barray), bs, 0, barray)
return bs | python | def to_ubyte_array(barray):
"""Returns a c_ubyte_array filled with the given data of a bytearray or bytes"""
bs = (ctypes.c_ubyte * len(barray))()
pack_into('%ds' % len(barray), bs, 0, barray)
return bs | [
"def",
"to_ubyte_array",
"(",
"barray",
")",
":",
"bs",
"=",
"(",
"ctypes",
".",
"c_ubyte",
"*",
"len",
"(",
"barray",
")",
")",
"(",
")",
"pack_into",
"(",
"'%ds'",
"%",
"len",
"(",
"barray",
")",
",",
"bs",
",",
"0",
",",
"barray",
")",
"return... | Returns a c_ubyte_array filled with the given data of a bytearray or bytes | [
"Returns",
"a",
"c_ubyte_array",
"filled",
"with",
"the",
"given",
"data",
"of",
"a",
"bytearray",
"or",
"bytes"
] | 41ee009832aba19603f33d1fd3483b84d6684ebf | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/ctypes_helper.py#L48-L53 | train | 52,319 |
sashs/filebytes | filebytes/binary.py | Binary._readFile | def _readFile(self, fileName):
"""
Returns the bytes of the file.
"""
with open(fileName, 'rb') as binFile:
b = binFile.read()
return to_ubyte_array(b) | python | def _readFile(self, fileName):
"""
Returns the bytes of the file.
"""
with open(fileName, 'rb') as binFile:
b = binFile.read()
return to_ubyte_array(b) | [
"def",
"_readFile",
"(",
"self",
",",
"fileName",
")",
":",
"with",
"open",
"(",
"fileName",
",",
"'rb'",
")",
"as",
"binFile",
":",
"b",
"=",
"binFile",
".",
"read",
"(",
")",
"return",
"to_ubyte_array",
"(",
"b",
")"
] | Returns the bytes of the file. | [
"Returns",
"the",
"bytes",
"of",
"the",
"file",
"."
] | 41ee009832aba19603f33d1fd3483b84d6684ebf | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/binary.py#L71-L77 | train | 52,320 |
sashs/filebytes | filebytes/elf.py | ELF._parseElfHeader | def _parseElfHeader(self, data):
"""Returns the elf header"""
ehdr = self.__classes.EHDR.from_buffer(data)
return EhdrData(header=ehdr) | python | def _parseElfHeader(self, data):
"""Returns the elf header"""
ehdr = self.__classes.EHDR.from_buffer(data)
return EhdrData(header=ehdr) | [
"def",
"_parseElfHeader",
"(",
"self",
",",
"data",
")",
":",
"ehdr",
"=",
"self",
".",
"__classes",
".",
"EHDR",
".",
"from_buffer",
"(",
"data",
")",
"return",
"EhdrData",
"(",
"header",
"=",
"ehdr",
")"
] | Returns the elf header | [
"Returns",
"the",
"elf",
"header"
] | 41ee009832aba19603f33d1fd3483b84d6684ebf | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/elf.py#L898-L901 | train | 52,321 |
sashs/filebytes | filebytes/elf.py | ELF._parseSegments | def _parseSegments(self, data, elfHeader):
"""Return a list of segments"""
offset = elfHeader.header.e_phoff
segments = []
for i in range(elfHeader.header.e_phnum):
phdr = self.__classes.PHDR.from_buffer(data, offset)
segment_bytes = (c_ubyte * phdr.p_filesz).from_buffer(data, phdr.p_offset)
phdrData = PhdrData(header=phdr, raw=segment_bytes, bytes=bytearray(segment_bytes), type=PT[phdr.p_type], vaddr=phdr.p_vaddr, offset=phdr.p_offset)
segments.append(phdrData)
offset += elfHeader.header.e_phentsize
return segments | python | def _parseSegments(self, data, elfHeader):
"""Return a list of segments"""
offset = elfHeader.header.e_phoff
segments = []
for i in range(elfHeader.header.e_phnum):
phdr = self.__classes.PHDR.from_buffer(data, offset)
segment_bytes = (c_ubyte * phdr.p_filesz).from_buffer(data, phdr.p_offset)
phdrData = PhdrData(header=phdr, raw=segment_bytes, bytes=bytearray(segment_bytes), type=PT[phdr.p_type], vaddr=phdr.p_vaddr, offset=phdr.p_offset)
segments.append(phdrData)
offset += elfHeader.header.e_phentsize
return segments | [
"def",
"_parseSegments",
"(",
"self",
",",
"data",
",",
"elfHeader",
")",
":",
"offset",
"=",
"elfHeader",
".",
"header",
".",
"e_phoff",
"segments",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"elfHeader",
".",
"header",
".",
"e_phnum",
")",
":",
... | Return a list of segments | [
"Return",
"a",
"list",
"of",
"segments"
] | 41ee009832aba19603f33d1fd3483b84d6684ebf | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/elf.py#L903-L916 | train | 52,322 |
sashs/filebytes | filebytes/elf.py | ELF._parseSections | def _parseSections(self, data, elfHeader):
"""Returns a list of sections"""
offset = elfHeader.header.e_shoff
shdrs = []
for i in range(elfHeader.header.e_shnum):
shdr = self.__classes.SHDR.from_buffer(data, offset)
section_bytes = None
ba_section_bytes = None
if shdr.sh_type != SHT.NOBITS:
section_bytes = (c_ubyte * shdr.sh_size).from_buffer(data, shdr.sh_offset)
ba_section_bytes = bytearray(section_bytes)
shdrs.append(ShdrData(name=None,header=shdr, raw=section_bytes, bytes=ba_section_bytes))
offset += elfHeader.header.e_shentsize
if elfHeader.header.e_shstrndx != SHN.UNDEF:
strtab = shdrs[elfHeader.header.e_shstrndx]
strtab_offset = strtab.header.sh_offset
for section in shdrs:
section.name = get_str(strtab.raw, section.header.sh_name)
return shdrs | python | def _parseSections(self, data, elfHeader):
"""Returns a list of sections"""
offset = elfHeader.header.e_shoff
shdrs = []
for i in range(elfHeader.header.e_shnum):
shdr = self.__classes.SHDR.from_buffer(data, offset)
section_bytes = None
ba_section_bytes = None
if shdr.sh_type != SHT.NOBITS:
section_bytes = (c_ubyte * shdr.sh_size).from_buffer(data, shdr.sh_offset)
ba_section_bytes = bytearray(section_bytes)
shdrs.append(ShdrData(name=None,header=shdr, raw=section_bytes, bytes=ba_section_bytes))
offset += elfHeader.header.e_shentsize
if elfHeader.header.e_shstrndx != SHN.UNDEF:
strtab = shdrs[elfHeader.header.e_shstrndx]
strtab_offset = strtab.header.sh_offset
for section in shdrs:
section.name = get_str(strtab.raw, section.header.sh_name)
return shdrs | [
"def",
"_parseSections",
"(",
"self",
",",
"data",
",",
"elfHeader",
")",
":",
"offset",
"=",
"elfHeader",
".",
"header",
".",
"e_shoff",
"shdrs",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"elfHeader",
".",
"header",
".",
"e_shnum",
")",
":",
"s... | Returns a list of sections | [
"Returns",
"a",
"list",
"of",
"sections"
] | 41ee009832aba19603f33d1fd3483b84d6684ebf | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/elf.py#L918-L939 | train | 52,323 |
sashs/filebytes | filebytes/elf.py | ELF._parseSymbols | def _parseSymbols(self, sections):
"""Sets a list of symbols in each DYNSYM and SYMTAB section"""
for section in sections:
strtab = sections[section.header.sh_link]
if section.header.sh_type in (int(SHT.DYNSYM), int(SHT.SYMTAB)):
section.symbols = self.__parseSymbolEntriesForSection(section, strtab) | python | def _parseSymbols(self, sections):
"""Sets a list of symbols in each DYNSYM and SYMTAB section"""
for section in sections:
strtab = sections[section.header.sh_link]
if section.header.sh_type in (int(SHT.DYNSYM), int(SHT.SYMTAB)):
section.symbols = self.__parseSymbolEntriesForSection(section, strtab) | [
"def",
"_parseSymbols",
"(",
"self",
",",
"sections",
")",
":",
"for",
"section",
"in",
"sections",
":",
"strtab",
"=",
"sections",
"[",
"section",
".",
"header",
".",
"sh_link",
"]",
"if",
"section",
".",
"header",
".",
"sh_type",
"in",
"(",
"int",
"(... | Sets a list of symbols in each DYNSYM and SYMTAB section | [
"Sets",
"a",
"list",
"of",
"symbols",
"in",
"each",
"DYNSYM",
"and",
"SYMTAB",
"section"
] | 41ee009832aba19603f33d1fd3483b84d6684ebf | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/elf.py#L941-L946 | train | 52,324 |
sashs/filebytes | filebytes/elf.py | ELF._parseRelocations | def _parseRelocations(self, sections):
"""Parses the relocations and add those to the section"""
for section in sections:
if section.header.sh_link != SHN.UNDEF and section.header.sh_type in (SHT.REL, SHT.RELA):
symbols = sections[section.header.sh_link].symbols
relocations = self.__parseRelocationEntries(section, symbols)
section.relocations = relocations | python | def _parseRelocations(self, sections):
"""Parses the relocations and add those to the section"""
for section in sections:
if section.header.sh_link != SHN.UNDEF and section.header.sh_type in (SHT.REL, SHT.RELA):
symbols = sections[section.header.sh_link].symbols
relocations = self.__parseRelocationEntries(section, symbols)
section.relocations = relocations | [
"def",
"_parseRelocations",
"(",
"self",
",",
"sections",
")",
":",
"for",
"section",
"in",
"sections",
":",
"if",
"section",
".",
"header",
".",
"sh_link",
"!=",
"SHN",
".",
"UNDEF",
"and",
"section",
".",
"header",
".",
"sh_type",
"in",
"(",
"SHT",
"... | Parses the relocations and add those to the section | [
"Parses",
"the",
"relocations",
"and",
"add",
"those",
"to",
"the",
"section"
] | 41ee009832aba19603f33d1fd3483b84d6684ebf | https://github.com/sashs/filebytes/blob/41ee009832aba19603f33d1fd3483b84d6684ebf/filebytes/elf.py#L965-L971 | train | 52,325 |
pyqg/pyqg | pyqg/model.py | run_with_snapshots | def run_with_snapshots(self, tsnapstart=0., tsnapint=432000.):
"""Run the model forward, yielding to user code at specified intervals.
Parameters
----------
tsnapstart : int
The timestep at which to begin yielding.
tstapint : int
The interval at which to yield.
"""
tsnapints = np.ceil(tsnapint/self.dt)
while(self.t < self.tmax):
self._step_forward()
if self.t>=tsnapstart and (self.tc%tsnapints)==0:
yield self.t
return | python | def run_with_snapshots(self, tsnapstart=0., tsnapint=432000.):
"""Run the model forward, yielding to user code at specified intervals.
Parameters
----------
tsnapstart : int
The timestep at which to begin yielding.
tstapint : int
The interval at which to yield.
"""
tsnapints = np.ceil(tsnapint/self.dt)
while(self.t < self.tmax):
self._step_forward()
if self.t>=tsnapstart and (self.tc%tsnapints)==0:
yield self.t
return | [
"def",
"run_with_snapshots",
"(",
"self",
",",
"tsnapstart",
"=",
"0.",
",",
"tsnapint",
"=",
"432000.",
")",
":",
"tsnapints",
"=",
"np",
".",
"ceil",
"(",
"tsnapint",
"/",
"self",
".",
"dt",
")",
"while",
"(",
"self",
".",
"t",
"<",
"self",
".",
... | Run the model forward, yielding to user code at specified intervals.
Parameters
----------
tsnapstart : int
The timestep at which to begin yielding.
tstapint : int
The interval at which to yield. | [
"Run",
"the",
"model",
"forward",
"yielding",
"to",
"user",
"code",
"at",
"specified",
"intervals",
"."
] | 4f41584a12bcbf8657785b8cb310fa5065ecabd1 | https://github.com/pyqg/pyqg/blob/4f41584a12bcbf8657785b8cb310fa5065ecabd1/pyqg/model.py#L210-L228 | train | 52,326 |
pyqg/pyqg | pyqg/model.py | vertical_modes | def vertical_modes(self):
""" Calculate standard vertical modes. Simply
the eigenvectors of the stretching matrix S """
evals,evecs = np.linalg.eig(-self.S)
asort = evals.argsort()
# deformation wavenumbers and radii
self.kdi2 = evals[asort]
self.radii = np.zeros_like(self.kdi2)
self.radii[0] = self.g*self.H/np.abs(self.f) # barotropic def. radius
self.radii[1:] = 1./np.sqrt(self.kdi2[1:])
# eigenstructure
self.pmodes = evecs[:,asort]
# normalize to have unit L2-norm
Ai = (self.H / (self.Hi[:,np.newaxis]*(self.pmodes**2)).sum(axis=0))**0.5
self.pmodes = Ai[np.newaxis,:]*self.pmodes | python | def vertical_modes(self):
""" Calculate standard vertical modes. Simply
the eigenvectors of the stretching matrix S """
evals,evecs = np.linalg.eig(-self.S)
asort = evals.argsort()
# deformation wavenumbers and radii
self.kdi2 = evals[asort]
self.radii = np.zeros_like(self.kdi2)
self.radii[0] = self.g*self.H/np.abs(self.f) # barotropic def. radius
self.radii[1:] = 1./np.sqrt(self.kdi2[1:])
# eigenstructure
self.pmodes = evecs[:,asort]
# normalize to have unit L2-norm
Ai = (self.H / (self.Hi[:,np.newaxis]*(self.pmodes**2)).sum(axis=0))**0.5
self.pmodes = Ai[np.newaxis,:]*self.pmodes | [
"def",
"vertical_modes",
"(",
"self",
")",
":",
"evals",
",",
"evecs",
"=",
"np",
".",
"linalg",
".",
"eig",
"(",
"-",
"self",
".",
"S",
")",
"asort",
"=",
"evals",
".",
"argsort",
"(",
")",
"# deformation wavenumbers and radii",
"self",
".",
"kdi2",
"... | Calculate standard vertical modes. Simply
the eigenvectors of the stretching matrix S | [
"Calculate",
"standard",
"vertical",
"modes",
".",
"Simply",
"the",
"eigenvectors",
"of",
"the",
"stretching",
"matrix",
"S"
] | 4f41584a12bcbf8657785b8cb310fa5065ecabd1 | https://github.com/pyqg/pyqg/blob/4f41584a12bcbf8657785b8cb310fa5065ecabd1/pyqg/model.py#L236-L255 | train | 52,327 |
pyqg/pyqg | pyqg/sqg_model.py | SQGModel.set_U | def set_U(self, U):
"""Set background zonal flow"""
self.Ubg = np.asarray(U)[np.newaxis,...] | python | def set_U(self, U):
"""Set background zonal flow"""
self.Ubg = np.asarray(U)[np.newaxis,...] | [
"def",
"set_U",
"(",
"self",
",",
"U",
")",
":",
"self",
".",
"Ubg",
"=",
"np",
".",
"asarray",
"(",
"U",
")",
"[",
"np",
".",
"newaxis",
",",
"...",
"]"
] | Set background zonal flow | [
"Set",
"background",
"zonal",
"flow"
] | 4f41584a12bcbf8657785b8cb310fa5065ecabd1 | https://github.com/pyqg/pyqg/blob/4f41584a12bcbf8657785b8cb310fa5065ecabd1/pyqg/sqg_model.py#L77-L79 | train | 52,328 |
pyqg/pyqg | pyqg/particles.py | LagrangianParticleArray2D._rk4_integrate | def _rk4_integrate(self, x, y, uv0fun, uv1fun, dt):
"""Integrates positions x, y using velocity functions
uv0fun, uv1fun. Returns dx and dy, the displacements."""
u0, v0 = uv0fun(x, y)
k1u = dt*u0
k1v = dt*v0
x11 = self._wrap_x(x + 0.5*k1u)
y11 = self._wrap_y(y + 0.5*k1v)
u11, v11 = uv1fun(x11, y11)
k2u = dt*u11
k2v = dt*v11
x12 = self._wrap_x(x + 0.5*k2u)
y12 = self._wrap_y(y + 0.5*k2v)
u12, v12 = uv1fun(x12, y12)
k3u = dt*u12
k3v = dt*v12
x13 = self._wrap_x(x + k3u)
y13 = self._wrap_y(y + k3v)
u13, v13 = uv1fun(x13, y13)
k4u = dt*u13
k4v = dt*v13
# update
dx = 6**-1*(k1u + 2*k2u + 2*k3u + k4u)
dy = 6**-1*(k1v + 2*k2v + 2*k3v + k4v)
return dx, dy | python | def _rk4_integrate(self, x, y, uv0fun, uv1fun, dt):
"""Integrates positions x, y using velocity functions
uv0fun, uv1fun. Returns dx and dy, the displacements."""
u0, v0 = uv0fun(x, y)
k1u = dt*u0
k1v = dt*v0
x11 = self._wrap_x(x + 0.5*k1u)
y11 = self._wrap_y(y + 0.5*k1v)
u11, v11 = uv1fun(x11, y11)
k2u = dt*u11
k2v = dt*v11
x12 = self._wrap_x(x + 0.5*k2u)
y12 = self._wrap_y(y + 0.5*k2v)
u12, v12 = uv1fun(x12, y12)
k3u = dt*u12
k3v = dt*v12
x13 = self._wrap_x(x + k3u)
y13 = self._wrap_y(y + k3v)
u13, v13 = uv1fun(x13, y13)
k4u = dt*u13
k4v = dt*v13
# update
dx = 6**-1*(k1u + 2*k2u + 2*k3u + k4u)
dy = 6**-1*(k1v + 2*k2v + 2*k3v + k4v)
return dx, dy | [
"def",
"_rk4_integrate",
"(",
"self",
",",
"x",
",",
"y",
",",
"uv0fun",
",",
"uv1fun",
",",
"dt",
")",
":",
"u0",
",",
"v0",
"=",
"uv0fun",
"(",
"x",
",",
"y",
")",
"k1u",
"=",
"dt",
"*",
"u0",
"k1v",
"=",
"dt",
"*",
"v0",
"x11",
"=",
"sel... | Integrates positions x, y using velocity functions
uv0fun, uv1fun. Returns dx and dy, the displacements. | [
"Integrates",
"positions",
"x",
"y",
"using",
"velocity",
"functions",
"uv0fun",
"uv1fun",
".",
"Returns",
"dx",
"and",
"dy",
"the",
"displacements",
"."
] | 4f41584a12bcbf8657785b8cb310fa5065ecabd1 | https://github.com/pyqg/pyqg/blob/4f41584a12bcbf8657785b8cb310fa5065ecabd1/pyqg/particles.py#L83-L108 | train | 52,329 |
pyqg/pyqg | pyqg/particles.py | LagrangianParticleArray2D._distance | def _distance(self, x0, y0, x1, y1):
"""Utitlity function to compute distance between points."""
dx = x1-x0
dy = y1-y0
# roll displacements across the borders
if self.pix:
dx[ dx > self.Lx/2 ] -= self.Lx
dx[ dx < -self.Lx/2 ] += self.Lx
if self.piy:
dy[ dy > self.Ly/2 ] -= self.Ly
dy[ dy < -self.Ly/2 ] += self.Ly
return dx, dy | python | def _distance(self, x0, y0, x1, y1):
"""Utitlity function to compute distance between points."""
dx = x1-x0
dy = y1-y0
# roll displacements across the borders
if self.pix:
dx[ dx > self.Lx/2 ] -= self.Lx
dx[ dx < -self.Lx/2 ] += self.Lx
if self.piy:
dy[ dy > self.Ly/2 ] -= self.Ly
dy[ dy < -self.Ly/2 ] += self.Ly
return dx, dy | [
"def",
"_distance",
"(",
"self",
",",
"x0",
",",
"y0",
",",
"x1",
",",
"y1",
")",
":",
"dx",
"=",
"x1",
"-",
"x0",
"dy",
"=",
"y1",
"-",
"y0",
"# roll displacements across the borders",
"if",
"self",
".",
"pix",
":",
"dx",
"[",
"dx",
">",
"self",
... | Utitlity function to compute distance between points. | [
"Utitlity",
"function",
"to",
"compute",
"distance",
"between",
"points",
"."
] | 4f41584a12bcbf8657785b8cb310fa5065ecabd1 | https://github.com/pyqg/pyqg/blob/4f41584a12bcbf8657785b8cb310fa5065ecabd1/pyqg/particles.py#L124-L135 | train | 52,330 |
pyqg/pyqg | pyqg/diagnostic_tools.py | spec_var | def spec_var(model, ph):
"""Compute variance of ``p`` from Fourier coefficients ``ph``.
Parameters
----------
model : pyqg.Model instance
The model object from which `ph` originates
ph : complex array
The field on which to compute the variance
Returns
-------
var_dens : float
The variance of `ph`
"""
var_dens = 2. * np.abs(ph)**2 / model.M**2
# only half of coefs [0] and [nx/2+1] due to symmetry in real fft2
var_dens[...,0] /= 2
var_dens[...,-1] /= 2
return var_dens.sum(axis=(-1,-2)) | python | def spec_var(model, ph):
"""Compute variance of ``p`` from Fourier coefficients ``ph``.
Parameters
----------
model : pyqg.Model instance
The model object from which `ph` originates
ph : complex array
The field on which to compute the variance
Returns
-------
var_dens : float
The variance of `ph`
"""
var_dens = 2. * np.abs(ph)**2 / model.M**2
# only half of coefs [0] and [nx/2+1] due to symmetry in real fft2
var_dens[...,0] /= 2
var_dens[...,-1] /= 2
return var_dens.sum(axis=(-1,-2)) | [
"def",
"spec_var",
"(",
"model",
",",
"ph",
")",
":",
"var_dens",
"=",
"2.",
"*",
"np",
".",
"abs",
"(",
"ph",
")",
"**",
"2",
"/",
"model",
".",
"M",
"**",
"2",
"# only half of coefs [0] and [nx/2+1] due to symmetry in real fft2",
"var_dens",
"[",
"...",
... | Compute variance of ``p`` from Fourier coefficients ``ph``.
Parameters
----------
model : pyqg.Model instance
The model object from which `ph` originates
ph : complex array
The field on which to compute the variance
Returns
-------
var_dens : float
The variance of `ph` | [
"Compute",
"variance",
"of",
"p",
"from",
"Fourier",
"coefficients",
"ph",
"."
] | 4f41584a12bcbf8657785b8cb310fa5065ecabd1 | https://github.com/pyqg/pyqg/blob/4f41584a12bcbf8657785b8cb310fa5065ecabd1/pyqg/diagnostic_tools.py#L7-L27 | train | 52,331 |
pyqg/pyqg | pyqg/diagnostic_tools.py | spec_sum | def spec_sum(ph2):
"""Compute total spectral sum of the real spectral quantity``ph^2``.
Parameters
----------
model : pyqg.Model instance
The model object from which `ph` originates
ph2 : real array
The field on which to compute the sum
Returns
-------
var_dens : float
The sum of `ph2`
"""
ph2 = 2.*ph2
ph2[...,0] = ph2[...,0]/2.
ph2[...,-1] = ph2[...,-1]/2.
return ph2.sum(axis=(-1,-2)) | python | def spec_sum(ph2):
"""Compute total spectral sum of the real spectral quantity``ph^2``.
Parameters
----------
model : pyqg.Model instance
The model object from which `ph` originates
ph2 : real array
The field on which to compute the sum
Returns
-------
var_dens : float
The sum of `ph2`
"""
ph2 = 2.*ph2
ph2[...,0] = ph2[...,0]/2.
ph2[...,-1] = ph2[...,-1]/2.
return ph2.sum(axis=(-1,-2)) | [
"def",
"spec_sum",
"(",
"ph2",
")",
":",
"ph2",
"=",
"2.",
"*",
"ph2",
"ph2",
"[",
"...",
",",
"0",
"]",
"=",
"ph2",
"[",
"...",
",",
"0",
"]",
"/",
"2.",
"ph2",
"[",
"...",
",",
"-",
"1",
"]",
"=",
"ph2",
"[",
"...",
",",
"-",
"1",
"]"... | Compute total spectral sum of the real spectral quantity``ph^2``.
Parameters
----------
model : pyqg.Model instance
The model object from which `ph` originates
ph2 : real array
The field on which to compute the sum
Returns
-------
var_dens : float
The sum of `ph2` | [
"Compute",
"total",
"spectral",
"sum",
"of",
"the",
"real",
"spectral",
"quantity",
"ph^2",
"."
] | 4f41584a12bcbf8657785b8cb310fa5065ecabd1 | https://github.com/pyqg/pyqg/blob/4f41584a12bcbf8657785b8cb310fa5065ecabd1/pyqg/diagnostic_tools.py#L30-L50 | train | 52,332 |
pyqg/pyqg | pyqg/diagnostic_tools.py | calc_ispec | def calc_ispec(model, ph):
"""Compute isotropic spectrum `phr` of `ph` from 2D spectrum.
Parameters
----------
model : pyqg.Model instance
The model object from which `ph` originates
ph : complex array
The field on which to compute the variance
Returns
-------
kr : array
isotropic wavenumber
phr : array
isotropic spectrum
"""
if model.kk.max()>model.ll.max():
kmax = model.ll.max()
else:
kmax = model.kk.max()
# create radial wavenumber
dkr = np.sqrt(model.dk**2 + model.dl**2)
kr = np.arange(dkr/2.,kmax+dkr,dkr)
phr = np.zeros(kr.size)
for i in range(kr.size):
fkr = (model.wv>=kr[i]-dkr/2) & (model.wv<=kr[i]+dkr/2)
dth = pi / (fkr.sum()-1)
phr[i] = ph[fkr].sum() * kr[i] * dth
return kr, phr | python | def calc_ispec(model, ph):
"""Compute isotropic spectrum `phr` of `ph` from 2D spectrum.
Parameters
----------
model : pyqg.Model instance
The model object from which `ph` originates
ph : complex array
The field on which to compute the variance
Returns
-------
kr : array
isotropic wavenumber
phr : array
isotropic spectrum
"""
if model.kk.max()>model.ll.max():
kmax = model.ll.max()
else:
kmax = model.kk.max()
# create radial wavenumber
dkr = np.sqrt(model.dk**2 + model.dl**2)
kr = np.arange(dkr/2.,kmax+dkr,dkr)
phr = np.zeros(kr.size)
for i in range(kr.size):
fkr = (model.wv>=kr[i]-dkr/2) & (model.wv<=kr[i]+dkr/2)
dth = pi / (fkr.sum()-1)
phr[i] = ph[fkr].sum() * kr[i] * dth
return kr, phr | [
"def",
"calc_ispec",
"(",
"model",
",",
"ph",
")",
":",
"if",
"model",
".",
"kk",
".",
"max",
"(",
")",
">",
"model",
".",
"ll",
".",
"max",
"(",
")",
":",
"kmax",
"=",
"model",
".",
"ll",
".",
"max",
"(",
")",
"else",
":",
"kmax",
"=",
"mo... | Compute isotropic spectrum `phr` of `ph` from 2D spectrum.
Parameters
----------
model : pyqg.Model instance
The model object from which `ph` originates
ph : complex array
The field on which to compute the variance
Returns
-------
kr : array
isotropic wavenumber
phr : array
isotropic spectrum | [
"Compute",
"isotropic",
"spectrum",
"phr",
"of",
"ph",
"from",
"2D",
"spectrum",
"."
] | 4f41584a12bcbf8657785b8cb310fa5065ecabd1 | https://github.com/pyqg/pyqg/blob/4f41584a12bcbf8657785b8cb310fa5065ecabd1/pyqg/diagnostic_tools.py#L53-L86 | train | 52,333 |
pyqg/pyqg | pyqg/layered_model.py | LayeredModel._initialize_stretching_matrix | def _initialize_stretching_matrix(self):
""" Set up the stretching matrix """
self.S = np.zeros((self.nz, self.nz))
if (self.nz==2) and (self.rd) and (self.delta):
self.del1 = self.delta/(self.delta+1.)
self.del2 = (self.delta+1.)**-1
self.Us = self.Ubg[0]-self.Ubg[1]
self.F1 = self.rd**-2 / (1.+self.delta)
self.F2 = self.delta*self.F1
self.S[0,0], self.S[0,1] = -self.F1, self.F1
self.S[1,0], self.S[1,1] = self.F2, -self.F2
else:
for i in range(self.nz):
if i == 0:
self.S[i,i] = -self.f2/self.Hi[i]/self.gpi[i]
self.S[i,i+1] = self.f2/self.Hi[i]/self.gpi[i]
elif i == self.nz-1:
self.S[i,i] = -self.f2/self.Hi[i]/self.gpi[i-1]
self.S[i,i-1] = self.f2/self.Hi[i]/self.gpi[i-1]
else:
self.S[i,i-1] = self.f2/self.Hi[i]/self.gpi[i-1]
self.S[i,i] = -(self.f2/self.Hi[i]/self.gpi[i] +
self.f2/self.Hi[i]/self.gpi[i-1])
self.S[i,i+1] = self.f2/self.Hi[i]/self.gpi[i] | python | def _initialize_stretching_matrix(self):
""" Set up the stretching matrix """
self.S = np.zeros((self.nz, self.nz))
if (self.nz==2) and (self.rd) and (self.delta):
self.del1 = self.delta/(self.delta+1.)
self.del2 = (self.delta+1.)**-1
self.Us = self.Ubg[0]-self.Ubg[1]
self.F1 = self.rd**-2 / (1.+self.delta)
self.F2 = self.delta*self.F1
self.S[0,0], self.S[0,1] = -self.F1, self.F1
self.S[1,0], self.S[1,1] = self.F2, -self.F2
else:
for i in range(self.nz):
if i == 0:
self.S[i,i] = -self.f2/self.Hi[i]/self.gpi[i]
self.S[i,i+1] = self.f2/self.Hi[i]/self.gpi[i]
elif i == self.nz-1:
self.S[i,i] = -self.f2/self.Hi[i]/self.gpi[i-1]
self.S[i,i-1] = self.f2/self.Hi[i]/self.gpi[i-1]
else:
self.S[i,i-1] = self.f2/self.Hi[i]/self.gpi[i-1]
self.S[i,i] = -(self.f2/self.Hi[i]/self.gpi[i] +
self.f2/self.Hi[i]/self.gpi[i-1])
self.S[i,i+1] = self.f2/self.Hi[i]/self.gpi[i] | [
"def",
"_initialize_stretching_matrix",
"(",
"self",
")",
":",
"self",
".",
"S",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"nz",
",",
"self",
".",
"nz",
")",
")",
"if",
"(",
"self",
".",
"nz",
"==",
"2",
")",
"and",
"(",
"self",
".",
"rd"... | Set up the stretching matrix | [
"Set",
"up",
"the",
"stretching",
"matrix"
] | 4f41584a12bcbf8657785b8cb310fa5065ecabd1 | https://github.com/pyqg/pyqg/blob/4f41584a12bcbf8657785b8cb310fa5065ecabd1/pyqg/layered_model.py#L130-L162 | train | 52,334 |
pyqg/pyqg | pyqg/qg_model.py | QGModel.set_q1q2 | def set_q1q2(self, q1, q2, check=False):
"""Set upper and lower layer PV anomalies.
Parameters
----------
q1 : array-like
Upper layer PV anomaly in spatial coordinates.
q1 : array-like
Lower layer PV anomaly in spatial coordinates.
"""
self.set_q(np.vstack([q1[np.newaxis,:,:], q2[np.newaxis,:,:]]))
#self.q[0] = q1
#self.q[1] = q2
# initialize spectral PV
#self.qh = self.fft2(self.q)
# check that it works
if check:
np.testing.assert_allclose(self.q1, q1)
np.testing.assert_allclose(self.q1, self.ifft2(self.qh1)) | python | def set_q1q2(self, q1, q2, check=False):
"""Set upper and lower layer PV anomalies.
Parameters
----------
q1 : array-like
Upper layer PV anomaly in spatial coordinates.
q1 : array-like
Lower layer PV anomaly in spatial coordinates.
"""
self.set_q(np.vstack([q1[np.newaxis,:,:], q2[np.newaxis,:,:]]))
#self.q[0] = q1
#self.q[1] = q2
# initialize spectral PV
#self.qh = self.fft2(self.q)
# check that it works
if check:
np.testing.assert_allclose(self.q1, q1)
np.testing.assert_allclose(self.q1, self.ifft2(self.qh1)) | [
"def",
"set_q1q2",
"(",
"self",
",",
"q1",
",",
"q2",
",",
"check",
"=",
"False",
")",
":",
"self",
".",
"set_q",
"(",
"np",
".",
"vstack",
"(",
"[",
"q1",
"[",
"np",
".",
"newaxis",
",",
":",
",",
":",
"]",
",",
"q2",
"[",
"np",
".",
"newa... | Set upper and lower layer PV anomalies.
Parameters
----------
q1 : array-like
Upper layer PV anomaly in spatial coordinates.
q1 : array-like
Lower layer PV anomaly in spatial coordinates. | [
"Set",
"upper",
"and",
"lower",
"layer",
"PV",
"anomalies",
"."
] | 4f41584a12bcbf8657785b8cb310fa5065ecabd1 | https://github.com/pyqg/pyqg/blob/4f41584a12bcbf8657785b8cb310fa5065ecabd1/pyqg/qg_model.py#L170-L191 | train | 52,335 |
pyqg/pyqg | pyqg/qg_model.py | QGModel.set_U1U2 | def set_U1U2(self, U1, U2):
"""Set background zonal flow.
Parameters
----------
U1 : number
Upper layer flow. Units: m/s
U2 : number
Lower layer flow. Units: m/s
"""
if len(np.shape(U1)) == 0:
U1 = U1 * np.ones((self.ny))
if len(np.shape(U2)) == 0:
U2 = U2 * np.ones((self.ny))
#self.Ubg = np.array([U1,U2])[:,np.newaxis,np.newaxis]
self.U1 = U1
self.U2 = U2
self.Ubg = np.array([U1,U2]) | python | def set_U1U2(self, U1, U2):
"""Set background zonal flow.
Parameters
----------
U1 : number
Upper layer flow. Units: m/s
U2 : number
Lower layer flow. Units: m/s
"""
if len(np.shape(U1)) == 0:
U1 = U1 * np.ones((self.ny))
if len(np.shape(U2)) == 0:
U2 = U2 * np.ones((self.ny))
#self.Ubg = np.array([U1,U2])[:,np.newaxis,np.newaxis]
self.U1 = U1
self.U2 = U2
self.Ubg = np.array([U1,U2]) | [
"def",
"set_U1U2",
"(",
"self",
",",
"U1",
",",
"U2",
")",
":",
"if",
"len",
"(",
"np",
".",
"shape",
"(",
"U1",
")",
")",
"==",
"0",
":",
"U1",
"=",
"U1",
"*",
"np",
".",
"ones",
"(",
"(",
"self",
".",
"ny",
")",
")",
"if",
"len",
"(",
... | Set background zonal flow.
Parameters
----------
U1 : number
Upper layer flow. Units: m/s
U2 : number
Lower layer flow. Units: m/s | [
"Set",
"background",
"zonal",
"flow",
"."
] | 4f41584a12bcbf8657785b8cb310fa5065ecabd1 | https://github.com/pyqg/pyqg/blob/4f41584a12bcbf8657785b8cb310fa5065ecabd1/pyqg/qg_model.py#L193-L211 | train | 52,336 |
def _initialize_model_diagnostics(self):
    """Extra diagnostics for two-layer model"""
    # Barotropic enstrophy spectrum: squared magnitude of the
    # thickness-weighted (del1/del2) sum of the layer spectral PVs.
    self.add_diagnostic('entspec',
            description='barotropic enstrophy spectrum',
            function= (lambda self:
                      np.abs(self.del1*self.qh[0] + self.del2*self.qh[1])**2.)
    )

    # Spectral flux of APE, built from the baroclinic streamfunction
    # (ph[0]-ph[1]) and the Jacobian term Jptpc.
    self.add_diagnostic('APEflux',
            description='spectral flux of available potential energy',
            function= (lambda self:
                      self.rd**-2 * self.del1*self.del2 *
                      np.real((self.ph[0]-self.ph[1])*np.conj(self.Jptpc)) )
    )

    # Spectral flux of KE: per-layer streamfunction times the conjugate
    # of the advected-vorticity Jacobian, thickness-weighted and summed.
    self.add_diagnostic('KEflux',
            description='spectral flux of kinetic energy',
            function= (lambda self:
                      np.real(self.del1*self.ph[0]*np.conj(self.Jpxi[0])) +
                      np.real(self.del2*self.ph[1]*np.conj(self.Jpxi[1])) )
    )

    # APE generation spectrum by the background shear U.
    self.add_diagnostic('APEgenspec',
            description='spectrum of APE generation',
            function= (lambda self: self.U[:,np.newaxis] * self.rd**-2 * self.del1 * self.del2 *
                       np.real(1j*self.k*(self.del1*self.ph[0] + self.del2*self.ph[1]) *
                                np.conj(self.ph[0] - self.ph[1])) )
    )

    # Total APE generation, normalized by M**2.
    # NOTE(review): the second sum repeats the interior wavenumbers
    # k[:,1:-2] — presumably a real-FFT endpoint/Nyquist double-counting
    # correction; confirm against the spectral convention used elsewhere.
    self.add_diagnostic('APEgen',
            description='total APE generation',
            function= (lambda self: self.U * self.rd**-2 * self.del1 * self.del2 *
                       np.real((1j*self.k*
                            (self.del1*self.ph[0] + self.del2*self.ph[1]) *
                            np.conj(self.ph[0] - self.ph[1])).sum()
                            +(1j*self.k[:,1:-2]*
                            (self.del1*self.ph[0,:,1:-2] + self.del2*self.ph[1,:,1:-2]) *
                            np.conj(self.ph[0,:,1:-2] - self.ph[1,:,1:-2])).sum()) /
                            (self.M**2) )
    )
"""Extra diagnostics for two-layer model"""
self.add_diagnostic('entspec',
description='barotropic enstrophy spectrum',
function= (lambda self:
np.abs(self.del1*self.qh[0] + self.del2*self.qh[1])**2.)
)
self.add_diagnostic('APEflux',
description='spectral flux of available potential energy',
function= (lambda self:
self.rd**-2 * self.del1*self.del2 *
np.real((self.ph[0]-self.ph[1])*np.conj(self.Jptpc)) )
)
self.add_diagnostic('KEflux',
description='spectral flux of kinetic energy',
function= (lambda self:
np.real(self.del1*self.ph[0]*np.conj(self.Jpxi[0])) +
np.real(self.del2*self.ph[1]*np.conj(self.Jpxi[1])) )
)
self.add_diagnostic('APEgenspec',
description='spectrum of APE generation',
function= (lambda self: self.U[:,np.newaxis] * self.rd**-2 * self.del1 * self.del2 *
np.real(1j*self.k*(self.del1*self.ph[0] + self.del2*self.ph[1]) *
np.conj(self.ph[0] - self.ph[1])) )
)
self.add_diagnostic('APEgen',
description='total APE generation',
function= (lambda self: self.U * self.rd**-2 * self.del1 * self.del2 *
np.real((1j*self.k*
(self.del1*self.ph[0] + self.del2*self.ph[1]) *
np.conj(self.ph[0] - self.ph[1])).sum()
+(1j*self.k[:,1:-2]*
(self.del1*self.ph[0,:,1:-2] + self.del2*self.ph[1,:,1:-2]) *
np.conj(self.ph[0,:,1:-2] - self.ph[1,:,1:-2])).sum()) /
(self.M**2) )
) | [
"def",
"_initialize_model_diagnostics",
"(",
"self",
")",
":",
"self",
".",
"add_diagnostic",
"(",
"'entspec'",
",",
"description",
"=",
"'barotropic enstrophy spectrum'",
",",
"function",
"=",
"(",
"lambda",
"self",
":",
"np",
".",
"abs",
"(",
"self",
".",
"d... | Extra diagnostics for two-layer model | [
"Extra",
"diagnostics",
"for",
"two",
"-",
"layer",
"model"
] | 4f41584a12bcbf8657785b8cb310fa5065ecabd1 | https://github.com/pyqg/pyqg/blob/4f41584a12bcbf8657785b8cb310fa5065ecabd1/pyqg/qg_model.py#L246-L286 | train | 52,337 |
def calc_uv(self, x, y, prev=False):
    """Calculate velocity at x and y points due to the vortex velocity field.

    Assumes x and y are vortex positions and are ordered the same as
    self.x and self.y. The ordering is used to neglect vortex
    self-interaction.

    Parameters
    ----------
    x, y : array-like
        Evaluation points, one per vortex (length self.N).
    prev : bool, optional
        If True, use the previous vortex positions (self.xprev/self.yprev)
        as the sources instead of the current ones.

    Returns
    -------
    u, v : ndarray
        Velocity components at each (x, y) point.
    """
    assert len(x) == self.N
    assert len(y) == self.N
    u = np.zeros(self.N, self.x.dtype)
    v = np.zeros(self.N, self.y.dtype)
    # `xrange` was Python-2-only; `range` is the portable equivalent.
    for n in range(self.N):
        # Indices of every vortex except n: no self-interaction.
        # Computed once per iteration instead of three times.
        idx = np.r_[:n, n+1:self.N]
        if prev:
            x0 = self.xprev[idx]
            y0 = self.yprev[idx]
        else:
            x0 = self.x[idx]
            y0 = self.y[idx]
        s0 = self.s[idx]
        u0, v0 = self.uv_at_xy(x[n], y[n], x0, y0, s0)
        u[n] = u0.sum()
        v[n] = v0.sum()
    return u, v
"""Calculate velocity at x and y points due to vortex velocity field.
Assumes x and y are vortex positions and are ordered the same as
x0 and y0. The ordering is used to neglect to vortex self interaction."""
assert len(x) == self.N
assert len(y) == self.N
u = np.zeros(self.N, self.x.dtype)
v = np.zeros(self.N, self.y.dtype)
for n in xrange(self.N):
# don't include self interaction
if prev:
x0 = self.xprev[np.r_[:n,n+1:self.N]]
y0 = self.yprev[np.r_[:n,n+1:self.N]]
else:
x0 = self.x[np.r_[:n,n+1:self.N]]
y0 = self.y[np.r_[:n,n+1:self.N]]
s0 = self.s[np.r_[:n,n+1:self.N]]
u0, v0 = self.uv_at_xy(x[n], y[n], x0, y0, s0)
u[n] = u0.sum()
v[n] = v0.sum()
return u, v | [
"def",
"calc_uv",
"(",
"self",
",",
"x",
",",
"y",
",",
"prev",
"=",
"False",
")",
":",
"assert",
"len",
"(",
"x",
")",
"==",
"self",
".",
"N",
"assert",
"len",
"(",
"y",
")",
"==",
"self",
".",
"N",
"u",
"=",
"np",
".",
"zeros",
"(",
"self... | Calculate velocity at x and y points due to vortex velocity field.
Assumes x and y are vortex positions and are ordered the same as
x0 and y0. The ordering is used to neglect to vortex self interaction. | [
"Calculate",
"velocity",
"at",
"x",
"and",
"y",
"points",
"due",
"to",
"vortex",
"velocity",
"field",
".",
"Assumes",
"x",
"and",
"y",
"are",
"vortex",
"positions",
"and",
"are",
"ordered",
"the",
"same",
"as",
"x0",
"and",
"y0",
".",
"The",
"ordering",
... | 4f41584a12bcbf8657785b8cb310fa5065ecabd1 | https://github.com/pyqg/pyqg/blob/4f41584a12bcbf8657785b8cb310fa5065ecabd1/pyqg/point_vortex.py#L32-L52 | train | 52,338 |
def uv_at_xy(self, x, y, x0, y0, s0):
    """Return arrays (u, v): the velocity induced at point (x, y) by the
    vortices at (x0, y0) with strengths s0."""
    dx, dy = self.distance(x0, y0, x, y)
    # Inverse squared distance to each source vortex.
    inv_r2 = (dx**2 + dy**2)**-1
    # 2-D point-vortex velocity: tangential, strength / (2*pi*r).
    u = -s0 * dy * r_twopi * inv_r2
    v = s0 * dx * r_twopi * inv_r2
    return u, v
"""Returns two arrays of u, v"""
dx, dy = self.distance(x0, y0, x, y)
#print 'dx, dy:', dx, dy
rr2 = (dx**2 + dy**2)**-1
u = - s0 * dy * r_twopi * rr2
v = s0 * dx * r_twopi * rr2
#print 'u, v', u, v
return u, v | [
"def",
"uv_at_xy",
"(",
"self",
",",
"x",
",",
"y",
",",
"x0",
",",
"y0",
",",
"s0",
")",
":",
"dx",
",",
"dy",
"=",
"self",
".",
"distance",
"(",
"x0",
",",
"y0",
",",
"x",
",",
"y",
")",
"#print 'dx, dy:', dx, dy",
"rr2",
"=",
"(",
"dx",
"*... | Returns two arrays of u, v | [
"Returns",
"two",
"arrays",
"of",
"u",
"v"
] | 4f41584a12bcbf8657785b8cb310fa5065ecabd1 | https://github.com/pyqg/pyqg/blob/4f41584a12bcbf8657785b8cb310fa5065ecabd1/pyqg/point_vortex.py#L54-L62 | train | 52,339 |
def find(self, other):
    """Yield every interval in the tree that overlaps *other*."""
    intervals = self._iset
    # Narrow the candidate window with two binary searches; _maxlen
    # bounds how far left an overlapping interval can start.
    lo = binsearch_left_start(intervals, other[0] - self._maxlen, 0, len(intervals))
    hi = binsearch_right_end(intervals, other[1], 0, len(intervals))
    for candidate in intervals[lo:hi]:
        # Exact overlap test on the shortlisted intervals.
        if candidate[0] <= other[1] and candidate[1] >= other[0]:
            yield candidate
"""Return an interable of elements that overlap other in the tree."""
iset = self._iset
l = binsearch_left_start(iset, other[0] - self._maxlen, 0, len(iset))
r = binsearch_right_end(iset, other[1], 0, len(iset))
iopts = iset[l:r]
iiter = (s for s in iopts if s[0] <= other[1] and s[1] >= other[0])
for o in iiter: yield o | [
"def",
"find",
"(",
"self",
",",
"other",
")",
":",
"iset",
"=",
"self",
".",
"_iset",
"l",
"=",
"binsearch_left_start",
"(",
"iset",
",",
"other",
"[",
"0",
"]",
"-",
"self",
".",
"_maxlen",
",",
"0",
",",
"len",
"(",
"iset",
")",
")",
"r",
"=... | Return an interable of elements that overlap other in the tree. | [
"Return",
"an",
"interable",
"of",
"elements",
"that",
"overlap",
"other",
"in",
"the",
"tree",
"."
] | 3c4a5923c97a5d9a11571e0c9ea5bb7ea4e784ee | https://github.com/brentp/interlap/blob/3c4a5923c97a5d9a11571e0c9ea5bb7ea4e784ee/interlap.py#L153-L160 | train | 52,340 |
def loaddict(filename=DICTIONARY):
    """
    Load the dictionary from a specific JSON file.

    No-op if the module-level cache is already populated.
    """
    global zhcdicts
    if zhcdicts:
        return
    # The bundled default dictionary is read via package resources;
    # anything else is treated as a path on disk.
    if filename == _DEFAULT_DICT:
        raw = get_module_res(filename).read()
    else:
        with open(filename, 'rb') as f:
            raw = f.read()
    zhcdicts = json.loads(raw.decode('utf-8'))
    # Freeze the one-way character sets for fast membership tests.
    zhcdicts['SIMPONLY'] = frozenset(zhcdicts['SIMPONLY'])
    zhcdicts['TRADONLY'] = frozenset(zhcdicts['TRADONLY'])
"""
Load the dictionary from a specific JSON file.
"""
global zhcdicts
if zhcdicts:
return
if filename == _DEFAULT_DICT:
zhcdicts = json.loads(get_module_res(filename).read().decode('utf-8'))
else:
with open(filename, 'rb') as f:
zhcdicts = json.loads(f.read().decode('utf-8'))
zhcdicts['SIMPONLY'] = frozenset(zhcdicts['SIMPONLY'])
zhcdicts['TRADONLY'] = frozenset(zhcdicts['TRADONLY']) | [
"def",
"loaddict",
"(",
"filename",
"=",
"DICTIONARY",
")",
":",
"global",
"zhcdicts",
"if",
"zhcdicts",
":",
"return",
"if",
"filename",
"==",
"_DEFAULT_DICT",
":",
"zhcdicts",
"=",
"json",
".",
"loads",
"(",
"get_module_res",
"(",
"filename",
")",
".",
"... | Load the dictionary from a specific JSON file. | [
"Load",
"the",
"dictionary",
"from",
"a",
"specific",
"JSON",
"file",
"."
] | 925c0f9494f3439bc05526e7e89bb5f0ab3d185e | https://github.com/gumblex/zhconv/blob/925c0f9494f3439bc05526e7e89bb5f0ab3d185e/zhconv/zhconv.py#L68-L81 | train | 52,341 |
def getdict(locale):
    """
    Generate or get convertion dict cache for certain locale.
    Dictionaries are loaded on demand.
    """
    global zhcdicts, dict_zhcn, dict_zhsg, dict_zhtw, dict_zhhk, pfsdict
    # Lazily load the raw JSON tables on first use.
    if zhcdicts is None:
        loaddict(DICTIONARY)
    if locale == 'zh-cn':
        if dict_zhcn:
            got = dict_zhcn
        else:
            # Mainland: simplified base table overlaid with CN-specific terms.
            dict_zhcn = zhcdicts['zh2Hans'].copy()
            dict_zhcn.update(zhcdicts['zh2CN'])
            got = dict_zhcn
    elif locale == 'zh-tw':
        if dict_zhtw:
            got = dict_zhtw
        else:
            # Taiwan: traditional base table overlaid with TW-specific terms.
            dict_zhtw = zhcdicts['zh2Hant'].copy()
            dict_zhtw.update(zhcdicts['zh2TW'])
            got = dict_zhtw
    elif locale == 'zh-hk' or locale == 'zh-mo':
        # Hong Kong and Macau share one conversion table.
        if dict_zhhk:
            got = dict_zhhk
        else:
            dict_zhhk = zhcdicts['zh2Hant'].copy()
            dict_zhhk.update(zhcdicts['zh2HK'])
            got = dict_zhhk
    elif locale == 'zh-sg' or locale == 'zh-my':
        # Singapore and Malaysia share one conversion table.
        if dict_zhsg:
            got = dict_zhsg
        else:
            dict_zhsg = zhcdicts['zh2Hans'].copy()
            dict_zhsg.update(zhcdicts['zh2SG'])
            got = dict_zhsg
    elif locale == 'zh-hans':
        # Script-level conversion only, no regional vocabulary.
        got = zhcdicts['zh2Hans']
    elif locale == 'zh-hant':
        got = zhcdicts['zh2Hant']
    else:
        # Unknown locale: identity (empty) conversion.
        got = {}
    # Cache the prefix set used by the longest-match tokenizer.
    if locale not in pfsdict:
        pfsdict[locale] = getpfset(got)
    return got
"""
Generate or get convertion dict cache for certain locale.
Dictionaries are loaded on demand.
"""
global zhcdicts, dict_zhcn, dict_zhsg, dict_zhtw, dict_zhhk, pfsdict
if zhcdicts is None:
loaddict(DICTIONARY)
if locale == 'zh-cn':
if dict_zhcn:
got = dict_zhcn
else:
dict_zhcn = zhcdicts['zh2Hans'].copy()
dict_zhcn.update(zhcdicts['zh2CN'])
got = dict_zhcn
elif locale == 'zh-tw':
if dict_zhtw:
got = dict_zhtw
else:
dict_zhtw = zhcdicts['zh2Hant'].copy()
dict_zhtw.update(zhcdicts['zh2TW'])
got = dict_zhtw
elif locale == 'zh-hk' or locale == 'zh-mo':
if dict_zhhk:
got = dict_zhhk
else:
dict_zhhk = zhcdicts['zh2Hant'].copy()
dict_zhhk.update(zhcdicts['zh2HK'])
got = dict_zhhk
elif locale == 'zh-sg' or locale == 'zh-my':
if dict_zhsg:
got = dict_zhsg
else:
dict_zhsg = zhcdicts['zh2Hans'].copy()
dict_zhsg.update(zhcdicts['zh2SG'])
got = dict_zhsg
elif locale == 'zh-hans':
got = zhcdicts['zh2Hans']
elif locale == 'zh-hant':
got = zhcdicts['zh2Hant']
else:
got = {}
if locale not in pfsdict:
pfsdict[locale] = getpfset(got)
return got | [
"def",
"getdict",
"(",
"locale",
")",
":",
"global",
"zhcdicts",
",",
"dict_zhcn",
",",
"dict_zhsg",
",",
"dict_zhtw",
",",
"dict_zhhk",
",",
"pfsdict",
"if",
"zhcdicts",
"is",
"None",
":",
"loaddict",
"(",
"DICTIONARY",
")",
"if",
"locale",
"==",
"'zh-cn'... | Generate or get convertion dict cache for certain locale.
Dictionaries are loaded on demand. | [
"Generate",
"or",
"get",
"convertion",
"dict",
"cache",
"for",
"certain",
"locale",
".",
"Dictionaries",
"are",
"loaded",
"on",
"demand",
"."
] | 925c0f9494f3439bc05526e7e89bb5f0ab3d185e | https://github.com/gumblex/zhconv/blob/925c0f9494f3439bc05526e7e89bb5f0ab3d185e/zhconv/zhconv.py#L83-L127 | train | 52,342 |
def convtable2dict(convtable, locale, update=None):
    """
    Convert a list of conversion dict to a dict for a certain locale.

    >>> sorted(convtable2dict([{'zh-hk': '列斯', 'zh-hans': '利兹', 'zh': '利兹', 'zh-tw': '里茲'}, {':uni': '巨集', 'zh-cn': '宏'}], 'zh-cn').items())
    [('列斯', '利兹'), ('利兹', '利兹'), ('巨集', '宏'), ('里茲', '利兹')]
    """
    result = dict(update) if update else {}
    for rule in convtable:
        if ':uni' in rule:
            # Unidirectional rule: map only the given source word.
            if locale in rule:
                result[rule[':uni']] = rule[locale]
        elif locale[:-1] == 'zh-han':
            # Script-level locale (zh-hans/zh-hant): map every variant in
            # the rule to this locale's form, if present.
            if locale in rule:
                target = rule[locale]
                for word in rule.values():
                    result[word] = target
        else:
            # Regional locale: resolve via the fallback chain.
            target = fallback(locale, rule)
            for word in rule.values():
                result[word] = target
    return result
"""
Convert a list of conversion dict to a dict for a certain locale.
>>> sorted(convtable2dict([{'zh-hk': '列斯', 'zh-hans': '利兹', 'zh': '利兹', 'zh-tw': '里茲'}, {':uni': '巨集', 'zh-cn': '宏'}], 'zh-cn').items())
[('列斯', '利兹'), ('利兹', '利兹'), ('巨集', '宏'), ('里茲', '利兹')]
"""
rdict = update.copy() if update else {}
for r in convtable:
if ':uni' in r:
if locale in r:
rdict[r[':uni']] = r[locale]
elif locale[:-1] == 'zh-han':
if locale in r:
for word in r.values():
rdict[word] = r[locale]
else:
v = fallback(locale, r)
for word in r.values():
rdict[word] = v
return rdict | [
"def",
"convtable2dict",
"(",
"convtable",
",",
"locale",
",",
"update",
"=",
"None",
")",
":",
"rdict",
"=",
"update",
".",
"copy",
"(",
")",
"if",
"update",
"else",
"{",
"}",
"for",
"r",
"in",
"convtable",
":",
"if",
"':uni'",
"in",
"r",
":",
"if... | Convert a list of conversion dict to a dict for a certain locale.
>>> sorted(convtable2dict([{'zh-hk': '列斯', 'zh-hans': '利兹', 'zh': '利兹', 'zh-tw': '里茲'}, {':uni': '巨集', 'zh-cn': '宏'}], 'zh-cn').items())
[('列斯', '利兹'), ('利兹', '利兹'), ('巨集', '宏'), ('里茲', '利兹')] | [
"Convert",
"a",
"list",
"of",
"conversion",
"dict",
"to",
"a",
"dict",
"for",
"a",
"certain",
"locale",
"."
] | 925c0f9494f3439bc05526e7e89bb5f0ab3d185e | https://github.com/gumblex/zhconv/blob/925c0f9494f3439bc05526e7e89bb5f0ab3d185e/zhconv/zhconv.py#L176-L196 | train | 52,343 |
def tokenize(s, locale, update=None):
    """
    Greedy longest-match tokenization of `s` using the dictionary for
    `locale`.

    Don't use this for serious text processing.
    """
    zhdict = getdict(locale)
    pfset = pfsdict[locale]
    if update:
        zhdict = zhdict.copy()
        zhdict.update(update)
        # Extend the prefix set so the matcher can walk into added words.
        extra = {word[:i + 1] for word in update for i in range(len(word))}
        pfset = pfset | extra
    tokens = []
    total = len(s)
    pos = 0
    while pos < total:
        # Extend the fragment while it is still a known prefix, remembering
        # the longest fragment that is a complete dictionary entry.
        end = pos
        frag = s[pos]
        longest = None
        longest_end = 0
        while end < total and frag in pfset:
            if frag in zhdict:
                longest = frag
                longest_end = end
            end += 1
            frag = s[pos:end + 1]
        if longest is None:
            # No dictionary word starts here: emit the single character.
            longest = s[pos]
            pos += 1
        else:
            pos = longest_end + 1
        tokens.append(longest)
    return tokens
"""
Tokenize `s` according to corresponding locale dictionary.
Don't use this for serious text processing.
"""
zhdict = getdict(locale)
pfset = pfsdict[locale]
if update:
zhdict = zhdict.copy()
zhdict.update(update)
newset = set()
for word in update:
for ch in range(len(word)):
newset.add(word[:ch+1])
pfset = pfset | newset
ch = []
N = len(s)
pos = 0
while pos < N:
i = pos
frag = s[pos]
maxword = None
maxpos = 0
while i < N and frag in pfset:
if frag in zhdict:
maxword = frag
maxpos = i
i += 1
frag = s[pos:i+1]
if maxword is None:
maxword = s[pos]
pos += 1
else:
pos = maxpos + 1
ch.append(maxword)
return ch | [
"def",
"tokenize",
"(",
"s",
",",
"locale",
",",
"update",
"=",
"None",
")",
":",
"zhdict",
"=",
"getdict",
"(",
"locale",
")",
"pfset",
"=",
"pfsdict",
"[",
"locale",
"]",
"if",
"update",
":",
"zhdict",
"=",
"zhdict",
".",
"copy",
"(",
")",
"zhdic... | Tokenize `s` according to corresponding locale dictionary.
Don't use this for serious text processing. | [
"Tokenize",
"s",
"according",
"to",
"corresponding",
"locale",
"dictionary",
".",
"Don",
"t",
"use",
"this",
"for",
"serious",
"text",
"processing",
"."
] | 925c0f9494f3439bc05526e7e89bb5f0ab3d185e | https://github.com/gumblex/zhconv/blob/925c0f9494f3439bc05526e7e89bb5f0ab3d185e/zhconv/zhconv.py#L198-L233 | train | 52,344 |
def get_qiniu_config(name, default=None):
    """
    Get configuration variable from environment variable
    or django setting.py

    Looks up *name* first in the process environment, then in Django
    settings, finally falling back to *default*. String values are
    stripped of surrounding whitespace.

    Raises:
        ImproperlyConfigured: if no value is found anywhere.
    """
    config = os.environ.get(name, getattr(settings, name, default))
    if config is None:
        # BUGFIX: the two adjacent string literals previously concatenated
        # to "...environmentvariable..." — a space was missing.
        raise ImproperlyConfigured(
            "Can't find config for '%s' either in environment "
            "variable or in setting.py" % name)
    if isinstance(config, six.string_types):
        return config.strip()
    return config
"""
Get configuration variable from environment variable
or django setting.py
"""
config = os.environ.get(name, getattr(settings, name, default))
if config is not None:
if isinstance(config, six.string_types):
return config.strip()
else:
return config
else:
raise ImproperlyConfigured(
"Can't find config for '%s' either in environment"
"variable or in setting.py" % name) | [
"def",
"get_qiniu_config",
"(",
"name",
",",
"default",
"=",
"None",
")",
":",
"config",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"name",
",",
"getattr",
"(",
"settings",
",",
"name",
",",
"default",
")",
")",
"if",
"config",
"is",
"not",
"None",
... | Get configuration variable from environment variable
or django setting.py | [
"Get",
"configuration",
"variable",
"from",
"environment",
"variable",
"or",
"django",
"setting",
".",
"py"
] | b046ec0b67ebcf8cd9eb09c60f7db4a7e4fab7ad | https://github.com/glasslion/django-qiniu-storage/blob/b046ec0b67ebcf8cd9eb09c60f7db4a7e4fab7ad/qiniustorage/backends.py#L27-L41 | train | 52,345 |
def load_from_file(filename):
    """
    Load a list of filenames from an external text file.

    Returns a set of Song objects (failed constructors removed), or None
    when the file cannot be read.
    """
    if os.path.isdir(filename):
        logger.error("Err: File '%s' is a directory", filename)
        return None
    if not os.path.isfile(filename):
        logger.error("Err: File '%s' does not exist", filename)
        return None
    try:
        with open(filename, 'r') as sourcefile:
            lines = [line.strip() for line in sourcefile]
    except IOError as error:
        logger.exception(error)
        return None
    # Song.from_filename returns None on failure; drop those entries.
    return {Song.from_filename(line) for line in lines} - {None}
"""
Load a list of filenames from an external text file.
"""
if os.path.isdir(filename):
logger.error("Err: File '%s' is a directory", filename)
return None
if not os.path.isfile(filename):
logger.error("Err: File '%s' does not exist", filename)
return None
try:
with open(filename, 'r') as sourcefile:
songs = [line.strip() for line in sourcefile]
except IOError as error:
logger.exception(error)
return None
songs = set(Song.from_filename(song) for song in songs)
return songs.difference({None}) | [
"def",
"load_from_file",
"(",
"filename",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"filename",
")",
":",
"logger",
".",
"error",
"(",
"\"Err: File '%s' is a directory\"",
",",
"filename",
")",
"return",
"None",
"if",
"not",
"os",
".",
"path",
... | Load a list of filenames from an external text file. | [
"Load",
"a",
"list",
"of",
"filenames",
"from",
"an",
"external",
"text",
"file",
"."
] | 86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb | https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/cli.py#L17-L35 | train | 52,346 |
def parse_argv():
    """
    Parse command line arguments. Settings will be stored in the global
    variables declared above.

    Returns:
        set: Song objects to fetch lyrics for, with any failed
        constructors (None) removed.
    """
    parser = argparse.ArgumentParser(description='Find lyrics for a set of mp3'
                                     ' files and embed them as metadata')
    parser.add_argument('-j', '--jobs', help='Number of parallel processes',
                        type=int, metavar='N', default=1)
    parser.add_argument('-o', '--overwrite', help='Overwrite lyrics of songs'
                        ' that already have them', action='store_true')
    parser.add_argument('-s', '--stats', help='Print a series of statistics at'
                        ' the end of the execution', action='store_true')
    parser.add_argument('-v', '--verbose', help='Set verbosity level (pass it'
                        ' up to three times)', action='count')
    # NOTE(review): args.debug is parsed but not read in this function —
    # presumably consumed elsewhere; confirm.
    parser.add_argument('-d', '--debug', help='Enable debug output',
                        action='store_true')
    # --recursive and --from-file are alternative song sources, so they
    # are mutually exclusive.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-r', '--recursive', help='Recursively search for'
                       ' mp3 files', metavar='path', nargs='?', const='.')
    group.add_argument('--from-file', help='Read a list of files from a text'
                       ' file', type=str)
    parser.add_argument('songs', help='The files/songs to search lyrics for',
                        nargs='*')
    args = parser.parse_args()

    # Persist flag values in the module-level CONFIG dict.
    CONFIG['overwrite'] = args.overwrite
    CONFIG['print_stats'] = args.stats
    # Map -v count to a log level: absent -> CRITICAL, one -> INFO,
    # two or more -> DEBUG.
    if args.verbose is None or args.verbose == 0:
        logger.setLevel(logging.CRITICAL)
    elif args.verbose == 1:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.DEBUG)

    if args.jobs <= 0:
        msg = 'Argument -j/--jobs should have a value greater than zero'
        parser.error(msg)
    else:
        CONFIG['jobcount'] = args.jobs

    songs = set()
    if args.from_file:
        songs = load_from_file(args.from_file)
        if not songs:
            raise ValueError('No file names found in file')
    elif args.recursive:
        mp3files = glob.iglob(args.recursive + '/**/*.mp3', recursive=True)
        songs = set(Song.from_filename(f) for f in mp3files)
    elif args.songs:
        # If the first positional exists on disk, treat all positionals as
        # file names; otherwise parse them as "artist - title" strings.
        if os.path.exists(args.songs[0]):
            parser = Song.from_filename
        else:
            parser = Song.from_string
        songs.update(map(parser, args.songs))
    else:
        # No source given: fall back to the currently playing song.
        songs.add(get_current_song())
    # Just in case some song constructors failed, remove all the Nones
    return songs.difference({None})
"""
Parse command line arguments. Settings will be stored in the global
variables declared above.
"""
parser = argparse.ArgumentParser(description='Find lyrics for a set of mp3'
' files and embed them as metadata')
parser.add_argument('-j', '--jobs', help='Number of parallel processes',
type=int, metavar='N', default=1)
parser.add_argument('-o', '--overwrite', help='Overwrite lyrics of songs'
' that already have them', action='store_true')
parser.add_argument('-s', '--stats', help='Print a series of statistics at'
' the end of the execution', action='store_true')
parser.add_argument('-v', '--verbose', help='Set verbosity level (pass it'
' up to three times)', action='count')
parser.add_argument('-d', '--debug', help='Enable debug output',
action='store_true')
group = parser.add_mutually_exclusive_group()
group.add_argument('-r', '--recursive', help='Recursively search for'
' mp3 files', metavar='path', nargs='?', const='.')
group.add_argument('--from-file', help='Read a list of files from a text'
' file', type=str)
parser.add_argument('songs', help='The files/songs to search lyrics for',
nargs='*')
args = parser.parse_args()
CONFIG['overwrite'] = args.overwrite
CONFIG['print_stats'] = args.stats
if args.verbose is None or args.verbose == 0:
logger.setLevel(logging.CRITICAL)
elif args.verbose == 1:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.DEBUG)
if args.jobs <= 0:
msg = 'Argument -j/--jobs should have a value greater than zero'
parser.error(msg)
else:
CONFIG['jobcount'] = args.jobs
songs = set()
if args.from_file:
songs = load_from_file(args.from_file)
if not songs:
raise ValueError('No file names found in file')
elif args.recursive:
mp3files = glob.iglob(args.recursive + '/**/*.mp3', recursive=True)
songs = set(Song.from_filename(f) for f in mp3files)
elif args.songs:
if os.path.exists(args.songs[0]):
parser = Song.from_filename
else:
parser = Song.from_string
songs.update(map(parser, args.songs))
else:
songs.add(get_current_song())
# Just in case some song constructors failed, remove all the Nones
return songs.difference({None}) | [
"def",
"parse_argv",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Find lyrics for a set of mp3'",
"' files and embed them as metadata'",
")",
"parser",
".",
"add_argument",
"(",
"'-j'",
",",
"'--jobs'",
",",
"help",
"="... | Parse command line arguments. Settings will be stored in the global
variables declared above. | [
"Parse",
"command",
"line",
"arguments",
".",
"Settings",
"will",
"be",
"stored",
"in",
"the",
"global",
"variables",
"declared",
"above",
"."
] | 86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb | https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/cli.py#L38-L99 | train | 52,347 |
def decode(slug):
    """
    Returns the uuid.UUID object represented by the given v4 or "nice" slug
    """
    # On Python 3 a bytes slug must become text before string concatenation.
    if sys.version_info.major != 2 and isinstance(slug, bytes):
        slug = slug.decode('ascii')
    # Slugs drop the trailing '==' of the 24-char base64 form; restore it.
    padded = slug + '=='
    return uuid.UUID(bytes=base64.urlsafe_b64decode(padded))
"""
Returns the uuid.UUID object represented by the given v4 or "nice" slug
"""
if sys.version_info.major != 2 and isinstance(slug, bytes):
slug = slug.decode('ascii')
slug = slug + '==' # base64 padding
return uuid.UUID(bytes=base64.urlsafe_b64decode(slug)) | [
"def",
"decode",
"(",
"slug",
")",
":",
"if",
"sys",
".",
"version_info",
".",
"major",
"!=",
"2",
"and",
"isinstance",
"(",
"slug",
",",
"bytes",
")",
":",
"slug",
"=",
"slug",
".",
"decode",
"(",
"'ascii'",
")",
"slug",
"=",
"slug",
"+",
"'=='",
... | Returns the uuid.UUID object represented by the given v4 or "nice" slug | [
"Returns",
"the",
"uuid",
".",
"UUID",
"object",
"represented",
"by",
"the",
"given",
"v4",
"or",
"nice",
"slug"
] | 7c2c58e79d8684a54c578302ad60b384e52bb09b | https://github.com/taskcluster/slugid.py/blob/7c2c58e79d8684a54c578302ad60b384e52bb09b/slugid/slugid.py#L24-L31 | train | 52,348 |
def filter_against_normal(self, normal_mutations, maf_min=0.2,
                          maf_count_threshold=20, count_min=1):
    """Return a copy of these mutations with any mutation that is also
    present in the given normal sample removed."""
    assert normal_mutations.chrom == self.chrom
    assert normal_mutations.pos == self.pos
    assert normal_mutations.ref == self.ref

    def present_in_normal(mut):
        # A normal-sample mutation counts as present when it is either
        # well covered with a high allele fraction, or poorly covered
        # but seen more than count_min times.
        if mut.count >= maf_count_threshold:
            return mut.maf > maf_min
        return mut.count > count_min

    filtered = MutationsAtSinglePosition(self.chrom, self.pos, self.cov, self.ref)
    for key, mut in self.snvs.items():
        if not (key in normal_mutations.snvs and
                present_in_normal(normal_mutations.snvs[key])):
            filtered.add_snv(mut)
    for key, mut in self.deletions.items():
        if not (key in normal_mutations.deletions and
                present_in_normal(normal_mutations.deletions[key])):
            filtered.add_deletion(mut)
    for key, mut in self.insertions.items():
        if not (key in normal_mutations.insertions and
                present_in_normal(normal_mutations.insertions[key])):
            filtered.add_insertion(mut)
    return filtered
maf_count_threshold=20, count_min=1):
"""Filters mutations that are in the given normal"""
assert(normal_mutations.chrom == self.chrom)
assert(normal_mutations.pos == self.pos)
assert(normal_mutations.ref == self.ref)
def passes_normal_criteria(mut):
return (mut.count >= maf_count_threshold and mut.maf > maf_min) or \
(mut.count < maf_count_threshold and mut.count > count_min)
nms = normal_mutations
muts = MutationsAtSinglePosition(self.chrom, self.pos, self.cov, self.ref)
for snv in self.snvs:
if not (snv in nms.snvs and passes_normal_criteria(nms.snvs[snv])):
muts.add_snv(self.snvs[snv])
for dlt in self.deletions:
if not (dlt in nms.deletions and passes_normal_criteria(nms.deletions[dlt])):
muts.add_deletion(self.deletions[dlt])
for ins in self.insertions:
if not (ins in nms.insertions and passes_normal_criteria(nms.insertions[ins])):
muts.add_insertion(self.insertions[ins])
return muts | [
"def",
"filter_against_normal",
"(",
"self",
",",
"normal_mutations",
",",
"maf_min",
"=",
"0.2",
",",
"maf_count_threshold",
"=",
"20",
",",
"count_min",
"=",
"1",
")",
":",
"assert",
"(",
"normal_mutations",
".",
"chrom",
"==",
"self",
".",
"chrom",
")",
... | Filters mutations that are in the given normal | [
"Filters",
"mutations",
"that",
"are",
"in",
"the",
"given",
"normal"
] | d4e41c5478ca9ba58be44d95106885c096c90a74 | https://github.com/inodb/sufam/blob/d4e41c5478ca9ba58be44d95106885c096c90a74/sufam/mutation.py#L55-L81 | train | 52,349 |
def add_handlers(self, room_handler=None, transaction_handler=None,
                 user_handler=None):
    """Adds routes to Application that use specified handlers."""
    # (handler, resource class, URI template) for each supported matrix route.
    route_table = (
        (room_handler, resources.Room, "/rooms/{room_alias}"),
        (transaction_handler, resources.Transaction, "/transactions/{txn_id}"),
        (user_handler, resources.User, "/users/{user_id}"),
    )
    for handler, resource_cls, uri_template in route_table:
        if handler:
            self.add_route(uri_template, resource_cls(handler, self.Api))
"def",
"add_handlers",
"(",
"self",
",",
"room_handler",
"=",
"None",
",",
"transaction_handler",
"=",
"None",
",",
"user_handler",
"=",
"None",
")",
":",
"# Add all the normal matrix API routes",
"if",
"room_handler",
":",
"room",
"=",
"resources",
".",
"Room",
... | Adds routes to Application that use specified handlers. | [
"Adds",
"routes",
"to",
"Application",
"that",
"use",
"specified",
"handlers",
"."
] | 9f7bfe033b9d3bbfd3a9e8aea02e35526b53125e | https://github.com/non-Jedi/gyr/blob/9f7bfe033b9d3bbfd3a9e8aea02e35526b53125e/gyr/server.py#L34-L49 | train | 52,350 |
def log_mon_value(name, value=1, **kwargs):
    """
    Log a single monitoring value under *name*; meant to be aggregated with sum.
    """
    payload = {
        'metric_name': name,
        'value': value,
        'message': '{} => {}'.format(name, value),
    }
    # Caller-supplied kwargs win, matching the original dict-literal merge order.
    payload.update(kwargs)
    log_mon.info(payload)
"def",
"log_mon_value",
"(",
"name",
",",
"value",
"=",
"1",
",",
"*",
"*",
"kwargs",
")",
":",
"message",
"=",
"'{} => {}'",
".",
"format",
"(",
"name",
",",
"value",
")",
"log_mon",
".",
"info",
"(",
"{",
"'metric_name'",
":",
"name",
",",
"'value'... | simplest monitoring function to be aggregated with sum | [
"simplest",
"monitoring",
"function",
"to",
"be",
"aggregated",
"with",
"sum"
] | 1aba960c9890ceef2fb5e215b98b1646056ee58e | https://github.com/tipsi/tipsi_tools/blob/1aba960c9890ceef2fb5e215b98b1646056ee58e/tipsi_tools/monitoring.py#L11-L16 | train | 52,351 |
def create_store():
    """
    Set up the thread-local ``store`` proxy and hand back its backing dict.

    :return: A thread-local storage as a dictionary
    """
    storage = _proxy('store')
    # Re-seed the shared state with a fresh placeholder object.
    _state.store = type('store', (object,), {})
    storage.store = {}
    return storage.store
"def",
"create_store",
"(",
")",
":",
"new_storage",
"=",
"_proxy",
"(",
"'store'",
")",
"_state",
".",
"store",
"=",
"type",
"(",
"'store'",
",",
"(",
"object",
",",
")",
",",
"{",
"}",
")",
"new_storage",
".",
"store",
"=",
"dict",
"(",
")",
"ret... | A helper for setting the _proxy and slapping the store
object for us.
:return: A thread-local storage as a dictionary | [
"A",
"helper",
"for",
"setting",
"the",
"_proxy",
"and",
"slapping",
"the",
"store",
"object",
"for",
"us",
"."
] | d5dc2edfcb75d9291ced3f2551f368c35dd31475 | https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/store.py#L25-L35 | train | 52,352 |
def request(request_callback=None, **kwargs):
    """
    Chisel request decorator
    """
    # Called bare with a callback: wrap it immediately.
    if request_callback is not None:
        return Request(request_callback, **kwargs).decorate_module(request_callback)
    # Called with keyword options only: return a decorator that re-enters here.
    return lambda fn: request(fn, **kwargs)
"def",
"request",
"(",
"request_callback",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"request_callback",
"is",
"None",
":",
"return",
"lambda",
"fn",
":",
"request",
"(",
"fn",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"Request",... | Chisel request decorator | [
"Chisel",
"request",
"decorator"
] | d306a9eae2ff757647c6ca1c933bc944efa5c326 | https://github.com/craigahobbs/chisel/blob/d306a9eae2ff757647c6ca1c933bc944efa5c326/src/chisel/request.py#L13-L21 | train | 52,353 |
def add(self, client):
    """Put *client* into the penalty box, unless it is already there."""
    if client.pool_id in self._client_ids:
        log.info("%r is already in the penalty box. Ignoring.", client)
        return
    self._client_ids.add(client.pool_id)
    # Schedule the earliest retry at the minimum wait from now.
    release_at = time.time() + self._min_wait
    heapq.heappush(self._clients, (release_at, (client, self._min_wait)))
"def",
"add",
"(",
"self",
",",
"client",
")",
":",
"if",
"client",
".",
"pool_id",
"in",
"self",
".",
"_client_ids",
":",
"log",
".",
"info",
"(",
"\"%r is already in the penalty box. Ignoring.\"",
",",
"client",
")",
"return",
"release",
"=",
"time",
".",
... | Add a client to the penalty box. | [
"Add",
"a",
"client",
"to",
"the",
"penalty",
"box",
"."
] | 9fb3ccdc3e0b24906520cac1e933a775e8dfbd99 | https://github.com/Parsely/redis-fluster/blob/9fb3ccdc3e0b24906520cac1e933a775e8dfbd99/fluster/penalty_box.py#L21-L28 | train | 52,354 |
def get(self):
    """Yield every penalized client whose wait has elapsed and that responds again.

    :returns: Iterable of redis clients
    """
    now = time.time()
    # Pop entries from the heap until the soonest release time is in the future.
    while self._clients and self._clients[0][0] < now:
        _, (client, last_wait) = heapq.heappop(self._clients)
        attempt_started = time.time()
        try:
            client.echo("test")  # reconnected if this succeeds.
            self._client_ids.remove(client.pool_id)
            yield client
        except (ConnectionError, TimeoutError):
            # Still unreachable: back off exponentially (capped) and requeue.
            elapsed = time.time() - attempt_started
            next_wait = min(int(last_wait * self._multiplier), self._max_wait)
            heapq.heappush(
                self._clients, (time.time() + next_wait, (client, next_wait))
            )
            log.info(
                "%r is still down after a %s second attempt to connect. Retrying in %ss.",
                client,
                elapsed,
                next_wait,
            )
"def",
"get",
"(",
"self",
")",
":",
"now",
"=",
"time",
".",
"time",
"(",
")",
"while",
"self",
".",
"_clients",
"and",
"self",
".",
"_clients",
"[",
"0",
"]",
"[",
"0",
"]",
"<",
"now",
":",
"_",
",",
"(",
"client",
",",
"last_wait",
")",
"... | Get any clients ready to be used.
:returns: Iterable of redis clients | [
"Get",
"any",
"clients",
"ready",
"to",
"be",
"used",
"."
] | 9fb3ccdc3e0b24906520cac1e933a775e8dfbd99 | https://github.com/Parsely/redis-fluster/blob/9fb3ccdc3e0b24906520cac1e933a775e8dfbd99/fluster/penalty_box.py#L30-L52 | train | 52,355 |
def string(_object):
    """
    Validate that a given input is of type string.

    Example usage::

        data = {'a' : 21}
        schema = (string, 21)

    Handed a callable, this instead acts as a decorator that checks the
    incoming value before the wrapped validator runs.

    .. note::
        If the argument is a callable, the decorating behavior will be
        triggered, otherwise it will act as a normal function.
    """
    if not is_callable(_object):
        # Plain value: validate it right away.
        ensure(isinstance(_object, basestring), "not of type string")
        return

    wrapped = _object

    @wraps(wrapped)
    def decorated(value):
        ensure(isinstance(value, basestring), "not of type string")
        return wrapped(value)
    return decorated
"def",
"string",
"(",
"_object",
")",
":",
"if",
"is_callable",
"(",
"_object",
")",
":",
"_validator",
"=",
"_object",
"@",
"wraps",
"(",
"_validator",
")",
"def",
"decorated",
"(",
"value",
")",
":",
"ensure",
"(",
"isinstance",
"(",
"value",
",",
"b... | Validates a given input is of type string.
Example usage::
data = {'a' : 21}
schema = (string, 21)
You can also use this as a decorator, as a way to check for the
input before it even hits a validator you may be writing.
.. note::
If the argument is a callable, the decorating behavior will be
triggered, otherwise it will act as a normal function. | [
"Validates",
"a",
"given",
"input",
"is",
"of",
"type",
"string",
"."
] | d5dc2edfcb75d9291ced3f2551f368c35dd31475 | https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/validators/types.py#L10-L34 | train | 52,356 |
def boolean(_object):
    """
    Validate that a given input is of type boolean.

    Example usage::

        data = {'a' : True}
        schema = ('a', boolean)

    Handed a callable, this instead acts as a decorator that checks the
    incoming value before the wrapped validator runs.

    .. note::
        If the argument is a callable, the decorating behavior will be
        triggered, otherwise it will act as a normal function.
    """
    if not is_callable(_object):
        # Plain value: validate it right away.
        ensure(isinstance(_object, bool), "not of type boolean")
        return

    wrapped = _object

    @wraps(wrapped)
    def decorated(value):
        ensure(isinstance(value, bool), "not of type boolean")
        return wrapped(value)
    return decorated
"def",
"boolean",
"(",
"_object",
")",
":",
"if",
"is_callable",
"(",
"_object",
")",
":",
"_validator",
"=",
"_object",
"@",
"wraps",
"(",
"_validator",
")",
"def",
"decorated",
"(",
"value",
")",
":",
"ensure",
"(",
"isinstance",
"(",
"value",
",",
"... | Validates a given input is of type boolean.
Example usage::
data = {'a' : True}
schema = ('a', boolean)
You can also use this as a decorator, as a way to check for the
input before it even hits a validator you may be writing.
.. note::
If the argument is a callable, the decorating behavior will be
triggered, otherwise it will act as a normal function. | [
"Validates",
"a",
"given",
"input",
"is",
"of",
"type",
"boolean",
"."
] | d5dc2edfcb75d9291ced3f2551f368c35dd31475 | https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/validators/types.py#L37-L62 | train | 52,357 |
def dictionary(_object, *args):
    """
    Validate that a given input is of type dictionary.

    Example usage::

        data = {'a' : {'b': 1}}
        schema = ('a', dictionary)

    Handed a callable, this instead acts as a decorator that checks the
    incoming value before the wrapped validator runs.

    .. note::
        If the argument is a callable, the decorating behavior will be
        triggered, otherwise it will act as a normal function.
    """
    error_msg = 'not of type dictionary'
    if not is_callable(_object):
        try:
            ensure(isinstance(_object, dict), error_msg)
        except AssertionError:
            if not args:
                raise
            # Extra positional args carry validation context for Invalid.
            msg = 'did not pass validation against callable: dictionary'
            raise Invalid('', msg=msg, reason=error_msg, *args)
        return

    wrapped = _object

    @wraps(wrapped)
    def decorated(value):
        ensure(isinstance(value, dict), error_msg)
        return wrapped(value)
    return decorated
"def",
"dictionary",
"(",
"_object",
",",
"*",
"args",
")",
":",
"error_msg",
"=",
"'not of type dictionary'",
"if",
"is_callable",
"(",
"_object",
")",
":",
"_validator",
"=",
"_object",
"@",
"wraps",
"(",
"_validator",
")",
"def",
"decorated",
"(",
"value"... | Validates a given input is of type dictionary.
Example usage::
data = {'a' : {'b': 1}}
schema = ('a', dictionary)
You can also use this as a decorator, as a way to check for the
input before it even hits a validator you may be writing.
.. note::
If the argument is a callable, the decorating behavior will be
triggered, otherwise it will act as a normal function. | [
"Validates",
"a",
"given",
"input",
"is",
"of",
"type",
"dictionary",
"."
] | d5dc2edfcb75d9291ced3f2551f368c35dd31475 | https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/validators/types.py#L66-L98 | train | 52,358 |
def array(_object):
    """
    Validate that a given input is of type list.

    Example usage::

        data = {'a' : [1,2]}
        schema = ('a', array)

    Handed a callable, this instead acts as a decorator that checks the
    incoming value before the wrapped validator runs.

    .. note::
        If the argument is a callable, the decorating behavior will be
        triggered, otherwise it will act as a normal function.
    """
    if not is_callable(_object):
        # Plain value: validate it right away.
        ensure(isinstance(_object, list), "not of type array")
        return

    wrapped = _object

    @wraps(wrapped)
    def decorated(value):
        ensure(isinstance(value, list), "not of type array")
        return wrapped(value)
    return decorated
"def",
"array",
"(",
"_object",
")",
":",
"if",
"is_callable",
"(",
"_object",
")",
":",
"_validator",
"=",
"_object",
"@",
"wraps",
"(",
"_validator",
")",
"def",
"decorated",
"(",
"value",
")",
":",
"ensure",
"(",
"isinstance",
"(",
"value",
",",
"li... | Validates a given input is of type list.
Example usage::
data = {'a' : [1,2]}
schema = ('a', array)
You can also use this as a decorator, as a way to check for the
input before it even hits a validator you may be writing.
.. note::
If the argument is a callable, the decorating behavior will be
triggered, otherwise it will act as a normal function. | [
"Validates",
"a",
"given",
"input",
"is",
"of",
"type",
"list",
"."
] | d5dc2edfcb75d9291ced3f2551f368c35dd31475 | https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/validators/types.py#L101-L126 | train | 52,359 |
def integer(_object):
    """
    Validate that a given input is of type int.

    Example usage::

        data = {'a' : 21}
        schema = ('a', integer)

    Handed a callable, this instead acts as a decorator that checks the
    incoming value before the wrapped validator runs.

    .. note::
        If the argument is a callable, the decorating behavior will be
        triggered, otherwise it will act as a normal function.
    """
    if not is_callable(_object):
        # Plain value: validate it right away.
        ensure(isinstance(_object, int), "not of type int")
        return

    wrapped = _object

    @wraps(wrapped)
    def decorated(value):
        ensure(isinstance(value, int), "not of type int")
        return wrapped(value)
    return decorated
"def",
"integer",
"(",
"_object",
")",
":",
"if",
"is_callable",
"(",
"_object",
")",
":",
"_validator",
"=",
"_object",
"@",
"wraps",
"(",
"_validator",
")",
"def",
"decorated",
"(",
"value",
")",
":",
"ensure",
"(",
"isinstance",
"(",
"value",
",",
"... | Validates a given input is of type int..
Example usage::
data = {'a' : 21}
schema = ('a', integer)
You can also use this as a decorator, as a way to check for the
input before it even hits a validator you may be writing.
.. note::
If the argument is a callable, the decorating behavior will be
triggered, otherwise it will act as a normal function. | [
"Validates",
"a",
"given",
"input",
"is",
"of",
"type",
"int",
".."
] | d5dc2edfcb75d9291ced3f2551f368c35dd31475 | https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/validators/types.py#L129-L153 | train | 52,360 |
def constant(cls,
             value: Value,
             dtype: tf.DType = tf.float32) -> 'TensorFluent':
    '''Builds a non-batched TensorFluent wrapping the constant `value`.

    Args:
        value: The constant value.
        dtype: The output's data type.

    Returns:
        A constant TensorFluent.
    '''
    tensor = tf.constant(value, dtype=dtype)
    # Constants carry no parameter scope and no batch dimension.
    scope = []  # type: List
    return TensorFluent(tensor, scope, batch=False)
"def",
"constant",
"(",
"cls",
",",
"value",
":",
"Value",
",",
"dtype",
":",
"tf",
".",
"DType",
"=",
"tf",
".",
"float32",
")",
"->",
"'TensorFluent'",
":",
"t",
"=",
"tf",
".",
"constant",
"(",
"value",
",",
"dtype",
"=",
"dtype",
")",
"scope",
... | Returns a constant `value` TensorFluent with given `dtype`.
Args:
value: The constant value.
dtype: The output's data type.
Returns:
A constant TensorFluent. | [
"Returns",
"a",
"constant",
"value",
"TensorFluent",
"with",
"given",
"dtype",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L67-L82 | train | 52,361 |
def Bernoulli(cls,
              mean: 'TensorFluent',
              batch_size: Optional[int] = None) -> Tuple[Distribution, 'TensorFluent']:
    '''Draws a sample from a Bernoulli distribution with the given mean.

    Args:
        mean: The mean parameter of the Bernoulli distribution.
        batch_size: The size of the batch (optional).

    Returns:
        The Bernoulli distribution and a TensorFluent sample drawn from the distribution.
    '''
    dist = tf.distributions.Bernoulli(probs=mean.tensor, dtype=tf.bool)
    batch = mean.batch
    # Only introduce a batch dimension when the parameter is not batched already.
    if batch or batch_size is None:
        sample = dist.sample()
    else:
        sample = dist.sample(batch_size)
        batch = True
    return (dist, TensorFluent(sample, mean.scope.as_list(), batch=batch))
"def",
"Bernoulli",
"(",
"cls",
",",
"mean",
":",
"'TensorFluent'",
",",
"batch_size",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
")",
"->",
"Tuple",
"[",
"Distribution",
",",
"'TensorFluent'",
"]",
":",
"probs",
"=",
"mean",
".",
"tensor",
"dist",
... | Returns a TensorFluent for the Bernoulli sampling op with given mean parameter.
Args:
mean: The mean parameter of the Bernoulli distribution.
batch_size: The size of the batch (optional).
Returns:
The Bernoulli distribution and a TensorFluent sample drawn from the distribution. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"Bernoulli",
"sampling",
"op",
"with",
"given",
"mean",
"parameter",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L85-L106 | train | 52,362 |
def Uniform(cls,
            low: 'TensorFluent', high: 'TensorFluent',
            batch_size: Optional[int] = None) -> Tuple[Distribution, 'TensorFluent']:
    '''Draws a sample from a Uniform(low, high) distribution.

    Args:
        low: The low parameter of the Uniform distribution.
        high: The high parameter of the Uniform distribution.
        batch_size: The size of the batch (optional).

    Returns:
        The Uniform distribution and a TensorFluent sample drawn from the distribution.

    Raises:
        ValueError: If parameters do not have the same scope.
    '''
    if low.scope != high.scope:
        raise ValueError('Uniform distribution: parameters must have same scope!')
    dist = tf.distributions.Uniform(low.tensor, high.tensor)
    batch = low.batch or high.batch
    # Only introduce a batch dimension when neither parameter is batched already.
    if batch or batch_size is None:
        sample = dist.sample()
    else:
        sample = dist.sample(batch_size)
        batch = True
    return (dist, TensorFluent(sample, low.scope.as_list(), batch=batch))
"def",
"Uniform",
"(",
"cls",
",",
"low",
":",
"'TensorFluent'",
",",
"high",
":",
"'TensorFluent'",
",",
"batch_size",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
")",
"->",
"Tuple",
"[",
"Distribution",
",",
"'TensorFluent'",
"]",
":",
"if",
"low",
... | Returns a TensorFluent for the Uniform sampling op with given low and high parameters.
Args:
low: The low parameter of the Uniform distribution.
high: The high parameter of the Uniform distribution.
batch_size: The size of the batch (optional).
Returns:
The Uniform distribution and a TensorFluent sample drawn from the distribution.
Raises:
ValueError: If parameters do not have the same scope. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"Uniform",
"sampling",
"op",
"with",
"given",
"low",
"and",
"high",
"parameters",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L109-L135 | train | 52,363 |
def Normal(cls,
           mean: 'TensorFluent', variance: 'TensorFluent',
           batch_size: Optional[int] = None) -> Tuple[Distribution, 'TensorFluent']:
    '''Draws a sample from a Normal(mean, variance) distribution.

    Args:
        mean: The mean parameter of the Normal distribution.
        variance: The variance parameter of the Normal distribution.
        batch_size: The size of the batch (optional).

    Returns:
        The Normal distribution and a TensorFluent sample drawn from the distribution.

    Raises:
        ValueError: If parameters do not have the same scope.
    '''
    if mean.scope != variance.scope:
        raise ValueError('Normal distribution: parameters must have same scope!')
    # tf parametrizes Normal by standard deviation, not variance.
    dist = tf.distributions.Normal(mean.tensor, tf.sqrt(variance.tensor))
    batch = mean.batch or variance.batch
    # Only introduce a batch dimension when neither parameter is batched already.
    if batch or batch_size is None:
        sample = dist.sample()
    else:
        sample = dist.sample(batch_size)
        batch = True
    return (dist, TensorFluent(sample, mean.scope.as_list(), batch=batch))
"def",
"Normal",
"(",
"cls",
",",
"mean",
":",
"'TensorFluent'",
",",
"variance",
":",
"'TensorFluent'",
",",
"batch_size",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
")",
"->",
"Tuple",
"[",
"Distribution",
",",
"'TensorFluent'",
"]",
":",
"if",
"me... | Returns a TensorFluent for the Normal sampling op with given mean and variance.
Args:
mean: The mean parameter of the Normal distribution.
variance: The variance parameter of the Normal distribution.
batch_size: The size of the batch (optional).
Returns:
The Normal distribution and a TensorFluent sample drawn from the distribution.
Raises:
ValueError: If parameters do not have the same scope. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"Normal",
"sampling",
"op",
"with",
"given",
"mean",
"and",
"variance",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L138-L166 | train | 52,364 |
def Gamma(cls,
          shape: 'TensorFluent',
          scale: 'TensorFluent',
          batch_size: Optional[int] = None) -> Tuple[Distribution, 'TensorFluent']:
    '''Draws a sample from a Gamma(shape, scale) distribution.

    Args:
        shape: The shape parameter of the Gamma distribution.
        scale: The scale parameter of the Gamma distribution.
        batch_size: The size of the batch (optional).

    Returns:
        The Gamma distribution and a TensorFluent sample drawn from the distribution.

    Raises:
        ValueError: If parameters do not have the same scope.
    '''
    if shape.scope != scale.scope:
        raise ValueError('Gamma distribution: parameters must have same scope!')
    # tf parametrizes Gamma by (concentration, rate); rate is 1/scale.
    dist = tf.distributions.Gamma(shape.tensor, 1 / scale.tensor)
    batch = shape.batch or scale.batch
    # Only introduce a batch dimension when neither parameter is batched already.
    if batch or batch_size is None:
        sample = dist.sample()
    else:
        sample = dist.sample(batch_size)
        batch = True
    return (dist, TensorFluent(sample, shape.scope.as_list(), batch=batch))
"def",
"Gamma",
"(",
"cls",
",",
"shape",
":",
"'TensorFluent'",
",",
"scale",
":",
"'TensorFluent'",
",",
"batch_size",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
")",
"->",
"Tuple",
"[",
"Distribution",
",",
"'TensorFluent'",
"]",
":",
"if",
"shape... | Returns a TensorFluent for the Gamma sampling op with given shape and scale parameters.
Args:
shape: The shape parameter of the Gamma distribution.
scale: The scale parameter of the Gamma distribution.
batch_size: The size of the batch (optional).
Returns:
The Gamma distribution and a TensorFluent sample drawn from the distribution.
Raises:
ValueError: If parameters do not have the same scope. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"Gamma",
"sampling",
"op",
"with",
"given",
"shape",
"and",
"scale",
"parameters",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L200-L229 | train | 52,365 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.Exponential | def Exponential(cls,
mean: 'TensorFluent',
batch_size: Optional[int] = None) -> Tuple[Distribution, 'TensorFluent']:
'''Returns a TensorFluent for the Exponential sampling op with given mean parameter.
Args:
mean: The mean parameter of the Exponential distribution.
batch_size: The size of the batch (optional).
Returns:
The Exponential distribution and a TensorFluent sample drawn from the distribution.
'''
rate = 1 / mean.tensor
dist = tf.distributions.Exponential(rate)
batch = mean.batch
if not batch and batch_size is not None:
t = dist.sample(batch_size)
batch = True
else:
t = dist.sample()
scope = mean.scope.as_list()
return (dist, TensorFluent(t, scope, batch=batch)) | python | def Exponential(cls,
mean: 'TensorFluent',
batch_size: Optional[int] = None) -> Tuple[Distribution, 'TensorFluent']:
'''Returns a TensorFluent for the Exponential sampling op with given mean parameter.
Args:
mean: The mean parameter of the Exponential distribution.
batch_size: The size of the batch (optional).
Returns:
The Exponential distribution and a TensorFluent sample drawn from the distribution.
'''
rate = 1 / mean.tensor
dist = tf.distributions.Exponential(rate)
batch = mean.batch
if not batch and batch_size is not None:
t = dist.sample(batch_size)
batch = True
else:
t = dist.sample()
scope = mean.scope.as_list()
return (dist, TensorFluent(t, scope, batch=batch)) | [
"def",
"Exponential",
"(",
"cls",
",",
"mean",
":",
"'TensorFluent'",
",",
"batch_size",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
")",
"->",
"Tuple",
"[",
"Distribution",
",",
"'TensorFluent'",
"]",
":",
"rate",
"=",
"1",
"/",
"mean",
".",
"tenso... | Returns a TensorFluent for the Exponential sampling op with given mean parameter.
Args:
mean: The mean parameter of the Exponential distribution.
batch_size: The size of the batch (optional).
Returns:
The Exponential distribution and a TensorFluent sample drawn from the distribution. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"Exponential",
"sampling",
"op",
"with",
"given",
"mean",
"parameter",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L232-L253 | train | 52,366 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.stop_gradient | def stop_gradient(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a copy of the input fluent with stop_gradient at tensor level.
Args:
x: The input fluent.
Returns:
A TensorFluent that stops backpropagation of gradient computations.
'''
scope = x.scope.as_list()
batch = x.batch
return TensorFluent(tf.stop_gradient(x.tensor), scope, batch) | python | def stop_gradient(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a copy of the input fluent with stop_gradient at tensor level.
Args:
x: The input fluent.
Returns:
A TensorFluent that stops backpropagation of gradient computations.
'''
scope = x.scope.as_list()
batch = x.batch
return TensorFluent(tf.stop_gradient(x.tensor), scope, batch) | [
"def",
"stop_gradient",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
")",
"->",
"'TensorFluent'",
":",
"scope",
"=",
"x",
".",
"scope",
".",
"as_list",
"(",
")",
"batch",
"=",
"x",
".",
"batch",
"return",
"TensorFluent",
"(",
"tf",
".",
"stop_gradient",
... | Returns a copy of the input fluent with stop_gradient at tensor level.
Args:
x: The input fluent.
Returns:
A TensorFluent that stops backpropagation of gradient computations. | [
"Returns",
"a",
"copy",
"of",
"the",
"input",
"fluent",
"with",
"stop_gradient",
"at",
"tensor",
"level",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L256-L267 | train | 52,367 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.stop_batch_gradient | def stop_batch_gradient(cls, x: 'TensorFluent', stop_batch: tf.Tensor) -> 'TensorFluent':
'''Returns a copy of the inputs fluent with stop_gradient applied at batch level.
Args:
x: The input fluent.
stop_batch: A boolean tf.Tensor with shape=(batch_size, ...)
Returns:
A TensorFluent that conditionally stops backpropagation of gradient computations.
'''
scope = x.scope.as_list()
batch = x.batch
tensor = tf.where(stop_batch, tf.stop_gradient(x.tensor), x.tensor)
return TensorFluent(tensor, scope, batch) | python | def stop_batch_gradient(cls, x: 'TensorFluent', stop_batch: tf.Tensor) -> 'TensorFluent':
'''Returns a copy of the inputs fluent with stop_gradient applied at batch level.
Args:
x: The input fluent.
stop_batch: A boolean tf.Tensor with shape=(batch_size, ...)
Returns:
A TensorFluent that conditionally stops backpropagation of gradient computations.
'''
scope = x.scope.as_list()
batch = x.batch
tensor = tf.where(stop_batch, tf.stop_gradient(x.tensor), x.tensor)
return TensorFluent(tensor, scope, batch) | [
"def",
"stop_batch_gradient",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
",",
"stop_batch",
":",
"tf",
".",
"Tensor",
")",
"->",
"'TensorFluent'",
":",
"scope",
"=",
"x",
".",
"scope",
".",
"as_list",
"(",
")",
"batch",
"=",
"x",
".",
"batch",
"tenso... | Returns a copy of the inputs fluent with stop_gradient applied at batch level.
Args:
x: The input fluent.
stop_batch: A boolean tf.Tensor with shape=(batch_size, ...)
Returns:
A TensorFluent that conditionally stops backpropagation of gradient computations. | [
"Returns",
"a",
"copy",
"of",
"the",
"inputs",
"fluent",
"with",
"stop_gradient",
"applied",
"at",
"batch",
"level",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L270-L283 | train | 52,368 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.abs | def abs(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the abs function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the abs function.
'''
return cls._unary_op(x, tf.abs, tf.float32) | python | def abs(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the abs function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the abs function.
'''
return cls._unary_op(x, tf.abs, tf.float32) | [
"def",
"abs",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
")",
"->",
"'TensorFluent'",
":",
"return",
"cls",
".",
"_unary_op",
"(",
"x",
",",
"tf",
".",
"abs",
",",
"tf",
".",
"float32",
")"
] | Returns a TensorFluent for the abs function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the abs function. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"abs",
"function",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L286-L295 | train | 52,369 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.exp | def exp(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the exp function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the exp function.
'''
return cls._unary_op(x, tf.exp, tf.float32) | python | def exp(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the exp function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the exp function.
'''
return cls._unary_op(x, tf.exp, tf.float32) | [
"def",
"exp",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
")",
"->",
"'TensorFluent'",
":",
"return",
"cls",
".",
"_unary_op",
"(",
"x",
",",
"tf",
".",
"exp",
",",
"tf",
".",
"float32",
")"
] | Returns a TensorFluent for the exp function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the exp function. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"exp",
"function",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L298-L307 | train | 52,370 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.log | def log(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the log function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the log function.
'''
return cls._unary_op(x, tf.log, tf.float32) | python | def log(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the log function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the log function.
'''
return cls._unary_op(x, tf.log, tf.float32) | [
"def",
"log",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
")",
"->",
"'TensorFluent'",
":",
"return",
"cls",
".",
"_unary_op",
"(",
"x",
",",
"tf",
".",
"log",
",",
"tf",
".",
"float32",
")"
] | Returns a TensorFluent for the log function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the log function. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"log",
"function",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L310-L319 | train | 52,371 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.sqrt | def sqrt(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the sqrt function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the sqrt function.
'''
return cls._unary_op(x, tf.sqrt, tf.float32) | python | def sqrt(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the sqrt function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the sqrt function.
'''
return cls._unary_op(x, tf.sqrt, tf.float32) | [
"def",
"sqrt",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
")",
"->",
"'TensorFluent'",
":",
"return",
"cls",
".",
"_unary_op",
"(",
"x",
",",
"tf",
".",
"sqrt",
",",
"tf",
".",
"float32",
")"
] | Returns a TensorFluent for the sqrt function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the sqrt function. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"sqrt",
"function",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L322-L331 | train | 52,372 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.cos | def cos(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the cos function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the cos function.
'''
return cls._unary_op(x, tf.cos, tf.float32) | python | def cos(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the cos function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the cos function.
'''
return cls._unary_op(x, tf.cos, tf.float32) | [
"def",
"cos",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
")",
"->",
"'TensorFluent'",
":",
"return",
"cls",
".",
"_unary_op",
"(",
"x",
",",
"tf",
".",
"cos",
",",
"tf",
".",
"float32",
")"
] | Returns a TensorFluent for the cos function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the cos function. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"cos",
"function",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L334-L343 | train | 52,373 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.sin | def sin(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the sin function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the sin function.
'''
return cls._unary_op(x, tf.sin, tf.float32) | python | def sin(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the sin function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the sin function.
'''
return cls._unary_op(x, tf.sin, tf.float32) | [
"def",
"sin",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
")",
"->",
"'TensorFluent'",
":",
"return",
"cls",
".",
"_unary_op",
"(",
"x",
",",
"tf",
".",
"sin",
",",
"tf",
".",
"float32",
")"
] | Returns a TensorFluent for the sin function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the sin function. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"sin",
"function",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L346-L355 | train | 52,374 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.tan | def tan(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the tan function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the tan function.
'''
return cls._unary_op(x, tf.tan, tf.float32) | python | def tan(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the tan function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the tan function.
'''
return cls._unary_op(x, tf.tan, tf.float32) | [
"def",
"tan",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
")",
"->",
"'TensorFluent'",
":",
"return",
"cls",
".",
"_unary_op",
"(",
"x",
",",
"tf",
".",
"tan",
",",
"tf",
".",
"float32",
")"
] | Returns a TensorFluent for the tan function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the tan function. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"tan",
"function",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L358-L367 | train | 52,375 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.acos | def acos(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the arccos function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the arccos function.
'''
return cls._unary_op(x, tf.acos, tf.float32) | python | def acos(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the arccos function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the arccos function.
'''
return cls._unary_op(x, tf.acos, tf.float32) | [
"def",
"acos",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
")",
"->",
"'TensorFluent'",
":",
"return",
"cls",
".",
"_unary_op",
"(",
"x",
",",
"tf",
".",
"acos",
",",
"tf",
".",
"float32",
")"
] | Returns a TensorFluent for the arccos function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the arccos function. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"arccos",
"function",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L370-L379 | train | 52,376 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.asin | def asin(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the arcsin function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the arcsin function.
'''
return cls._unary_op(x, tf.asin, tf.float32) | python | def asin(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the arcsin function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the arcsin function.
'''
return cls._unary_op(x, tf.asin, tf.float32) | [
"def",
"asin",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
")",
"->",
"'TensorFluent'",
":",
"return",
"cls",
".",
"_unary_op",
"(",
"x",
",",
"tf",
".",
"asin",
",",
"tf",
".",
"float32",
")"
] | Returns a TensorFluent for the arcsin function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the arcsin function. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"arcsin",
"function",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L382-L391 | train | 52,377 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.atan | def atan(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the arctan function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the arctan function.
'''
return cls._unary_op(x, tf.atan2, tf.float32) | python | def atan(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the arctan function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the arctan function.
'''
return cls._unary_op(x, tf.atan2, tf.float32) | [
"def",
"atan",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
")",
"->",
"'TensorFluent'",
":",
"return",
"cls",
".",
"_unary_op",
"(",
"x",
",",
"tf",
".",
"atan2",
",",
"tf",
".",
"float32",
")"
] | Returns a TensorFluent for the arctan function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the arctan function. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"arctan",
"function",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L394-L403 | train | 52,378 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.round | def round(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the round function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the round function.
'''
return cls._unary_op(x, tf.round, tf.float32) | python | def round(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the round function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the round function.
'''
return cls._unary_op(x, tf.round, tf.float32) | [
"def",
"round",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
")",
"->",
"'TensorFluent'",
":",
"return",
"cls",
".",
"_unary_op",
"(",
"x",
",",
"tf",
".",
"round",
",",
"tf",
".",
"float32",
")"
] | Returns a TensorFluent for the round function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the round function. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"round",
"function",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L406-L415 | train | 52,379 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.ceil | def ceil(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the ceil function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the ceil function.
'''
return cls._unary_op(x, tf.ceil, tf.float32) | python | def ceil(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the ceil function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the ceil function.
'''
return cls._unary_op(x, tf.ceil, tf.float32) | [
"def",
"ceil",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
")",
"->",
"'TensorFluent'",
":",
"return",
"cls",
".",
"_unary_op",
"(",
"x",
",",
"tf",
".",
"ceil",
",",
"tf",
".",
"float32",
")"
] | Returns a TensorFluent for the ceil function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the ceil function. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"ceil",
"function",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L418-L427 | train | 52,380 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.floor | def floor(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the floor function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the floor function.
'''
return cls._unary_op(x, tf.floor, tf.float32) | python | def floor(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the floor function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the floor function.
'''
return cls._unary_op(x, tf.floor, tf.float32) | [
"def",
"floor",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
")",
"->",
"'TensorFluent'",
":",
"return",
"cls",
".",
"_unary_op",
"(",
"x",
",",
"tf",
".",
"floor",
",",
"tf",
".",
"float32",
")"
] | Returns a TensorFluent for the floor function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the floor function. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"floor",
"function",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L430-L439 | train | 52,381 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.pow | def pow(cls, x: 'TensorFluent', y: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the pow function.TensorFluent
Args:
x: The first operand.
y: The second operand.
Returns:
A TensorFluent wrapping the pow function.
'''
return cls._binary_op(x, y, tf.pow, tf.float32) | python | def pow(cls, x: 'TensorFluent', y: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the pow function.TensorFluent
Args:
x: The first operand.
y: The second operand.
Returns:
A TensorFluent wrapping the pow function.
'''
return cls._binary_op(x, y, tf.pow, tf.float32) | [
"def",
"pow",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
",",
"y",
":",
"'TensorFluent'",
")",
"->",
"'TensorFluent'",
":",
"return",
"cls",
".",
"_binary_op",
"(",
"x",
",",
"y",
",",
"tf",
".",
"pow",
",",
"tf",
".",
"float32",
")"
] | Returns a TensorFluent for the pow function.TensorFluent
Args:
x: The first operand.
y: The second operand.
Returns:
A TensorFluent wrapping the pow function. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"pow",
"function",
".",
"TensorFluent"
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L442-L452 | train | 52,382 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.max | def max(cls, x: 'TensorFluent', y: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the maximum function.TensorFluent
Args:
x: The first operand.
y: The second operand.
Returns:
A TensorFluent wrapping the maximum function.
'''
return cls._binary_op(x, y, tf.maximum, tf.float32) | python | def max(cls, x: 'TensorFluent', y: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the maximum function.TensorFluent
Args:
x: The first operand.
y: The second operand.
Returns:
A TensorFluent wrapping the maximum function.
'''
return cls._binary_op(x, y, tf.maximum, tf.float32) | [
"def",
"max",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
",",
"y",
":",
"'TensorFluent'",
")",
"->",
"'TensorFluent'",
":",
"return",
"cls",
".",
"_binary_op",
"(",
"x",
",",
"y",
",",
"tf",
".",
"maximum",
",",
"tf",
".",
"float32",
")"
] | Returns a TensorFluent for the maximum function.TensorFluent
Args:
x: The first operand.
y: The second operand.
Returns:
A TensorFluent wrapping the maximum function. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"maximum",
"function",
".",
"TensorFluent"
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L455-L465 | train | 52,383 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.min | def min(cls, x: 'TensorFluent', y: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the minimum function.
Args:
x: The first operand.
y: The second operand.
Returns:
A TensorFluent wrapping the minimum function.
'''
return cls._binary_op(x, y, tf.minimum, tf.float32) | python | def min(cls, x: 'TensorFluent', y: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the minimum function.
Args:
x: The first operand.
y: The second operand.
Returns:
A TensorFluent wrapping the minimum function.
'''
return cls._binary_op(x, y, tf.minimum, tf.float32) | [
"def",
"min",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
",",
"y",
":",
"'TensorFluent'",
")",
"->",
"'TensorFluent'",
":",
"return",
"cls",
".",
"_binary_op",
"(",
"x",
",",
"y",
",",
"tf",
".",
"minimum",
",",
"tf",
".",
"float32",
")"
] | Returns a TensorFluent for the minimum function.
Args:
x: The first operand.
y: The second operand.
Returns:
A TensorFluent wrapping the minimum function. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"minimum",
"function",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L468-L478 | train | 52,384 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.if_then_else | def if_then_else(cls,
condition: 'TensorFluent',
true_case: 'TensorFluent',
false_case: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the control op if-then-else.
Args:
condition: Boolean fluent for the if condition.
true_case: Fluent returned in the true clause.
false_case: Fluent returned in the false clause.
Returns:
A TensorFluent wrapping the if-then-else control statement.
Raises:
ValueError: If cases don't have same shape.
'''
true = TensorFluent.constant(True, tf.bool)
false = TensorFluent.constant(False, tf.bool)
ite = (condition == true) * true_case + (condition == false) * false_case
if true_case.dtype == tf.bool and false_case.dtype == tf.bool:
ite = ite.cast(tf.bool)
return ite | python | def if_then_else(cls,
condition: 'TensorFluent',
true_case: 'TensorFluent',
false_case: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the control op if-then-else.
Args:
condition: Boolean fluent for the if condition.
true_case: Fluent returned in the true clause.
false_case: Fluent returned in the false clause.
Returns:
A TensorFluent wrapping the if-then-else control statement.
Raises:
ValueError: If cases don't have same shape.
'''
true = TensorFluent.constant(True, tf.bool)
false = TensorFluent.constant(False, tf.bool)
ite = (condition == true) * true_case + (condition == false) * false_case
if true_case.dtype == tf.bool and false_case.dtype == tf.bool:
ite = ite.cast(tf.bool)
return ite | [
"def",
"if_then_else",
"(",
"cls",
",",
"condition",
":",
"'TensorFluent'",
",",
"true_case",
":",
"'TensorFluent'",
",",
"false_case",
":",
"'TensorFluent'",
")",
"->",
"'TensorFluent'",
":",
"true",
"=",
"TensorFluent",
".",
"constant",
"(",
"True",
",",
"tf... | Returns a TensorFluent for the control op if-then-else.
Args:
condition: Boolean fluent for the if condition.
true_case: Fluent returned in the true clause.
false_case: Fluent returned in the false clause.
Returns:
A TensorFluent wrapping the if-then-else control statement.
Raises:
ValueError: If cases don't have same shape. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"control",
"op",
"if",
"-",
"then",
"-",
"else",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L481-L503 | train | 52,385 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent._binary_op | def _binary_op(cls,
x: 'TensorFluent',
y: 'TensorFluent',
op: Callable[[tf.Tensor, tf.Tensor], tf.Tensor],
dtype: tf.DType) -> 'TensorFluent':
'''Returns a TensorFluent for the binary `op` applied to fluents `x` and `y`.
Args:
x: The first operand.
y: The second operand.
op: The binary operator.
dtype: The output's data type.
Returns:
A TensorFluent wrapping the binary operator's output.
'''
# scope
s1 = x.scope.as_list()
s2 = y.scope.as_list()
scope, perm1, perm2 = TensorFluentScope.broadcast(s1, s2)
if x.batch and perm1 != []:
perm1 = [0] + [p+1 for p in perm1]
if y.batch and perm2 != []:
perm2 = [0] + [p+1 for p in perm2]
x = x.transpose(perm1)
y = y.transpose(perm2)
# shape
reshape1, reshape2 = TensorFluentShape.broadcast(x.shape, y.shape)
if reshape1 is not None:
x = x.reshape(reshape1)
if reshape2 is not None:
y = y.reshape(reshape2)
# dtype
x = x.cast(dtype)
y = y.cast(dtype)
# operation
t = op(x.tensor, y.tensor)
# batch
batch = x.batch or y.batch
return TensorFluent(t, scope, batch=batch) | python | def _binary_op(cls,
x: 'TensorFluent',
y: 'TensorFluent',
op: Callable[[tf.Tensor, tf.Tensor], tf.Tensor],
dtype: tf.DType) -> 'TensorFluent':
'''Returns a TensorFluent for the binary `op` applied to fluents `x` and `y`.
Args:
x: The first operand.
y: The second operand.
op: The binary operator.
dtype: The output's data type.
Returns:
A TensorFluent wrapping the binary operator's output.
'''
# scope
s1 = x.scope.as_list()
s2 = y.scope.as_list()
scope, perm1, perm2 = TensorFluentScope.broadcast(s1, s2)
if x.batch and perm1 != []:
perm1 = [0] + [p+1 for p in perm1]
if y.batch and perm2 != []:
perm2 = [0] + [p+1 for p in perm2]
x = x.transpose(perm1)
y = y.transpose(perm2)
# shape
reshape1, reshape2 = TensorFluentShape.broadcast(x.shape, y.shape)
if reshape1 is not None:
x = x.reshape(reshape1)
if reshape2 is not None:
y = y.reshape(reshape2)
# dtype
x = x.cast(dtype)
y = y.cast(dtype)
# operation
t = op(x.tensor, y.tensor)
# batch
batch = x.batch or y.batch
return TensorFluent(t, scope, batch=batch) | [
"def",
"_binary_op",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
",",
"y",
":",
"'TensorFluent'",
",",
"op",
":",
"Callable",
"[",
"[",
"tf",
".",
"Tensor",
",",
"tf",
".",
"Tensor",
"]",
",",
"tf",
".",
"Tensor",
"]",
",",
"dtype",
":",
"tf",
"... | Returns a TensorFluent for the binary `op` applied to fluents `x` and `y`.
Args:
x: The first operand.
y: The second operand.
op: The binary operator.
dtype: The output's data type.
Returns:
A TensorFluent wrapping the binary operator's output. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"binary",
"op",
"applied",
"to",
"fluents",
"x",
"and",
"y",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L506-L550 | train | 52,386 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent._unary_op | def _unary_op(cls,
x: 'TensorFluent',
op: Callable[[tf.Tensor], tf.Tensor],
dtype: tf.DType) -> 'TensorFluent':
'''Returns a TensorFluent for the unary `op` applied to fluent `x`.
Args:
x: The input fluent.
op: The unary operation.
dtype: The output's data type.
Returns:
A TensorFluent wrapping the unary operator's output.
'''
x = x.cast(dtype)
t = op(x.tensor)
scope = x.scope.as_list()
batch = x.batch
return TensorFluent(t, scope, batch=batch) | python | def _unary_op(cls,
x: 'TensorFluent',
op: Callable[[tf.Tensor], tf.Tensor],
dtype: tf.DType) -> 'TensorFluent':
'''Returns a TensorFluent for the unary `op` applied to fluent `x`.
Args:
x: The input fluent.
op: The unary operation.
dtype: The output's data type.
Returns:
A TensorFluent wrapping the unary operator's output.
'''
x = x.cast(dtype)
t = op(x.tensor)
scope = x.scope.as_list()
batch = x.batch
return TensorFluent(t, scope, batch=batch) | [
"def",
"_unary_op",
"(",
"cls",
",",
"x",
":",
"'TensorFluent'",
",",
"op",
":",
"Callable",
"[",
"[",
"tf",
".",
"Tensor",
"]",
",",
"tf",
".",
"Tensor",
"]",
",",
"dtype",
":",
"tf",
".",
"DType",
")",
"->",
"'TensorFluent'",
":",
"x",
"=",
"x"... | Returns a TensorFluent for the unary `op` applied to fluent `x`.
Args:
x: The input fluent.
op: The unary operation.
dtype: The output's data type.
Returns:
A TensorFluent wrapping the unary operator's output. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"unary",
"op",
"applied",
"to",
"fluent",
"x",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L553-L571 | train | 52,387 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent._aggregation_op | def _aggregation_op(cls,
op: Callable[[tf.Tensor, Optional[Sequence[int]]], tf.Tensor],
x: 'TensorFluent',
vars_list: List[str]) -> 'TensorFluent':
'''Returns a TensorFluent for the aggregation `op` applied to fluent `x`.
Args:
op: The aggregation operation.
x: The input fluent.
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the aggregation operator's output.
'''
axis = cls._varslist2axis(x, vars_list)
t = op(x.tensor, axis)
scope = []
for var in x.scope.as_list():
if var not in vars_list:
scope.append(var)
batch = x.batch
return TensorFluent(t, scope, batch=batch) | python | def _aggregation_op(cls,
op: Callable[[tf.Tensor, Optional[Sequence[int]]], tf.Tensor],
x: 'TensorFluent',
vars_list: List[str]) -> 'TensorFluent':
'''Returns a TensorFluent for the aggregation `op` applied to fluent `x`.
Args:
op: The aggregation operation.
x: The input fluent.
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the aggregation operator's output.
'''
axis = cls._varslist2axis(x, vars_list)
t = op(x.tensor, axis)
scope = []
for var in x.scope.as_list():
if var not in vars_list:
scope.append(var)
batch = x.batch
return TensorFluent(t, scope, batch=batch) | [
"def",
"_aggregation_op",
"(",
"cls",
",",
"op",
":",
"Callable",
"[",
"[",
"tf",
".",
"Tensor",
",",
"Optional",
"[",
"Sequence",
"[",
"int",
"]",
"]",
"]",
",",
"tf",
".",
"Tensor",
"]",
",",
"x",
":",
"'TensorFluent'",
",",
"vars_list",
":",
"Li... | Returns a TensorFluent for the aggregation `op` applied to fluent `x`.
Args:
op: The aggregation operation.
x: The input fluent.
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the aggregation operator's output. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"aggregation",
"op",
"applied",
"to",
"fluent",
"x",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L574-L598 | train | 52,388 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent._varslist2axis | def _varslist2axis(cls, fluent: 'TensorFluent', vars_list: List[str]) -> List[int]:
'''Maps the `vars_list` into a list of axis indices
corresponding to the `fluent` scope.
Args:
x: The fluent.
vars_list: The list of variables to be aggregated over.
Returns:
List[int]: a list of axis.
'''
axis = []
for var in vars_list:
if var in fluent.scope.as_list():
ax = fluent.scope.index(var)
if fluent.batch:
ax += 1
axis.append(ax)
return axis | python | def _varslist2axis(cls, fluent: 'TensorFluent', vars_list: List[str]) -> List[int]:
'''Maps the `vars_list` into a list of axis indices
corresponding to the `fluent` scope.
Args:
x: The fluent.
vars_list: The list of variables to be aggregated over.
Returns:
List[int]: a list of axis.
'''
axis = []
for var in vars_list:
if var in fluent.scope.as_list():
ax = fluent.scope.index(var)
if fluent.batch:
ax += 1
axis.append(ax)
return axis | [
"def",
"_varslist2axis",
"(",
"cls",
",",
"fluent",
":",
"'TensorFluent'",
",",
"vars_list",
":",
"List",
"[",
"str",
"]",
")",
"->",
"List",
"[",
"int",
"]",
":",
"axis",
"=",
"[",
"]",
"for",
"var",
"in",
"vars_list",
":",
"if",
"var",
"in",
"flu... | Maps the `vars_list` into a list of axis indices
corresponding to the `fluent` scope.
Args:
x: The fluent.
vars_list: The list of variables to be aggregated over.
Returns:
List[int]: a list of axis. | [
"Maps",
"the",
"vars_list",
"into",
"a",
"list",
"of",
"axis",
"indices",
"corresponding",
"to",
"the",
"fluent",
"scope",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L601-L619 | train | 52,389 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.cast | def cast(self, dtype: tf.DType) -> 'TensorFluent':
'''Returns a TensorFluent for the cast operation with given `dtype`.
Args:
dtype: The output's data type.
Returns:
A TensorFluent wrapping the cast operation.
'''
if self.dtype == dtype:
return self
t = tf.cast(self.tensor, dtype)
scope = self.scope.as_list()
batch = self.batch
return TensorFluent(t, scope, batch=batch) | python | def cast(self, dtype: tf.DType) -> 'TensorFluent':
'''Returns a TensorFluent for the cast operation with given `dtype`.
Args:
dtype: The output's data type.
Returns:
A TensorFluent wrapping the cast operation.
'''
if self.dtype == dtype:
return self
t = tf.cast(self.tensor, dtype)
scope = self.scope.as_list()
batch = self.batch
return TensorFluent(t, scope, batch=batch) | [
"def",
"cast",
"(",
"self",
",",
"dtype",
":",
"tf",
".",
"DType",
")",
"->",
"'TensorFluent'",
":",
"if",
"self",
".",
"dtype",
"==",
"dtype",
":",
"return",
"self",
"t",
"=",
"tf",
".",
"cast",
"(",
"self",
".",
"tensor",
",",
"dtype",
")",
"sc... | Returns a TensorFluent for the cast operation with given `dtype`.
Args:
dtype: The output's data type.
Returns:
A TensorFluent wrapping the cast operation. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"cast",
"operation",
"with",
"given",
"dtype",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L622-L636 | train | 52,390 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.reshape | def reshape(self, shape: tf.TensorShape) -> 'TensorFluent':
'''Returns a TensorFluent for the reshape operation with given `shape`.
Args:
shape: The output's shape.
Returns:
A TensorFluent wrapping the reshape operation.
'''
t = tf.reshape(self.tensor, shape)
scope = self.scope.as_list()
batch = self.batch
return TensorFluent(t, scope, batch=batch) | python | def reshape(self, shape: tf.TensorShape) -> 'TensorFluent':
'''Returns a TensorFluent for the reshape operation with given `shape`.
Args:
shape: The output's shape.
Returns:
A TensorFluent wrapping the reshape operation.
'''
t = tf.reshape(self.tensor, shape)
scope = self.scope.as_list()
batch = self.batch
return TensorFluent(t, scope, batch=batch) | [
"def",
"reshape",
"(",
"self",
",",
"shape",
":",
"tf",
".",
"TensorShape",
")",
"->",
"'TensorFluent'",
":",
"t",
"=",
"tf",
".",
"reshape",
"(",
"self",
".",
"tensor",
",",
"shape",
")",
"scope",
"=",
"self",
".",
"scope",
".",
"as_list",
"(",
")... | Returns a TensorFluent for the reshape operation with given `shape`.
Args:
shape: The output's shape.
Returns:
A TensorFluent wrapping the reshape operation. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"reshape",
"operation",
"with",
"given",
"shape",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L638-L650 | train | 52,391 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.transpose | def transpose(self, permutation: Optional[List[int]] = None) -> 'TensorFluent':
'''Returns a TensorFluent for the transpose operation with given `permutation`.
Args:
permutation: The output's shape permutation.
Returns:
A TensorFluent wrapping the transpose operation.
'''
if permutation == []:
return self
t = tf.transpose(self.tensor, permutation) if permutation != [] else self.tensor
scope = self.scope.as_list()
batch = self.batch
return TensorFluent(t, scope, batch=batch) | python | def transpose(self, permutation: Optional[List[int]] = None) -> 'TensorFluent':
'''Returns a TensorFluent for the transpose operation with given `permutation`.
Args:
permutation: The output's shape permutation.
Returns:
A TensorFluent wrapping the transpose operation.
'''
if permutation == []:
return self
t = tf.transpose(self.tensor, permutation) if permutation != [] else self.tensor
scope = self.scope.as_list()
batch = self.batch
return TensorFluent(t, scope, batch=batch) | [
"def",
"transpose",
"(",
"self",
",",
"permutation",
":",
"Optional",
"[",
"List",
"[",
"int",
"]",
"]",
"=",
"None",
")",
"->",
"'TensorFluent'",
":",
"if",
"permutation",
"==",
"[",
"]",
":",
"return",
"self",
"t",
"=",
"tf",
".",
"transpose",
"(",... | Returns a TensorFluent for the transpose operation with given `permutation`.
Args:
permutation: The output's shape permutation.
Returns:
A TensorFluent wrapping the transpose operation. | [
"Returns",
"a",
"TensorFluent",
"for",
"the",
"transpose",
"operation",
"with",
"given",
"permutation",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L652-L666 | train | 52,392 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.sum | def sum(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the sum aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the sum aggregation function.
'''
operand = self
if operand.dtype == tf.bool:
operand = operand.cast(tf.float32)
return self._aggregation_op(tf.reduce_sum, operand, vars_list) | python | def sum(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the sum aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the sum aggregation function.
'''
operand = self
if operand.dtype == tf.bool:
operand = operand.cast(tf.float32)
return self._aggregation_op(tf.reduce_sum, operand, vars_list) | [
"def",
"sum",
"(",
"self",
",",
"vars_list",
":",
"List",
"[",
"str",
"]",
")",
"->",
"'TensorFluent'",
":",
"operand",
"=",
"self",
"if",
"operand",
".",
"dtype",
"==",
"tf",
".",
"bool",
":",
"operand",
"=",
"operand",
".",
"cast",
"(",
"tf",
"."... | Returns the TensorFluent for the sum aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the sum aggregation function. | [
"Returns",
"the",
"TensorFluent",
"for",
"the",
"sum",
"aggregation",
"function",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L668-L680 | train | 52,393 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.avg | def avg(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the avg aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the avg aggregation function.
'''
operand = self
if operand.dtype == tf.bool:
operand = operand.cast(tf.float32)
return self._aggregation_op(tf.reduce_mean, operand, vars_list) | python | def avg(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the avg aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the avg aggregation function.
'''
operand = self
if operand.dtype == tf.bool:
operand = operand.cast(tf.float32)
return self._aggregation_op(tf.reduce_mean, operand, vars_list) | [
"def",
"avg",
"(",
"self",
",",
"vars_list",
":",
"List",
"[",
"str",
"]",
")",
"->",
"'TensorFluent'",
":",
"operand",
"=",
"self",
"if",
"operand",
".",
"dtype",
"==",
"tf",
".",
"bool",
":",
"operand",
"=",
"operand",
".",
"cast",
"(",
"tf",
"."... | Returns the TensorFluent for the avg aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the avg aggregation function. | [
"Returns",
"the",
"TensorFluent",
"for",
"the",
"avg",
"aggregation",
"function",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L682-L694 | train | 52,394 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.prod | def prod(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the prod aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the prod aggregation function.
'''
operand = self
if operand.dtype == tf.bool:
operand = operand.cast(tf.float32)
return self._aggregation_op(tf.reduce_prod, operand, vars_list) | python | def prod(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the prod aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the prod aggregation function.
'''
operand = self
if operand.dtype == tf.bool:
operand = operand.cast(tf.float32)
return self._aggregation_op(tf.reduce_prod, operand, vars_list) | [
"def",
"prod",
"(",
"self",
",",
"vars_list",
":",
"List",
"[",
"str",
"]",
")",
"->",
"'TensorFluent'",
":",
"operand",
"=",
"self",
"if",
"operand",
".",
"dtype",
"==",
"tf",
".",
"bool",
":",
"operand",
"=",
"operand",
".",
"cast",
"(",
"tf",
".... | Returns the TensorFluent for the prod aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the prod aggregation function. | [
"Returns",
"the",
"TensorFluent",
"for",
"the",
"prod",
"aggregation",
"function",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L696-L708 | train | 52,395 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.maximum | def maximum(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the maximum aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the maximum aggregation function.
'''
return self._aggregation_op(tf.reduce_max, self, vars_list) | python | def maximum(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the maximum aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the maximum aggregation function.
'''
return self._aggregation_op(tf.reduce_max, self, vars_list) | [
"def",
"maximum",
"(",
"self",
",",
"vars_list",
":",
"List",
"[",
"str",
"]",
")",
"->",
"'TensorFluent'",
":",
"return",
"self",
".",
"_aggregation_op",
"(",
"tf",
".",
"reduce_max",
",",
"self",
",",
"vars_list",
")"
] | Returns the TensorFluent for the maximum aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the maximum aggregation function. | [
"Returns",
"the",
"TensorFluent",
"for",
"the",
"maximum",
"aggregation",
"function",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L710-L719 | train | 52,396 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.minimum | def minimum(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the minimum aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the minimum aggregation function.
'''
return self._aggregation_op(tf.reduce_min, self, vars_list) | python | def minimum(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the minimum aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the minimum aggregation function.
'''
return self._aggregation_op(tf.reduce_min, self, vars_list) | [
"def",
"minimum",
"(",
"self",
",",
"vars_list",
":",
"List",
"[",
"str",
"]",
")",
"->",
"'TensorFluent'",
":",
"return",
"self",
".",
"_aggregation_op",
"(",
"tf",
".",
"reduce_min",
",",
"self",
",",
"vars_list",
")"
] | Returns the TensorFluent for the minimum aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the minimum aggregation function. | [
"Returns",
"the",
"TensorFluent",
"for",
"the",
"minimum",
"aggregation",
"function",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L721-L730 | train | 52,397 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.forall | def forall(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the forall aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the forall aggregation function.
'''
return self._aggregation_op(tf.reduce_all, self, vars_list) | python | def forall(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the forall aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the forall aggregation function.
'''
return self._aggregation_op(tf.reduce_all, self, vars_list) | [
"def",
"forall",
"(",
"self",
",",
"vars_list",
":",
"List",
"[",
"str",
"]",
")",
"->",
"'TensorFluent'",
":",
"return",
"self",
".",
"_aggregation_op",
"(",
"tf",
".",
"reduce_all",
",",
"self",
",",
"vars_list",
")"
] | Returns the TensorFluent for the forall aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the forall aggregation function. | [
"Returns",
"the",
"TensorFluent",
"for",
"the",
"forall",
"aggregation",
"function",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L732-L741 | train | 52,398 |
thiagopbueno/rddl2tf | rddl2tf/fluent.py | TensorFluent.exists | def exists(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the exists aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the exists aggregation function.
'''
return self._aggregation_op(tf.reduce_any, self, vars_list) | python | def exists(self, vars_list: List[str]) -> 'TensorFluent':
'''Returns the TensorFluent for the exists aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the exists aggregation function.
'''
return self._aggregation_op(tf.reduce_any, self, vars_list) | [
"def",
"exists",
"(",
"self",
",",
"vars_list",
":",
"List",
"[",
"str",
"]",
")",
"->",
"'TensorFluent'",
":",
"return",
"self",
".",
"_aggregation_op",
"(",
"tf",
".",
"reduce_any",
",",
"self",
",",
"vars_list",
")"
] | Returns the TensorFluent for the exists aggregation function.
Args:
vars_list: The list of variables to be aggregated over.
Returns:
A TensorFluent wrapping the exists aggregation function. | [
"Returns",
"the",
"TensorFluent",
"for",
"the",
"exists",
"aggregation",
"function",
"."
] | f7c03d3a74d2663807c1e23e04eeed2e85166b71 | https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluent.py#L743-L752 | train | 52,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.