| repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition | idx |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
RI-imaging/nrefocus | nrefocus/metrics.py | contrast_rms | def contrast_rms(data, **kwargs):
""" Compute RMS contrast norm of an image
"""
av = np.average(data, **kwargs)
mal = 1 / (data.shape[0] * data.shape[1])  # inverse of the pixel count
return np.sqrt(mal * np.sum(np.square(data - av))) | python | def contrast_rms(data, **kwargs):
""" Compute RMS contrast norm of an image
"""
av = np.average(data, **kwargs)
mal = 1 / (data.shape[0] * data.shape[1])  # inverse of the pixel count
return np.sqrt(mal * np.sum(np.square(data - av))) | [
"def",
"contrast_rms",
"(",
"data",
",",
"*",
"kwargs",
")",
":",
"av",
"=",
"np",
".",
"average",
"(",
"data",
",",
"*",
"kwargs",
")",
"mal",
"=",
"1",
"/",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
"*",
"data",
".",
"shape",
"[",
"1",
"]",... | Compute RMS contrast norm of an image | [
"Compute",
"RMS",
"contrast",
"norm",
"of",
"an",
"image"
] | ad09aeecace609ab8f9effcb662d2b7d50826080 | https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/metrics.py#L10-L15 | train | 51,100 |
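For a plain unweighted 2-D image, the RMS contrast computed above is simply the standard deviation of the pixel values. A minimal sketch confirming the equivalence (the test image is made up):

```python
import numpy as np

rng = np.random.default_rng(0)
img = rng.random((64, 64))            # hypothetical test image

av = np.average(img)
rms = np.sqrt(np.sum(np.square(img - av)) / img.size)
assert np.isclose(rms, img.std())     # RMS contrast == std in the unweighted case
```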
RI-imaging/nrefocus | nrefocus/metrics.py | spectral | def spectral(data, lambd, **kwargs):
""" Compute spectral contrast of image
Performs bandpass filtering in Fourier space according to the optical
limit of the detection system, approximated by twice the wavelength.
Parameters
----------
data : 2d ndarray
the image to compute the norm from
lambd : float
wavelength of the light in pixels
"""
# Set up fast Fourier transform
# if not data.dtype == np.dtype(np.complex):
# data = np.array(data, dtype=np.complex)
# fftplan = fftw3.Plan(data.copy(), None, nthreads = _ncores,
# direction="forward", flags=_fftwflags)
# fftdata = np.zeros(data.shape, dtype=np.complex)
# fftplan.guru_execute_dft(data, fftdata)
# fftw.destroy_plan(fftplan)
fftdata = np.fft.fftn(data)
# Filter Fourier transform: zero the DC term and all modes beyond the resolution limit
fftdata[0, 0] = 0
kx = 2 * np.pi * np.fft.fftfreq(data.shape[0]).reshape(-1, 1)
ky = 2 * np.pi * np.fft.fftfreq(data.shape[1]).reshape(1, -1)
kmax = (2 * np.pi) / (2 * lambd)
fftdata[np.where(kx**2 + ky**2 > kmax**2)] = 0
spec = np.sum(np.log(1 + np.abs(fftdata))) / np.sqrt(np.prod(data.shape))
return spec | python | def spectral(data, lambd, **kwargs):
""" Compute spectral contrast of image
Performs bandpass filtering in Fourier space according to the optical
limit of the detection system, approximated by twice the wavelength.
Parameters
----------
data : 2d ndarray
the image to compute the norm from
lambd : float
wavelength of the light in pixels
"""
# Set up fast Fourier transform
# if not data.dtype == np.dtype(np.complex):
# data = np.array(data, dtype=np.complex)
# fftplan = fftw3.Plan(data.copy(), None, nthreads = _ncores,
# direction="forward", flags=_fftwflags)
# fftdata = np.zeros(data.shape, dtype=np.complex)
# fftplan.guru_execute_dft(data, fftdata)
# fftw.destroy_plan(fftplan)
fftdata = np.fft.fftn(data)
# Filter Fourier transform: zero the DC term and all modes beyond the resolution limit
fftdata[0, 0] = 0
kx = 2 * np.pi * np.fft.fftfreq(data.shape[0]).reshape(-1, 1)
ky = 2 * np.pi * np.fft.fftfreq(data.shape[1]).reshape(1, -1)
kmax = (2 * np.pi) / (2 * lambd)
fftdata[np.where(kx**2 + ky**2 > kmax**2)] = 0
spec = np.sum(np.log(1 + np.abs(fftdata))) / np.sqrt(np.prod(data.shape))
return spec | [
"def",
"spectral",
"(",
"data",
",",
"lambd",
",",
"*",
"kwargs",
")",
":",
"# Set up fast fourier transform",
"# if not data.dtype == np.dtype(np.complex):",
"# data = np.array(data, dtype=np.complex)",
"# fftplan = fftw3.Plan(data.copy(), None, nthreads = _ncores,",
"# ... | Compute spectral contrast of image
Performs bandpass filtering in Fourier space according to the optical
limit of the detection system, approximated by twice the wavelength.
Parameters
----------
data : 2d ndarray
the image to compute the norm from
lambd : float
wavelength of the light in pixels | [
"Compute",
"spectral",
"contrast",
"of",
"image"
] | ad09aeecace609ab8f9effcb662d2b7d50826080 | https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/metrics.py#L18-L52 | train | 51,101 |
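The band-pass step above keeps only spatial frequencies below kmax = 2π/(2λ), i.e. structures larger than twice the wavelength. A sketch of the same mask on a hypothetical 128×128 image (the 10 px wavelength is an assumption):

```python
import numpy as np

shape, lambd = (128, 128), 10.0
kx = 2 * np.pi * np.fft.fftfreq(shape[0]).reshape(-1, 1)
ky = 2 * np.pi * np.fft.fftfreq(shape[1]).reshape(1, -1)
kmax = 2 * np.pi / (2 * lambd)        # resolution limit ~ twice the wavelength
mask = kx**2 + ky**2 <= kmax**2       # True for the Fourier modes that survive
print(mask.sum(), "of", mask.size, "modes kept")
```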
MacHu-GWU/dataIO-project | dataIO/textfile.py | write | def write(s, path, encoding="utf-8"):
"""Write string to text file.
"""
is_gzip = is_gzip_file(path)
with open(path, "wb") as f:
if is_gzip:
f.write(zlib.compress(s.encode(encoding)))
else:
f.write(s.encode(encoding)) | python | def write(s, path, encoding="utf-8"):
"""Write string to text file.
"""
is_gzip = is_gzip_file(path)
with open(path, "wb") as f:
if is_gzip:
f.write(zlib.compress(s.encode(encoding)))
else:
f.write(s.encode(encoding)) | [
"def",
"write",
"(",
"s",
",",
"path",
",",
"encoding",
"=",
"\"utf-8\"",
")",
":",
"is_gzip",
"=",
"is_gzip_file",
"(",
"path",
")",
"with",
"open",
"(",
"path",
",",
"\"wb\"",
")",
"as",
"f",
":",
"if",
"is_gzip",
":",
"f",
".",
"write",
"(",
"... | Write string to text file. | [
"Write",
"string",
"to",
"text",
"file",
"."
] | 7e1cc192b5e53426eed6dbd742918619b8fd60ab | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/textfile.py#L44-L53 | train | 51,102 |
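Note that `zlib.compress` produces a raw zlib stream (RFC 1950) rather than a gzip container, so the compressed file only round-trips through the matching `read()` below. A minimal sketch of that framing, independent of dataIO's `is_gzip_file()` helper (whose implementation is not shown here):

```python
import zlib

s = "héllo wörld"
blob = zlib.compress(s.encode("utf-8"))
assert zlib.decompress(blob).decode("utf-8") == s
```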
MacHu-GWU/dataIO-project | dataIO/textfile.py | read | def read(path, encoding="utf-8"):
"""Read string from text file.
"""
is_gzip = is_gzip_file(path)
with open(path, "rb") as f:
if is_gzip:
return zlib.decompress(f.read()).decode(encoding)
else:
return f.read().decode(encoding) | python | def read(path, encoding="utf-8"):
"""Read string from text file.
"""
is_gzip = is_gzip_file(path)
with open(path, "rb") as f:
if is_gzip:
return zlib.decompress(f.read()).decode(encoding)
else:
return f.read().decode(encoding) | [
"def",
"read",
"(",
"path",
",",
"encoding",
"=",
"\"utf-8\"",
")",
":",
"is_gzip",
"=",
"is_gzip_file",
"(",
"path",
")",
"with",
"open",
"(",
"path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"if",
"is_gzip",
":",
"return",
"zlib",
".",
"decompress",
"("... | Read string from text file. | [
"Read",
"string",
"from",
"text",
"file",
"."
] | 7e1cc192b5e53426eed6dbd742918619b8fd60ab | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/textfile.py#L63-L72 | train | 51,103 |
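A hedged round-trip through the two functions above, assuming the dataIO package is installed and the chosen path does not trigger the gzip branch:

```python
import os
import tempfile
from dataIO import textfile   # assumes the dataIO package is installed

path = os.path.join(tempfile.mkdtemp(), "note.txt")
textfile.write("pi ≈ 3.14159", path)
assert textfile.read(path) == "pi ≈ 3.14159"
```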
MacHu-GWU/dataIO-project | dataIO/textfile.py | smartread | def smartread(path):
"""Read text from file, automatically detect encoding. ``chardet`` required.
"""
with open(path, "rb") as f:
content = f.read()
result = chardet.detect(content)
return content.decode(result["encoding"]) | python | def smartread(path):
"""Read text from file, automatically detect encoding. ``chardet`` required.
"""
with open(path, "rb") as f:
content = f.read()
result = chardet.detect(content)
return content.decode(result["encoding"]) | [
"def",
"smartread",
"(",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"content",
"=",
"f",
".",
"read",
"(",
")",
"result",
"=",
"chardet",
".",
"detect",
"(",
"content",
")",
"return",
"content",
".",
"decode"... | Read text from file, automatically detect encoding. ``chardet`` required. | [
"Read",
"text",
"from",
"file",
"automatically",
"detect",
"encoding",
".",
"chardet",
"required",
"."
] | 7e1cc192b5e53426eed6dbd742918619b8fd60ab | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/textfile.py#L82-L88 | train | 51,104 |
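`chardet.detect` returns a dict with `encoding` and `confidence` keys; `smartread()` uses only the former. A small sketch (the sample text is made up, and very short inputs can defeat detection, hence the fallback):

```python
import chardet

raw = ("Un texte accentué, assez long pour que la détection "
       "d'encodage soit fiable.").encode("latin-1")
guess = chardet.detect(raw)
enc = guess["encoding"] or "latin-1"   # detection may return None on short input
print(enc, guess["confidence"])
print(raw.decode(enc))
```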
MacHu-GWU/dataIO-project | dataIO/textfile.py | to_utf8 | def to_utf8(path, output_path=None):
"""Convert any text file to utf8 encoding.
"""
if output_path is None:
basename, ext = os.path.splitext(path)
output_path = basename + "-UTF8Encode" + ext
text = smartread(path)
write(text, output_path) | python | def to_utf8(path, output_path=None):
"""Convert any text file to utf8 encoding.
"""
if output_path is None:
basename, ext = os.path.splitext(path)
output_path = basename + "-UTF8Encode" + ext
text = smartread(path)
write(text, output_path) | [
"def",
"to_utf8",
"(",
"path",
",",
"output_path",
"=",
"None",
")",
":",
"if",
"output_path",
"is",
"None",
":",
"basename",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"path",
")",
"output_path",
"=",
"basename",
"+",
"\"-UTF8Encode\"",
... | Convert any text file to utf8 encoding. | [
"Convert",
"any",
"text",
"file",
"to",
"utf8",
"encoding",
"."
] | 7e1cc192b5e53426eed6dbd742918619b8fd60ab | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/textfile.py#L91-L99 | train | 51,105 |
BlueBrain/nat | nat/zotero_wrap.py | ZoteroWrap.load_cache | def load_cache(self):
"""Load the cached Zotero data."""
with open(self.cache_path, "rb") as f:
print("Loading cached Zotero data...")
cache = pickle.load(f)
self._references = cache[self.CACHE_REFERENCE_LIST]
self.reference_types = cache[self.CACHE_REFERENCE_TYPES]
self.reference_templates = cache[self.CACHE_REFERENCE_TEMPLATES]
print("Cached Zotero data loaded.") | python | def load_cache(self):
"""Load the cached Zotero data."""
with open(self.cache_path, "rb") as f:
print("Loading cached Zotero data...")
cache = pickle.load(f)
self._references = cache[self.CACHE_REFERENCE_LIST]
self.reference_types = cache[self.CACHE_REFERENCE_TYPES]
self.reference_templates = cache[self.CACHE_REFERENCE_TEMPLATES]
print("Cached Zotero data loaded.") | [
"def",
"load_cache",
"(",
"self",
")",
":",
"with",
"open",
"(",
"self",
".",
"cache_path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"print",
"(",
"\"Loading cached Zotero data...\"",
")",
"cache",
"=",
"pickle",
".",
"load",
"(",
"f",
")",
"self",
".",
"_r... | Load the cached Zotero data. | [
"Load",
"the",
"cached",
"Zotero",
"data",
"."
] | 0934f06e48e6efedf55a9617b15becae0d7b277c | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/zotero_wrap.py#L38-L46 | train | 51,106 |
BlueBrain/nat | nat/zotero_wrap.py | ZoteroWrap.load_distant | def load_distant(self):
"""Load the distant Zotero data."""
print("Loading distant Zotero data...")
self._references = self.get_references()
self.reference_types = self.get_reference_types()
self.reference_templates = self.get_reference_templates(self.reference_types)
print("Distant Zotero data loaded.")
self.cache() | python | def load_distant(self):
"""Load the distant Zotero data."""
print("Loading distant Zotero data...")
self._references = self.get_references()
self.reference_types = self.get_reference_types()
self.reference_templates = self.get_reference_templates(self.reference_types)
print("Distant Zotero data loaded.")
self.cache() | [
"def",
"load_distant",
"(",
"self",
")",
":",
"print",
"(",
"\"Loading distant Zotero data...\"",
")",
"self",
".",
"_references",
"=",
"self",
".",
"get_references",
"(",
")",
"self",
".",
"reference_types",
"=",
"self",
".",
"get_reference_types",
"(",
")",
... | Load the distant Zotero data. | [
"Load",
"the",
"distant",
"Zotero",
"data",
"."
] | 0934f06e48e6efedf55a9617b15becae0d7b277c | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/zotero_wrap.py#L48-L55 | train | 51,107 |
BlueBrain/nat | nat/zotero_wrap.py | ZoteroWrap.cache | def cache(self):
"""Cache the Zotero data."""
with open(self.cache_path, "wb") as f:
cache = {self.CACHE_REFERENCE_LIST: self._references,
self.CACHE_REFERENCE_TYPES: self.reference_types,
self.CACHE_REFERENCE_TEMPLATES: self.reference_templates}
pickle.dump(cache, f) | python | def cache(self):
"""Cache the Zotero data."""
with open(self.cache_path, "wb") as f:
cache = {self.CACHE_REFERENCE_LIST: self._references,
self.CACHE_REFERENCE_TYPES: self.reference_types,
self.CACHE_REFERENCE_TEMPLATES: self.reference_templates}
pickle.dump(cache, f) | [
"def",
"cache",
"(",
"self",
")",
":",
"with",
"open",
"(",
"self",
".",
"cache_path",
",",
"\"wb\"",
")",
"as",
"f",
":",
"cache",
"=",
"{",
"self",
".",
"CACHE_REFERENCE_LIST",
":",
"self",
".",
"_references",
",",
"self",
".",
"CACHE_REFERENCE_TYPES",... | Cache the Zotero data. | [
"Cache",
"the",
"Zotero",
"data",
"."
] | 0934f06e48e6efedf55a9617b15becae0d7b277c | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/zotero_wrap.py#L57-L63 | train | 51,108 |
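A sketch of the pickle round-trip performed by `cache()`/`load_cache()` above. The three key names mirror the `CACHE_*` class constants, whose actual string values are not shown here, so they are assumptions:

```python
import os
import pickle
import tempfile

cache = {
    "references": [{"key": "ABC123", "data": {"title": "A made-up paper"}}],
    "reference_types": ["journalArticle"],
    "reference_templates": {"journalArticle": {"title": "", "creators": []}},
}
path = os.path.join(tempfile.mkdtemp(), "zotero-cache.pkl")
with open(path, "wb") as f:
    pickle.dump(cache, f)       # same layout as ZoteroWrap.cache()
with open(path, "rb") as f:
    assert pickle.load(f) == cache
```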
BlueBrain/nat | nat/zotero_wrap.py | ZoteroWrap.create_distant_reference | def create_distant_reference(self, ref_data):
"""Validate and create the reference in Zotero and return the created item."""
self.validate_reference_data(ref_data)
creation_status = self._zotero_lib.create_items([ref_data])
try:
created_item = creation_status["successful"]["0"]
return created_item
except KeyError as e:
print(creation_status)
raise CreateZoteroItemError from e | python | def create_distant_reference(self, ref_data):
"""Validate and create the reference in Zotero and return the created item."""
self.validate_reference_data(ref_data)
creation_status = self._zotero_lib.create_items([ref_data])
try:
created_item = creation_status["successful"]["0"]
return created_item
except KeyError as e:
print(creation_status)
raise CreateZoteroItemError from e | [
"def",
"create_distant_reference",
"(",
"self",
",",
"ref_data",
")",
":",
"self",
".",
"validate_reference_data",
"(",
"ref_data",
")",
"creation_status",
"=",
"self",
".",
"_zotero_lib",
".",
"create_items",
"(",
"[",
"ref_data",
"]",
")",
"try",
":",
"creat... | Validate and create the reference in Zotero and return the created item. | [
"Validate",
"and",
"create",
"the",
"reference",
"in",
"Zotero",
"and",
"return",
"the",
"created",
"item",
"."
] | 0934f06e48e6efedf55a9617b15becae0d7b277c | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/zotero_wrap.py#L70-L79 | train | 51,109 |
BlueBrain/nat | nat/zotero_wrap.py | ZoteroWrap.update_local_reference | def update_local_reference(self, index, ref):
"""Replace the reference in the reference list and cache it."""
self._references[index] = ref
self.cache() | python | def update_local_reference(self, index, ref):
"""Replace the reference in the reference list and cache it."""
self._references[index] = ref
self.cache() | [
"def",
"update_local_reference",
"(",
"self",
",",
"index",
",",
"ref",
")",
":",
"self",
".",
"_references",
"[",
"index",
"]",
"=",
"ref",
"self",
".",
"cache",
"(",
")"
] | Replace the reference in the reference list and cache it. | [
"Replace",
"the",
"reference",
"in",
"the",
"reference",
"list",
"and",
"cache",
"it",
"."
] | 0934f06e48e6efedf55a9617b15becae0d7b277c | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/zotero_wrap.py#L81-L84 | train | 51,110 |
BlueBrain/nat | nat/zotero_wrap.py | ZoteroWrap.update_distant_reference | def update_distant_reference(self, ref):
"""Validate and update the reference in Zotero.
Existing fields not present will be left unmodified.
"""
self.validate_reference_data(ref["data"])
self._zotero_lib.update_item(ref) | python | def update_distant_reference(self, ref):
"""Validate and update the reference in Zotero.
Existing fields not present will be left unmodified.
"""
self.validate_reference_data(ref["data"])
self._zotero_lib.update_item(ref) | [
"def",
"update_distant_reference",
"(",
"self",
",",
"ref",
")",
":",
"self",
".",
"validate_reference_data",
"(",
"ref",
"[",
"\"data\"",
"]",
")",
"self",
".",
"_zotero_lib",
".",
"update_item",
"(",
"ref",
")"
] | Validate and update the reference in Zotero.
Existing fields not present will be left unmodified. | [
"Validate",
"and",
"update",
"the",
"reference",
"in",
"Zotero",
"."
] | 0934f06e48e6efedf55a9617b15becae0d7b277c | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/zotero_wrap.py#L86-L92 | train | 51,111 |
BlueBrain/nat | nat/zotero_wrap.py | ZoteroWrap.validate_reference_data | def validate_reference_data(self, ref_data):
"""Validate the reference data.
Zotero.check_items() caches data after the first API call.
"""
try:
self._zotero_lib.check_items([ref_data])
except InvalidItemFields as e:
raise InvalidZoteroItemError from e | python | def validate_reference_data(self, ref_data):
"""Validate the reference data.
Zotero.check_items() caches data after the first API call.
"""
try:
self._zotero_lib.check_items([ref_data])
except InvalidItemFields as e:
raise InvalidZoteroItemError from e | [
"def",
"validate_reference_data",
"(",
"self",
",",
"ref_data",
")",
":",
"try",
":",
"self",
".",
"_zotero_lib",
".",
"check_items",
"(",
"[",
"ref_data",
"]",
")",
"except",
"InvalidItemFields",
"as",
"e",
":",
"raise",
"InvalidZoteroItemError",
"from",
"e"
... | Validate the reference data.
Zotero.check_items() caches data after the first API call. | [
"Validate",
"the",
"reference",
"data",
"."
] | 0934f06e48e6efedf55a9617b15becae0d7b277c | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/zotero_wrap.py#L94-L102 | train | 51,112 |
BlueBrain/nat | nat/zotero_wrap.py | ZoteroWrap.get_reference_types | def get_reference_types(self):
"""Return the reference types.
Zotero.item_types() caches data after the first API call.
"""
item_types = self._zotero_lib.item_types()
return sorted([x["itemType"] for x in item_types]) | python | def get_reference_types(self):
"""Return the reference types.
Zotero.item_types() caches data after the first API call.
"""
item_types = self._zotero_lib.item_types()
return sorted([x["itemType"] for x in item_types]) | [
"def",
"get_reference_types",
"(",
"self",
")",
":",
"item_types",
"=",
"self",
".",
"_zotero_lib",
".",
"item_types",
"(",
")",
"return",
"sorted",
"(",
"[",
"x",
"[",
"\"itemType\"",
"]",
"for",
"x",
"in",
"item_types",
"]",
")"
] | Return the reference types.
Zotero.item_types() caches data after the first API call. | [
"Return",
"the",
"reference",
"types",
"."
] | 0934f06e48e6efedf55a9617b15becae0d7b277c | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/zotero_wrap.py#L108-L114 | train | 51,113 |
BlueBrain/nat | nat/zotero_wrap.py | ZoteroWrap.get_reference_templates | def get_reference_templates(self, ref_types):
"""Return the reference templates for the types as an ordered dictionary."""
return OrderedDict([(x, self.get_reference_template(x)) for x in ref_types]) | python | def get_reference_templates(self, ref_types):
"""Return the reference templates for the types as an ordered dictionary."""
return OrderedDict([(x, self.get_reference_template(x)) for x in ref_types]) | [
"def",
"get_reference_templates",
"(",
"self",
",",
"ref_types",
")",
":",
"return",
"OrderedDict",
"(",
"[",
"(",
"x",
",",
"self",
".",
"get_reference_template",
"(",
"x",
")",
")",
"for",
"x",
"in",
"ref_types",
"]",
")"
] | Return the reference templates for the types as an ordered dictionary. | [
"Return",
"the",
"reference",
"templates",
"for",
"the",
"types",
"as",
"an",
"ordered",
"dictionary",
"."
] | 0934f06e48e6efedf55a9617b15becae0d7b277c | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/zotero_wrap.py#L116-L118 | train | 51,114 |
BlueBrain/nat | nat/zotero_wrap.py | ZoteroWrap.get_reference_template | def get_reference_template(self, ref_type):
"""Return the reference template for the type as an ordered dictionary.
Zotero.item_template() caches data after the first API call.
"""
template = self._zotero_lib.item_template(ref_type)
return OrderedDict(sorted(template.items(), key=lambda x: x[0])) | python | def get_reference_template(self, ref_type):
"""Return the reference template for the type as an ordered dictionary.
Zotero.item_template() caches data after the first API call.
"""
template = self._zotero_lib.item_template(ref_type)
return OrderedDict(sorted(template.items(), key=lambda x: x[0])) | [
"def",
"get_reference_template",
"(",
"self",
",",
"ref_type",
")",
":",
"template",
"=",
"self",
".",
"_zotero_lib",
".",
"item_template",
"(",
"ref_type",
")",
"return",
"OrderedDict",
"(",
"sorted",
"(",
"template",
".",
"items",
"(",
")",
",",
"key",
"... | Return the reference template for the type as an ordered dictionary.
Zotero.item_template() caches data after the first API call. | [
"Return",
"the",
"reference",
"template",
"for",
"the",
"type",
"as",
"an",
"ordered",
"dictionary",
"."
] | 0934f06e48e6efedf55a9617b15becae0d7b277c | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/zotero_wrap.py#L120-L126 | train | 51,115 |
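The template sorting reduces to an `OrderedDict` over `sorted(...)`; note that uppercase field names such as `DOI` sort before lowercase ones. A sketch with a made-up template:

```python
from collections import OrderedDict

template = {"title": "", "creators": [], "date": "", "DOI": ""}
ordered = OrderedDict(sorted(template.items(), key=lambda x: x[0]))
print(list(ordered))   # ['DOI', 'creators', 'date', 'title']
```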
BlueBrain/nat | nat/zotero_wrap.py | ZoteroWrap.reference_extra_field | def reference_extra_field(self, field, index):
"""Return the value of the field in 'extra', otherwise ''."""
ref_data = self.reference_data(index)
extra_fields = ref_data["extra"].split("\n")
field_id = field + ":"
matched = next((x for x in extra_fields if x.startswith(field_id)), None)
if matched:
return matched.replace(field_id, "", 1).strip()
else:
return "" | python | def reference_extra_field(self, field, index):
"""Return the value of the field in 'extra', otherwise ''."""
ref_data = self.reference_data(index)
extra_fields = ref_data["extra"].split("\n")
field_id = field + ":"
matched = next((x for x in extra_fields if x.startswith(field_id)), None)
if matched:
return matched.replace(field_id, "", 1).strip()
else:
return "" | [
"def",
"reference_extra_field",
"(",
"self",
",",
"field",
",",
"index",
")",
":",
"ref_data",
"=",
"self",
".",
"reference_data",
"(",
"index",
")",
"extra_fields",
"=",
"ref_data",
"[",
"\"extra\"",
"]",
".",
"split",
"(",
"\"\\n\"",
")",
"field_id",
"="... | Return the value of the field in 'extra', otherwise ''. | [
"Return",
"the",
"value",
"of",
"the",
"field",
"in",
"extra",
"otherwise",
"."
] | 0934f06e48e6efedf55a9617b15becae0d7b277c | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/zotero_wrap.py#L142-L151 | train | 51,116 |
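Zotero's free-form `extra` field holds one `Key: value` pair per line; the lookup above reduces to the following (the field contents are made up):

```python
extra = "PMID: 12345\nDOI: 10.1000/xyz123"
field_id = "DOI:"
matched = next((x for x in extra.split("\n") if x.startswith(field_id)), None)
value = matched.replace(field_id, "", 1).strip() if matched else ""
print(value)   # 10.1000/xyz123
```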
BlueBrain/nat | nat/zotero_wrap.py | ZoteroWrap.reference_doi | def reference_doi(self, index):
"""Return the reference DOI."""
return self.reference_data(index).get("DOI", self.reference_extra_field("DOI", index)) | python | def reference_doi(self, index):
"""Return the reference DOI."""
return self.reference_data(index).get("DOI", self.reference_extra_field("DOI", index)) | [
"def",
"reference_doi",
"(",
"self",
",",
"index",
")",
":",
"return",
"self",
".",
"reference_data",
"(",
"index",
")",
".",
"get",
"(",
"\"DOI\"",
",",
"self",
".",
"reference_extra_field",
"(",
"\"DOI\"",
",",
"index",
")",
")"
] | Return the reference DOI. | [
"Return",
"the",
"reference",
"DOI",
"."
] | 0934f06e48e6efedf55a9617b15becae0d7b277c | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/zotero_wrap.py#L177-L179 | train | 51,117 |
BlueBrain/nat | nat/zotero_wrap.py | ZoteroWrap.reference_year | def reference_year(self, index):
"""Return the reference publication year."""
# TODO Use meta:parsedDate field instead?
ref_date = self.reference_date(index)
try:
# NB: datetime.year returns an int.
return parse(ref_date).year
except ValueError:
matched = re.search(r"\d{4}", ref_date)
if matched:
return int(matched.group())
else:
return "" | python | def reference_year(self, index):
"""Return the reference publication year."""
# TODO Use meta:parsedDate field instead?
ref_date = self.reference_date(index)
try:
# NB: datetime.year returns an int.
return parse(ref_date).year
except ValueError:
matched = re.search(r"\d{4}", ref_date)
if matched:
return int(matched.group())
else:
return "" | [
"def",
"reference_year",
"(",
"self",
",",
"index",
")",
":",
"# TODO Use meta:parsedDate field instead?",
"ref_date",
"=",
"self",
".",
"reference_date",
"(",
"index",
")",
"try",
":",
"# NB: datetime.year returns an int.",
"return",
"parse",
"(",
"ref_date",
")",
... | Return the reference publication year. | [
"Return",
"the",
"reference",
"publication",
"year",
"."
] | 0934f06e48e6efedf55a9617b15becae0d7b277c | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/zotero_wrap.py#L219-L231 | train | 51,118 |
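The two-stage year extraction above, `dateutil` first and a four-digit regex as fallback, behaves like this on made-up date strings:

```python
import re
from dateutil.parser import parse

for ref_date in ("2003-05-17", "circa 1998"):
    try:
        year = parse(ref_date).year        # dateutil handles well-formed dates
    except ValueError:
        m = re.search(r"\d{4}", ref_date)  # regex rescues free-form strings
        year = int(m.group()) if m else ""
    print(ref_date, "->", year)            # 2003, then 1998 via the fallback
```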
BlueBrain/nat | nat/zotero_wrap.py | ZoteroWrap.reference_journal | def reference_journal(self, index):
"""Return the reference journal name."""
# TODO Change the column name 'Journal' to another?
ref_type = self.reference_type(index)
if ref_type == "journalArticle":
return self.reference_data(index)["publicationTitle"]
else:
return "({})".format(ref_type) | python | def reference_journal(self, index):
"""Return the reference journal name."""
# TODO Change the column name 'Journal' to another?
ref_type = self.reference_type(index)
if ref_type == "journalArticle":
return self.reference_data(index)["publicationTitle"]
else:
return "({})".format(ref_type) | [
"def",
"reference_journal",
"(",
"self",
",",
"index",
")",
":",
"# TODO Change the column name 'Journal' to an other?",
"ref_type",
"=",
"self",
".",
"reference_type",
"(",
"index",
")",
"if",
"ref_type",
"==",
"\"journalArticle\"",
":",
"return",
"self",
".",
"ref... | Return the reference journal name. | [
"Return",
"the",
"reference",
"journal",
"name",
"."
] | 0934f06e48e6efedf55a9617b15becae0d7b277c | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/zotero_wrap.py#L233-L240 | train | 51,119 |
BlueBrain/nat | nat/zotero_wrap.py | ZoteroWrap.reference_index | def reference_index(self, ref_id):
"""Return the first reference with this ID."""
try:
indexes = range(self.reference_count())
return next(i for i in indexes if self.reference_id(i) == ref_id)
except StopIteration as e:
raise ReferenceNotFoundError("ID: " + ref_id) from e | python | def reference_index(self, ref_id):
"""Return the first reference with this ID."""
try:
indexes = range(self.reference_count())
return next(i for i in indexes if self.reference_id(i) == ref_id)
except StopIteration as e:
raise ReferenceNotFoundError("ID: " + ref_id) from e | [
"def",
"reference_index",
"(",
"self",
",",
"ref_id",
")",
":",
"try",
":",
"indexes",
"=",
"range",
"(",
"self",
".",
"reference_count",
"(",
")",
")",
"return",
"next",
"(",
"i",
"for",
"i",
"in",
"indexes",
"if",
"self",
".",
"reference_id",
"(",
... | Return the first reference with this ID. | [
"Return",
"the",
"first",
"reference",
"with",
"this",
"ID",
"."
] | 0934f06e48e6efedf55a9617b15becae0d7b277c | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/zotero_wrap.py#L244-L250 | train | 51,120 |
BlueBrain/nat | nat/restServer.py | computePDFSimilarity | def computePDFSimilarity(paperId, userPDF):
if not isPDFInDb(paperId):
return None
userPDF.save("temp.pdf")
# check_call is blocking
check_call(['pdftotext', '-enc', 'UTF-8', "temp.pdf", "temp.txt"])
os.remove("temp.pdf")
a = open("temp.txt", 'r').read()
b = open(join(dbPath, paperId) + ".txt", 'r').read()
import nltk, string
from sklearn.feature_extraction.text import TfidfVectorizer
stemmer = nltk.stem.porter.PorterStemmer()
remove_punctuation_map = dict((ord(char), None) for char in string.punctuation)
def stem_tokens(tokens):
return [stemmer.stem(item) for item in tokens]
'''remove punctuation, lowercase, stem'''
def normalize(text):
return stem_tokens(nltk.word_tokenize(text.lower().translate(remove_punctuation_map)))
vectorizer = TfidfVectorizer(tokenizer=normalize, stop_words='english')
def cosine_sim(text1, text2):
tfidf = vectorizer.fit_transform([text1, text2])
return ((tfidf * tfidf.T).A)[0,1]
similarity = cosine_sim(a, b)
os.remove("temp.txt")
return similarity | python | def computePDFSimilarity(paperId, userPDF):
if not isPDFInDb(paperId):
return None
userPDF.save("temp.pdf")
# check_call is blocking
check_call(['pdftotext', '-enc', 'UTF-8', "temp.pdf", "temp.txt"])
os.remove("temp.pdf")
a = open("temp.txt", 'r').read()
b = open(join(dbPath, paperId) + ".txt", 'r').read()
import nltk, string
from sklearn.feature_extraction.text import TfidfVectorizer
stemmer = nltk.stem.porter.PorterStemmer()
remove_punctuation_map = dict((ord(char), None) for char in string.punctuation)
def stem_tokens(tokens):
return [stemmer.stem(item) for item in tokens]
'''remove punctuation, lowercase, stem'''
def normalize(text):
return stem_tokens(nltk.word_tokenize(text.lower().translate(remove_punctuation_map)))
vectorizer = TfidfVectorizer(tokenizer=normalize, stop_words='english')
def cosine_sim(text1, text2):
tfidf = vectorizer.fit_transform([text1, text2])
return ((tfidf * tfidf.T).A)[0,1]
similarity = cosine_sim(a, b)
os.remove("temp.txt")
return similarity | [
"def",
"computePDFSimilarity",
"(",
"paperId",
",",
"userPDF",
")",
":",
"if",
"not",
"isPDFInDb",
"(",
"paperId",
")",
":",
"return",
"None",
"userPDF",
".",
"save",
"(",
"\"temp.pdf\"",
")",
"# check_call is blocking",
"check_call",
"(",
"[",
"'pdftotext'",
... | remove punctuation, lowercase, stem | [
"remove",
"punctuation",
"lowercase",
"stem"
] | 0934f06e48e6efedf55a9617b15becae0d7b277c | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/restServer.py#L339-L374 | train | 51,121 |
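The similarity core above is TF-IDF plus a cosine product; a dependency-light sketch without the NLTK stemming step (the two sample texts are made up):

```python
from sklearn.feature_extraction.text import TfidfVectorizer

vec = TfidfVectorizer(stop_words="english")
tfidf = vec.fit_transform(["neutron imaging of biological cells",
                           "imaging biological cells with neutrons"])
cosine = (tfidf * tfidf.T).toarray()[0, 1]   # rows are L2-normalized
print(round(cosine, 3))
```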
lablup/backend.ai-common | src/ai/backend/common/plugin.py | install_plugins | def install_plugins(plugins, app, install_type, config):
"""
Automatically install plugins to the app.
:param plugins: List of plugin names to discover and install plugins
:param app: Any type of app to install plugins
:param install_type: The way to install plugins to app
:param config: Config object to initialize plugins
:return:
You should note that app can be any type of object. For instance,
when used in manager, app param is the instance of aiohttp.web.Application,
but it is the instance of subclass of aiozmq.rpc.AttrHandler in agents.
Therefore, you should specify :install_type: to install plugins into different
types of apps correctly. Currently we support two types of :install_type:,
which are 'attr' and 'dict'. For 'attr', plugins will be installed to app
as its attributes. For 'dict', plugins will be installed as follows:
app[plugin_name] = plugin.
"""
try:
disable_plugins = config.disable_plugins
if not disable_plugins:
disable_plugins = []
except AttributeError:
disable_plugins = []
for plugin_name in plugins:
plugin_group = f'backendai_{plugin_name}_v10'
registry = PluginRegistry(plugin_name)
for entrypoint in pkg_resources.iter_entry_points(plugin_group):
if entrypoint.name in disable_plugins:
continue
log.info('Installing plugin: {}.{}', plugin_group, entrypoint.name)
plugin_module = entrypoint.load()
plugin = getattr(plugin_module, 'get_plugin')(config)
registry.register(plugin)
if install_type == 'attr':
setattr(app, plugin_name, registry)
elif install_type == 'dict':
assert isinstance(app, typing.MutableMapping), \
(f"app must be an instance of MutableMapping "
f"for 'dict' install_type.")
app[plugin_name] = registry
else:
raise ValueError(f'Invalid install type: {install_type}') | python | def install_plugins(plugins, app, install_type, config):
"""
Automatically install plugins to the app.
:param plugins: List of plugin names to discover and install plugins
:param app: Any type of app to install plugins
:param install_type: The way to install plugins to app
:param config: Config object to initialize plugins
:return:
You should note that app can be any type of object. For instance,
when used in manager, app param is the instance of aiohttp.web.Application,
but it is the instance of subclass of aiozmq.rpc.AttrHandler in agents.
Therefore, you should specify :install_type: to install plugins into different
types of apps correctly. Currently we support two types of :install_type:,
which are 'attr' and 'dict'. For 'attr', plugins will be installed to app
as its attributes. For 'dict', plugins will be installed as follows:
app[plugin_name] = plugin.
"""
try:
disable_plugins = config.disable_plugins
if not disable_plugins:
disable_plugins = []
except AttributeError:
disable_plugins = []
for plugin_name in plugins:
plugin_group = f'backendai_{plugin_name}_v10'
registry = PluginRegistry(plugin_name)
for entrypoint in pkg_resources.iter_entry_points(plugin_group):
if entrypoint.name in disable_plugins:
continue
log.info('Installing plugin: {}.{}', plugin_group, entrypoint.name)
plugin_module = entrypoint.load()
plugin = getattr(plugin_module, 'get_plugin')(config)
registry.register(plugin)
if install_type == 'attr':
setattr(app, plugin_name, registry)
elif install_type == 'dict':
assert isinstance(app, typing.MutableMapping), \
(f"app must be an instance of MutableMapping "
f"for 'dict' install_type.")
app[plugin_name] = registry
else:
raise ValueError(f'Invalid install type: {install_type}') | [
"def",
"install_plugins",
"(",
"plugins",
",",
"app",
",",
"install_type",
",",
"config",
")",
":",
"try",
":",
"disable_plugins",
"=",
"config",
".",
"disable_plugins",
"if",
"not",
"disable_plugins",
":",
"disable_plugins",
"=",
"[",
"]",
"except",
"Attribut... | Automatically install plugins to the app.
:param plugins: List of plugin names to discover and install plugins
:param app: Any type of app to install plugins
:param install_type: The way to install plugins to app
:param config: Config object to initialize plugins
:return:
You should note that app can be any type of object. For instance,
when used in manager, app param is the instance of aiohttp.web.Application,
but it is the instance of subclass of aiozmq.rpc.AttrHandler in agents.
Therefore, you should specify :install_type: to install plugins into different
types of apps correctly. Currently we support two types of :install_type:,
which are 'attr' and 'dict'. For 'attr', plugins will be installed to app
as its attributes. For 'dict', plugins will be installed as follows:
app[plugin_name] = plugin. | [
"Automatically",
"install",
"plugins",
"to",
"the",
"app",
"."
] | 20b3a2551ee5bb3b88e7836471bc244a70ad0ae6 | https://github.com/lablup/backend.ai-common/blob/20b3a2551ee5bb3b88e7836471bc244a70ad0ae6/src/ai/backend/common/plugin.py#L70-L114 | train | 51,122 |
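Plugin discovery above rests on setuptools entry points; the sketch below lists whatever is registered under one group. The group name follows the `backendai_<name>_v10` pattern from the code, but `stats_monitor` itself is a hypothetical plugin name:

```python
import pkg_resources

group = "backendai_stats_monitor_v10"
for ep in pkg_resources.iter_entry_points(group):
    print(ep.name, "->", ep.module_name)   # prints nothing on a clean env
```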
RI-imaging/nrefocus | examples/example_helper.py | load_cell | def load_cell(fname="HL60_field.zip"):
"Load zip file and return complex field"
here = op.dirname(op.abspath(__file__))
data = op.join(here, "data")
arc = zipfile.ZipFile(op.join(data, fname))
for f in arc.filelist:
with arc.open(f) as fd:
if f.filename.count("imag"):
imag = np.loadtxt(fd)
elif f.filename.count("real"):
real = np.loadtxt(fd)
field = real + 1j * imag
return field | python | def load_cell(fname="HL60_field.zip"):
"Load zip file and return complex field"
here = op.dirname(op.abspath(__file__))
data = op.join(here, "data")
arc = zipfile.ZipFile(op.join(data, fname))
for f in arc.filelist:
with arc.open(f) as fd:
if f.filename.count("imag"):
imag = np.loadtxt(fd)
elif f.filename.count("real"):
real = np.loadtxt(fd)
field = real + 1j * imag
return field | [
"def",
"load_cell",
"(",
"fname",
"=",
"\"HL60_field.zip\"",
")",
":",
"here",
"=",
"op",
".",
"dirname",
"(",
"op",
".",
"abspath",
"(",
"__file__",
")",
")",
"data",
"=",
"op",
".",
"join",
"(",
"here",
",",
"\"data\"",
")",
"arc",
"=",
"zipfile",
... | Load zip file and return complex field | [
"Load",
"zip",
"file",
"and",
"return",
"complex",
"field"
] | ad09aeecace609ab8f9effcb662d2b7d50826080 | https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/examples/example_helper.py#L7-L21 | train | 51,123 |
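The archive stores the field as two plain-text arrays that are recombined as `real + 1j * imag`; a toy version of that reassembly:

```python
import numpy as np

real = np.ones((4, 4))
imag = 0.5 * np.ones((4, 4))          # made-up stand-ins for the zip contents
field = real + 1j * imag
print(field.dtype, np.abs(field).max())
```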
rainwoodman/kdcount | kdcount/sphere.py | bootstrap | def bootstrap(nside, rand, nbar, *data):
""" This function will bootstrap data based on the sky coverage of rand.
It is different from bootstrap in the traditional sense, but for correlation
functions it gives the correct answer with less computation.
nbar : number density of rand, used to estimate the effective area of a pixel
nside : number of healpix pixels per side to use
*data : a list of data -- will be binned on the same regions.
small regions (incomplete pixels) are combined such that the total
area is about the same (a healpix pixel) in each returned bootstrap sample
Yields: area, random, *data
rand and *data are in (RA, DEC)
Example:
>>> for area, ran, data1, data2 in bootstrap(4, ran, 100., data1, data2):
>>> # Do stuff
>>> pass
"""
def split(data, indices, axis):
""" This function splits array. It fixes the bug
in numpy that zero length array are improperly handled.
In the future this will be fixed.
"""
s = []
s.append(slice(0, indices[0]))
for i in range(len(indices) - 1):
s.append(slice(indices[i], indices[i+1]))
s.append(slice(indices[-1], None))
rt = []
for ss in s:
ind = [slice(None, None, None) for i in range(len(data.shape))]
ind[axis] = ss
ind = tuple(ind)
rt.append(data[ind])
return rt
def hpsplit(nside, data):
# data is (RA, DEC)
RA, DEC = data
pix = radec2pix(nside, RA, DEC)
n = numpy.bincount(pix)
a = numpy.argsort(pix)
data = numpy.array(data)[:, a]
rt = split(data, n.cumsum(), axis=-1)
return rt
# mean area of sky.
Abar = 41252.96 / nside2npix(nside)
rand = hpsplit(nside, rand)
if len(data) > 0:
data = [list(i) for i in zip(*[hpsplit(nside, d1) for d1 in data])]
else:
data = [[] for i in range(len(rand))]
heap = []
j = 0
for r, d in zip(rand, data):
if len(r[0]) == 0: continue
a = 1.0 * len(r[0]) / nbar
j = j + 1
if len(heap) == 0:
heapq.heappush(heap, (a, j, r, d))
else:
a0, j0, r0, d0 = heapq.heappop(heap)
if a0 + a < Abar:
a0 += a
d0 = [
numpy.concatenate((d0[i], d[i]), axis=-1)
for i in range(len(d))
]
r0 = numpy.concatenate((r0, r), axis=-1)
else:
heapq.heappush(heap, (a, j, r, d))
heapq.heappush(heap, (a0, j0, r0, d0))
for i in range(len(heap)):
area, j, r, d = heapq.heappop(heap)
rt = [area, r] + d
yield rt | python | def bootstrap(nside, rand, nbar, *data):
""" This function will bootstrap data based on the sky coverage of rand.
It is different from bootstrap in the traditional sense, but for correlation
functions it gives the correct answer with less computation.
nbar : number density of rand, used to estimate the effective area of a pixel
nside : number of healpix pixels per side to use
*data : a list of data -- will be binned on the same regions.
small regions (incomplete pixels) are combined such that the total
area is about the same (a healpix pixel) in each returned bootstrap sample
Yields: area, random, *data
rand and *data are in (RA, DEC)
Example:
>>> for area, ran, data1, data2 in bootstrap(4, ran, 100., data1, data2):
>>> # Do stuff
>>> pass
"""
def split(data, indices, axis):
""" This function splits array. It fixes the bug
in numpy that zero length array are improperly handled.
In the future this will be fixed.
"""
s = []
s.append(slice(0, indices[0]))
for i in range(len(indices) - 1):
s.append(slice(indices[i], indices[i+1]))
s.append(slice(indices[-1], None))
rt = []
for ss in s:
ind = [slice(None, None, None) for i in range(len(data.shape))]
ind[axis] = ss
ind = tuple(ind)
rt.append(data[ind])
return rt
def hpsplit(nside, data):
# data is (RA, DEC)
RA, DEC = data
pix = radec2pix(nside, RA, DEC)
n = numpy.bincount(pix)
a = numpy.argsort(pix)
data = numpy.array(data)[:, a]
rt = split(data, n.cumsum(), axis=-1)
return rt
# mean area of sky.
Abar = 41252.96 / nside2npix(nside)
rand = hpsplit(nside, rand)
if len(data) > 0:
data = [list(i) for i in zip(*[hpsplit(nside, d1) for d1 in data])]
else:
data = [[] for i in range(len(rand))]
heap = []
j = 0
for r, d in zip(rand, data):
if len(r[0]) == 0: continue
a = 1.0 * len(r[0]) / nbar
j = j + 1
if len(heap) == 0:
heapq.heappush(heap, (a, j, r, d))
else:
a0, j0, r0, d0 = heapq.heappop(heap)
if a0 + a < Abar:
a0 += a
d0 = [
numpy.concatenate((d0[i], d[i]), axis=-1)
for i in range(len(d))
]
r0 = numpy.concatenate((r0, r), axis=-1)
else:
heapq.heappush(heap, (a, j, r, d))
heapq.heappush(heap, (a0, j0, r0, d0))
for i in range(len(heap)):
area, j, r, d = heapq.heappop(heap)
rt = [area, r] + d
yield rt | [
"def",
"bootstrap",
"(",
"nside",
",",
"rand",
",",
"nbar",
",",
"*",
"data",
")",
":",
"def",
"split",
"(",
"data",
",",
"indices",
",",
"axis",
")",
":",
"\"\"\" This function splits array. It fixes the bug\n in numpy that zero length array are improperly h... | This function will bootstrap data based on the sky coverage of rand.
It is different from bootstrap in the traditional sense, but for correlation
functions it gives the correct answer with less computation.
nbar : number density of rand, used to estimate the effective area of a pixel
nside : number of healpix pixels per side to use
*data : a list of data -- will be binned on the same regions.
small regions (incomplete pixels) are combined such that the total
area is about the same (a healpix pixel) in each returned bootstrap sample
Yields: area, random, *data
rand and *data are in (RA, DEC)
Example:
>>> for area, ran, data1, data2 in bootstrap(4, ran, 100., data1, data2):
>>> # Do stuff
>>> pass | [
"This",
"function",
"will",
"bootstrap",
"data",
"based",
"on",
"the",
"sky",
"coverage",
"of",
"rand",
".",
"It",
"is",
"different",
"from",
"bootstrap",
"in",
"the",
"traditional",
"sense",
"but",
"for",
"correlation",
"functions",
"it",
"gives",
"the",
"c... | 483548f6d27a4f245cd5d98880b5f4edd6cc8dc1 | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/sphere.py#L65-L153 | train | 51,124 |
anteater/anteater | anteater/src/get_lists.py | GetLists.load_project_flag_list_file | def load_project_flag_list_file(self, project_exceptions, project):
""" Loads project specific lists """
if self.loaded:
return
exception_file = None
for item in project_exceptions:
if project in item:
exception_file = item.get(project)
if exception_file is not None:
try:
with open(exception_file, 'r') as f:
ex = yaml.safe_load(f)
except IOError:
logger.error('File not found: %s', exception_file)
sys.exit(0)
for key in ex:
if key in fl:
fl[key][project] = _merge(fl[key][project], ex.get(key, None)) \
if project in fl[key] else ex.get(key, None)
self.loaded = True
else:
logger.info('%s not found in %s', project, ignore_list)
logger.info('No project specific exceptions will be applied') | python | def load_project_flag_list_file(self, project_exceptions, project):
""" Loads project specific lists """
if self.loaded:
return
exception_file = None
for item in project_exceptions:
if project in item:
exception_file = item.get(project)
if exception_file is not None:
try:
with open(exception_file, 'r') as f:
ex = yaml.safe_load(f)
except IOError:
logger.error('File not found: %s', exception_file)
sys.exit(0)
for key in ex:
if key in fl:
fl[key][project] = _merge(fl[key][project], ex.get(key, None)) \
if project in fl[key] else ex.get(key, None)
self.loaded = True
else:
logger.info('%s not found in %s', project, ignore_list)
logger.info('No project specific exceptions will be applied') | [
"def",
"load_project_flag_list_file",
"(",
"self",
",",
"project_exceptions",
",",
"project",
")",
":",
"if",
"self",
".",
"loaded",
":",
"return",
"exception_file",
"=",
"None",
"for",
"item",
"in",
"project_exceptions",
":",
"if",
"project",
"in",
"item",
":... | Loads project specific lists | [
"Loads",
"project",
"specific",
"lists"
] | a980adbed8563ef92494f565acd371e91f50f155 | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/get_lists.py#L63-L85 | train | 51,125 |
anteater/anteater | anteater/src/get_lists.py | GetLists.binary_hash | def binary_hash(self, project, patch_file):
""" Gathers sha256 hashes from binary lists """
global il
exception_file = None
try:
project_exceptions = il.get('project_exceptions')
except KeyError:
logger.info('project_exceptions missing in %s for %s', ignore_list, project)
for project_files in project_exceptions:
if project in project_files:
exception_file = project_files.get(project)
with open(exception_file, 'r') as f:
bl = yaml.safe_load(f)
for key, value in bl.items():
if key == 'binaries':
if patch_file in value:
hashvalue = value[patch_file]
return hashvalue
else:
for key, value in il.items():
if key == 'binaries':
if patch_file in value:
hashvalue = value[patch_file]
return hashvalue
else:
hashvalue = ""
return hashvalue
else:
logger.info('%s not found in %s', project, ignore_list)
logger.info('No project specific exceptions will be applied')
hashvalue = ""
return hashvalue | python | def binary_hash(self, project, patch_file):
""" Gathers sha256 hashes from binary lists """
global il
exception_file = None
try:
project_exceptions = il.get('project_exceptions')
except KeyError:
logger.info('project_exceptions missing in %s for %s', ignore_list, project)
for project_files in project_exceptions:
if project in project_files:
exception_file = project_files.get(project)
with open(exception_file, 'r') as f:
bl = yaml.safe_load(f)
for key, value in bl.items():
if key == 'binaries':
if patch_file in value:
hashvalue = value[patch_file]
return hashvalue
else:
for key, value in il.items():
if key == 'binaries':
if patch_file in value:
hashvalue = value[patch_file]
return hashvalue
else:
hashvalue = ""
return hashvalue
else:
logger.info('%s not found in %s', project, ignore_list)
logger.info('No project specific exceptions will be applied')
hashvalue = ""
return hashvalue | [
"def",
"binary_hash",
"(",
"self",
",",
"project",
",",
"patch_file",
")",
":",
"global",
"il",
"exception_file",
"=",
"None",
"try",
":",
"project_exceptions",
"=",
"il",
".",
"get",
"(",
"'project_exceptions'",
")",
"except",
"KeyError",
":",
"logger",
"."... | Gathers sha256 hashes from binary lists | [
"Gathers",
"sha256",
"hashes",
"from",
"binary",
"lists"
] | a980adbed8563ef92494f565acd371e91f50f155 | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/get_lists.py#L117-L150 | train | 51,126 |
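A stored value from `binary_hash()` would be checked against a candidate file's digest along these lines (the expected digest here is simply sha256 of an empty payload):

```python
import hashlib

expected = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
actual = hashlib.sha256(b"").hexdigest()   # hash the file bytes in practice
print(actual == expected)                  # True: that's sha256 of b""
```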
anteater/anteater | anteater/src/get_lists.py | GetLists.file_audit_list | def file_audit_list(self, project):
""" Gathers file name lists """
project_list = False
self.load_project_flag_list_file(il.get('project_exceptions'), project)
try:
default_list = set((fl['file_audits']['file_names']))
except KeyError:
logger.error('Key Error processing file_names list values')
try:
project_list = set((fl['file_audits'][project]['file_names']))
logger.info('Loaded %s specific file_audits entries', project)
except KeyError:
logger.info('No project specific file_names section for project %s', project)
file_names_re = re.compile("|".join(default_list),
flags=re.IGNORECASE)
if project_list:
file_names_proj_re = re.compile("|".join(project_list),
flags=re.IGNORECASE)
return file_names_re, file_names_proj_re
else:
file_names_proj_re = re.compile("")
return file_names_re, file_names_proj_re | python | def file_audit_list(self, project):
""" Gathers file name lists """
project_list = False
self.load_project_flag_list_file(il.get('project_exceptions'), project)
try:
default_list = set((fl['file_audits']['file_names']))
except KeyError:
logger.error('Key Error processing file_names list values')
try:
project_list = set((fl['file_audits'][project]['file_names']))
logger.info('Loaded %s specific file_audits entries', project)
except KeyError:
logger.info('No project specific file_names section for project %s', project)
file_names_re = re.compile("|".join(default_list),
flags=re.IGNORECASE)
if project_list:
file_names_proj_re = re.compile("|".join(project_list),
flags=re.IGNORECASE)
return file_names_re, file_names_proj_re
else:
file_names_proj_re = re.compile("")
return file_names_re, file_names_proj_re | [
"def",
"file_audit_list",
"(",
"self",
",",
"project",
")",
":",
"project_list",
"=",
"False",
"self",
".",
"load_project_flag_list_file",
"(",
"il",
".",
"get",
"(",
"'project_exceptions'",
")",
",",
"project",
")",
"try",
":",
"default_list",
"=",
"set",
"... | Gathers file name lists | [
"Gathers",
"file",
"name",
"lists"
] | a980adbed8563ef92494f565acd371e91f50f155 | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/get_lists.py#L152-L175 | train | 51,127 |
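Each list is collapsed into a single case-insensitive alternation, so one `search()` covers every pattern; a sketch with made-up audit patterns:

```python
import re

default_list = {r"\.pem$", r"\.key$", r"id_rsa"}
file_names_re = re.compile("|".join(default_list), flags=re.IGNORECASE)
print(bool(file_names_re.search("deploy/ID_RSA")))      # True
print(bool(file_names_re.search("docs/readme.md")))     # False
```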
anteater/anteater | anteater/src/get_lists.py | GetLists.file_content_list | def file_content_list(self, project):
""" gathers content strings """
project_list = False
self.load_project_flag_list_file(il.get('project_exceptions'), project)
try:
flag_list = (fl['file_audits']['file_contents'])
except KeyError:
logger.error('Key Error processing file_contents list values')
try:
ignore_list = il['file_audits']['file_contents']
except KeyError:
logger.error('Key Error processing file_contents list values')
try:
project_list = fl['file_audits'][project]['file_contents']
logger.info('Loaded %s specific file_contents entries', project)
except KeyError:
logger.info('No project specific file_contents section for project %s', project)
if project_list:
ignore_list_merge = project_list + ignore_list
ignore_list_re = re.compile("|".join(ignore_list_merge), flags=re.IGNORECASE)
return flag_list, ignore_list_re
else:
ignore_list_re = re.compile("|".join(ignore_list),
flags=re.IGNORECASE)
return flag_list, ignore_list_re | python | def file_content_list(self, project):
""" gathers content strings """
project_list = False
self.load_project_flag_list_file(il.get('project_exceptions'), project)
try:
flag_list = (fl['file_audits']['file_contents'])
except KeyError:
logger.error('Key Error processing file_contents list values')
try:
ignore_list = il['file_audits']['file_contents']
except KeyError:
logger.error('Key Error processing file_contents list values')
try:
project_list = fl['file_audits'][project]['file_contents']
logger.info('Loaded %s specific file_contents entries', project)
except KeyError:
logger.info('No project specific file_contents section for project %s', project)
if project_list:
ignore_list_merge = project_list + ignore_list
ignore_list_re = re.compile("|".join(ignore_list_merge), flags=re.IGNORECASE)
return flag_list, ignore_list_re
else:
ignore_list_re = re.compile("|".join(ignore_list),
flags=re.IGNORECASE)
return flag_list, ignore_list_re | [
"def",
"file_content_list",
"(",
"self",
",",
"project",
")",
":",
"project_list",
"=",
"False",
"self",
".",
"load_project_flag_list_file",
"(",
"il",
".",
"get",
"(",
"'project_exceptions'",
")",
",",
"project",
")",
"try",
":",
"flag_list",
"=",
"(",
"fl"... | gathers content strings | [
"gathers",
"content",
"strings"
] | a980adbed8563ef92494f565acd371e91f50f155 | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/get_lists.py#L177-L207 | train | 51,128 |
anteater/anteater | anteater/src/get_lists.py | GetLists.ignore_directories | def ignore_directories(self, project):
""" Gathers a list of directories to ignore """
project_list = False
try:
ignore_directories = il['ignore_directories']
except KeyError:
logger.error('Key Error processing ignore_directories list values')
try:
project_exceptions = il.get('project_exceptions')
for item in project_exceptions:
if project in item:
exception_file = item.get(project)
with open(exception_file, 'r') as f:
test_list = yaml.safe_load(f)
project_list = test_list['ignore_directories']
except KeyError:
logger.info('No ignore_directories for %s', project)
if project_list:
ignore_directories = ignore_directories + project_list
return ignore_directories
else:
return ignore_directories | python | def ignore_directories(self, project):
""" Gathers a list of directories to ignore """
project_list = False
try:
ignore_directories = il['ignore_directories']
except KeyError:
logger.error('Key Error processing ignore_directories list values')
try:
project_exceptions = il.get('project_exceptions')
for item in project_exceptions:
if project in item:
exception_file = item.get(project)
with open(exception_file, 'r') as f:
test_list = yaml.safe_load(f)
project_list = test_list['ignore_directories']
except KeyError:
logger.info('No ignore_directories for %s', project)
if project_list:
ignore_directories = ignore_directories + project_list
return ignore_directories
else:
return ignore_directories | [
"def",
"ignore_directories",
"(",
"self",
",",
"project",
")",
":",
"project_list",
"=",
"False",
"try",
":",
"ignore_directories",
"=",
"il",
"[",
"'ignore_directories'",
"]",
"except",
"KeyError",
":",
"logger",
".",
"error",
"(",
"'Key Error processing ignore_d... | Gathers a list of directories to ignore | [
"Gathers",
"a",
"list",
"of",
"directories",
"to",
"ignore"
] | a980adbed8563ef92494f565acd371e91f50f155 | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/src/get_lists.py#L209-L232 | train | 51,129 |
ornlneutronimaging/ImagingReso | ImagingReso/_utilities.py | download_from_github | def download_from_github(fname, path):
"""
Download database from GitHub
:param fname: file name with extension ('.zip') of the target item
:type fname: str
:param path: path to save unzipped files
:type path: str
:return: database folder
:rtype: folder
"""
base_url = 'https://github.com/ornlneutronimaging/ImagingReso/blob/master/ImagingReso/reference_data/'
# Add GitHub junk to the file name for downloading.
f = fname + '?raw=true'
url = base_url + f
block_size = 16384
req = urlopen(url)
# Get file size from header
if sys.version_info[0] < 3:
file_size = int(req.info().getheaders('Content-Length')[0])
else:
file_size = req.length
# downloaded = 0
# Check if file already downloaded
if os.path.exists(fname):
if os.path.getsize(fname) == file_size:
print("Skipping downloading '{}'".format(fname))
else:
overwrite = input("File size changed, overwrite '{}'? ([y]/n) ".format(fname))
if overwrite.lower().startswith('n'):
print("Local file '{}' kept without overwriting.".format(fname))
# Copy file to disk
print("Downloading '{}'... ".format(fname))
with open(fname, 'wb') as fh:
while True:
chunk = req.read(block_size)
if not chunk:
break
fh.write(chunk)
# downloaded += len(chunk)
print('')
print('Download completed.')
print("Unzipping '{}'... ".format(fname))
_database_zip = zipfile.ZipFile(fname)
_database_zip.extractall(path=path)
print("'{}' has been unzipped and database '{}' is ready to use.".format(fname, fname.replace('.zip', '')))
os.remove(fname)
print("'{}' has been deleted".format(fname)) | python | def download_from_github(fname, path):
"""
Download database from GitHub
:param fname: file name with extension ('.zip') of the target item
:type fname: str
:param path: path to save unzipped files
:type path: str
:return: database folder
:rtype: folder
"""
base_url = 'https://github.com/ornlneutronimaging/ImagingReso/blob/master/ImagingReso/reference_data/'
# Add GitHub junk to the file name for downloading.
f = fname + '?raw=true'
url = base_url + f
block_size = 16384
req = urlopen(url)
# Get file size from header
if sys.version_info[0] < 3:
file_size = int(req.info().getheaders('Content-Length')[0])
else:
file_size = req.length
# downloaded = 0
# Check if file already downloaded
if os.path.exists(fname):
if os.path.getsize(fname) == file_size:
print("Skipping downloading '{}'".format(fname))
else:
overwrite = input("File size changed, overwrite '{}'? ([y]/n) ".format(fname))
if overwrite.lower().startswith('n'):
print("Local file '{}' kept without overwriting.".format(fname))
# Copy file to disk
print("Downloading '{}'... ".format(fname))
with open(fname, 'wb') as fh:
while True:
chunk = req.read(block_size)
if not chunk:
break
fh.write(chunk)
# downloaded += len(chunk)
print('')
print('Download completed.')
print("Unzipping '{}'... ".format(fname))
_database_zip = zipfile.ZipFile(fname)
_database_zip.extractall(path=path)
print("'{}' has been unzipped and database '{}' is ready to use.".format(fname, fname.replace('.zip', '')))
os.remove(fname)
print("'{}' has been deleted".format(fname)) | [
"def",
"download_from_github",
"(",
"fname",
",",
"path",
")",
":",
"base_url",
"=",
"'https://github.com/ornlneutronimaging/ImagingReso/blob/master/ImagingReso/reference_data/'",
"# Add GitHub junk to the file name for downloading.",
"f",
"=",
"fname",
"+",
"'?raw=true'",
"url",
... | Download database from GitHub
:param fname: file name with extension ('.zip') of the target item
:type fname: str
:param path: path to save unzipped files
:type path: str
:return: database folder
:rtype: folder | [
"Download",
"database",
"from",
"GitHub"
] | 2da5cd1f565b3128f59d86bcedfd9adc2b02218b | https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/_utilities.py#L18-L71 | train | 51,130 |
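The download itself is a plain chunked copy over `urlopen`; a stripped-down sketch of that loop (the URL is a placeholder, not a real archive — substitute an actual file before running):

```python
from urllib.request import urlopen

url = "https://example.com/ENDF_VII.zip"   # placeholder URL, assumption only
block_size = 16384
with urlopen(url) as req, open("ENDF_VII.zip", "wb") as fh:
    while True:
        chunk = req.read(block_size)       # 16 KiB blocks, as above
        if not chunk:
            break
        fh.write(chunk)
```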
ornlneutronimaging/ImagingReso | ImagingReso/_utilities.py | get_list_element_from_database | def get_list_element_from_database(database='ENDF_VII'):
"""return a string array of all the element from the database
Parameters:
==========
database: string. Name of database
Raises:
======
ValueError if database can not be found
"""
_file_path = os.path.abspath(os.path.dirname(__file__))
_ref_data_folder = os.path.join(_file_path, 'reference_data')
_database_folder = os.path.join(_ref_data_folder, database)
if not os.path.exists(_ref_data_folder):
os.makedirs(_ref_data_folder)
print("Folder to store database files has been created: '{}'".format(_ref_data_folder))
if not os.path.exists(_database_folder):
print("First time using database '{}'? ".format(database))
print("I will retrieve and store a local copy of database'{}': ".format(database))
download_from_github(fname=database + '.zip', path=_ref_data_folder)
# if '/_elements_list.csv' does NOT exist
if not os.path.exists(_database_folder + '/_elements_list.csv'):
# glob all .csv files
_list_files = glob.glob(_database_folder + '/*.csv')
# glob all .h5 files if NO .csv file exists
if not _list_files:
_list_files = glob.glob(_database_folder + '/*.h5')
# test if any files were globbed
_empty_list_boo = not _list_files
if _empty_list_boo is True:
raise ValueError("'{}' does not contain any '*.csv' or '*.h5' file.".format(_database_folder))
# convert path/to/file to filename only
_list_short_filename_without_extension = [os.path.splitext(os.path.basename(_file))[0] for _file in _list_files]
# isolate element names and output as list
if '-' in _list_short_filename_without_extension[0]:
_list_element = list(set([_name.split('-')[0] for _name in _list_short_filename_without_extension]))
else:
_list_letter_part = list(
set([re.split(r'(\d+)', _name)[0] for _name in _list_short_filename_without_extension]))
_list_element = []
for each_letter_part in _list_letter_part:
if len(each_letter_part) <= 2:
_list_element.append(each_letter_part)
# save to current dir
_list_element.sort()
df_to_save = pd.DataFrame()
df_to_save['elements'] = _list_element
df_to_save.to_csv(_database_folder + '/_elements_list.csv')
# print("NOT FOUND '{}'".format(_database_folder + '/_elements_list.csv'))
# print("SAVED '{}'".format(_database_folder + '/_elements_list.csv'))
# '/_elements_list.csv' exists
else:
df_to_read = pd.read_csv(_database_folder + '/_elements_list.csv')
_list_element = list(df_to_read['elements'])
# print("FOUND '{}'".format(_database_folder + '/_elements_list.csv'))
# print("READ '{}'".format(_database_folder + '/_elements_list.csv'))
return _list_element | python | def get_list_element_from_database(database='ENDF_VII'):
"""return a string array of all the element from the database
Parameters:
==========
database: string. Name of database
Raises:
======
ValueError if database cannot be found
"""
_file_path = os.path.abspath(os.path.dirname(__file__))
_ref_data_folder = os.path.join(_file_path, 'reference_data')
_database_folder = os.path.join(_ref_data_folder, database)
if not os.path.exists(_ref_data_folder):
os.makedirs(_ref_data_folder)
print("Folder to store database files has been created: '{}'".format(_ref_data_folder))
if not os.path.exists(_database_folder):
print("First time using database '{}'? ".format(database))
print("I will retrieve and store a local copy of database'{}': ".format(database))
download_from_github(fname=database + '.zip', path=_ref_data_folder)
# if '/_elements_list.csv' does NOT exist
if not os.path.exists(_database_folder + '/_elements_list.csv'):
# glob all .csv files
_list_files = glob.glob(_database_folder + '/*.csv')
# glob all .h5 files if NO .csv file exists
if not _list_files:
_list_files = glob.glob(_database_folder + '/*.h5')
# test if any files were globbed
_empty_list_boo = not _list_files
if _empty_list_boo is True:
raise ValueError("'{}' does not contain any '*.csv' or '*.h5' file.".format(_database_folder))
# convert path/to/file to filename only
_list_short_filename_without_extension = [os.path.splitext(os.path.basename(_file))[0] for _file in _list_files]
# isolate element names and output as list
if '-' in _list_short_filename_without_extension[0]:
_list_element = list(set([_name.split('-')[0] for _name in _list_short_filename_without_extension]))
else:
_list_letter_part = list(
set([re.split(r'(\d+)', _name)[0] for _name in _list_short_filename_without_extension]))
_list_element = []
for each_letter_part in _list_letter_part:
if len(each_letter_part) <= 2:
_list_element.append(each_letter_part)
# save to current dir
_list_element.sort()
df_to_save = pd.DataFrame()
df_to_save['elements'] = _list_element
df_to_save.to_csv(_database_folder + '/_elements_list.csv')
# print("NOT FOUND '{}'".format(_database_folder + '/_elements_list.csv'))
# print("SAVED '{}'".format(_database_folder + '/_elements_list.csv'))
# '/_elements_list.csv' exists
else:
df_to_read = pd.read_csv(_database_folder + '/_elements_list.csv')
_list_element = list(df_to_read['elements'])
# print("FOUND '{}'".format(_database_folder + '/_elements_list.csv'))
# print("READ '{}'".format(_database_folder + '/_elements_list.csv'))
return _list_element | [
"def",
"get_list_element_from_database",
"(",
"database",
"=",
"'ENDF_VII'",
")",
":",
"_file_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"_ref_data_folder",
"=",
"os",
".",
"path",
".",... | return a string array of all the element from the database
Parameters:
==========
database: string. Name of database
Raises:
======
ValueError if database cannot be found | [
"return",
"a",
"string",
"array",
"of",
"all",
"the",
"element",
"from",
"the",
"database"
] | 2da5cd1f565b3128f59d86bcedfd9adc2b02218b | https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/_utilities.py#L74-L141 | train | 51,131 |
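A usage sketch for get_list_element_from_database; the first call may download and unzip the database locally:

elements = get_list_element_from_database(database='ENDF_VII')
print(elements[:5])  # first few element symbols, sorted alphabetically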
ornlneutronimaging/ImagingReso | ImagingReso/_utilities.py | get_sigma | def get_sigma(database_file_name='', e_min=np.NaN, e_max=np.NaN, e_step=np.NaN, t_kelvin=None):
"""retrieve the Energy and sigma axis for the given isotope
:param database_file_name: path/to/file with extension
:type database_file_name: string
:param e_min: left energy range in eV of new interpolated data
:type e_min: float
:param e_max: right energy range in eV of new interpolated data
:type e_max: float
:param e_step: energy step in eV for interpolation
:type e_step: float
:param t_kelvin: temperature in Kelvin
:type t_kelvin: float
:return: {'energy_eV': np.array, 'sigma_b': np.array}
:rtype: dict
"""
file_extension = os.path.splitext(database_file_name)[1]
if t_kelvin is None:
# '.csv' files
if file_extension != '.csv':
raise IOError("Cross-section File type must be '.csv'")
else:
_df = get_database_data(file_name=database_file_name)
_dict = get_interpolated_data(df=_df, e_min=e_min, e_max=e_max,
e_step=e_step)
return {'energy_eV': _dict['x_axis'],
'sigma_b': _dict['y_axis']}
else:
raise ValueError("Doppler broadened cross-section in not yet supported in current version.") | python | def get_sigma(database_file_name='', e_min=np.NaN, e_max=np.NaN, e_step=np.NaN, t_kelvin=None):
"""retrieve the Energy and sigma axis for the given isotope
:param database_file_name: path/to/file with extension
:type database_file_name: string
:param e_min: left energy range in eV of new interpolated data
:type e_min: float
:param e_max: right energy range in eV of new interpolated data
:type e_max: float
:param e_step: energy step in eV for interpolation
:type e_step: float
:param t_kelvin: temperature in Kelvin
:type t_kelvin: float
:return: {'energy_eV': np.array, 'sigma_b': np.array}
:rtype: dict
"""
file_extension = os.path.splitext(database_file_name)[1]
if t_kelvin is None:
# '.csv' files
if file_extension != '.csv':
raise IOError("Cross-section File type must be '.csv'")
else:
_df = get_database_data(file_name=database_file_name)
_dict = get_interpolated_data(df=_df, e_min=e_min, e_max=e_max,
e_step=e_step)
return {'energy_eV': _dict['x_axis'],
'sigma_b': _dict['y_axis']}
else:
raise ValueError("Doppler broadened cross-section in not yet supported in current version.") | [
"def",
"get_sigma",
"(",
"database_file_name",
"=",
"''",
",",
"e_min",
"=",
"np",
".",
"NaN",
",",
"e_max",
"=",
"np",
".",
"NaN",
",",
"e_step",
"=",
"np",
".",
"NaN",
",",
"t_kelvin",
"=",
"None",
")",
":",
"file_extension",
"=",
"os",
".",
"pat... | retrieve the Energy and sigma axis for the given isotope
:param database_file_name: path/to/file with extension
:type database_file_name: string
:param e_min: left energy range in eV of new interpolated data
:type e_min: float
:param e_max: right energy range in eV of new interpolated data
:type e_max: float
:param e_step: energy step in eV for interpolation
:type e_step: float
:param t_kelvin: temperature in Kelvin
:type t_kelvin: float
:return: {'energy': np.array, 'sigma': np.array}
:rtype: dict | [
"retrieve",
"the",
"Energy",
"and",
"sigma",
"axis",
"for",
"the",
"given",
"isotope"
] | 2da5cd1f565b3128f59d86bcedfd9adc2b02218b | https://github.com/ornlneutronimaging/ImagingReso/blob/2da5cd1f565b3128f59d86bcedfd9adc2b02218b/ImagingReso/_utilities.py#L457-L488 | train | 51,132 |
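A call sketch for get_sigma; the file path below is hypothetical and must point at a '.csv' cross-section file from the unpacked database:

result = get_sigma(database_file_name='reference_data/ENDF_VII/U-238.csv',
                   e_min=1e-5, e_max=300, e_step=0.01)
print(result['energy_eV'][:3], result['sigma_b'][:3])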
Numigi/gitoo | src/core.py | temp_repo | def temp_repo(url, branch, commit=''):
""" Clone a git repository inside a temporary folder, yield the folder then delete the folder.
:param string url: url of the repo to clone.
:param string branch: name of the branch to checkout to.
:param string commit: Optional commit rev to checkout to. If mentioned, it takes precedence over the branch
:return: yield the path to the temporary folder
:rtype: string
"""
tmp_folder = tempfile.mkdtemp()
git.Repo.clone_from(
url, tmp_folder, branch=branch
)
if commit:
git_cmd = git.Git(tmp_folder)
git_cmd.checkout(commit)
yield tmp_folder
shutil.rmtree(tmp_folder) | python | def temp_repo(url, branch, commit=''):
""" Clone a git repository inside a temporary folder, yield the folder then delete the folder.
:param string url: url of the repo to clone.
:param string branch: name of the branch to checkout to.
:param string commit: Optional commit rev to checkout to. If mentioned, it takes precedence over the branch
:return: yield the path to the temporary folder
:rtype: string
"""
tmp_folder = tempfile.mkdtemp()
git.Repo.clone_from(
url, tmp_folder, branch=branch
)
if commit:
git_cmd = git.Git(tmp_folder)
git_cmd.checkout(commit)
yield tmp_folder
shutil.rmtree(tmp_folder) | [
"def",
"temp_repo",
"(",
"url",
",",
"branch",
",",
"commit",
"=",
"''",
")",
":",
"tmp_folder",
"=",
"tempfile",
".",
"mkdtemp",
"(",
")",
"git",
".",
"Repo",
".",
"clone_from",
"(",
"url",
",",
"tmp_folder",
",",
"branch",
"=",
"branch",
")",
"if",... | Clone a git repository inside a temporary folder, yield the folder then delete the folder.
:param string url: url of the repo to clone.
:param string branch: name of the branch to checkout to.
:param string commit: Optional commit rev to checkout to. If mentioned, it takes precedence over the branch
:return: yield the path to the temporary folder
:rtype: string | [
"Clone",
"a",
"git",
"repository",
"inside",
"a",
"temporary",
"folder",
"yield",
"the",
"folder",
"then",
"delete",
"the",
"folder",
"."
] | 0921f5fb8a948021760bb0373a40f9fbe8a4a2e5 | https://github.com/Numigi/gitoo/blob/0921f5fb8a948021760bb0373a40f9fbe8a4a2e5/src/core.py#L18-L35 | train | 51,133 |
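A usage sketch for temp_repo, assuming it is meant to be driven as a context manager, as its yield and docstring suggest; the repository URL is only an example:

import contextlib

clone = contextlib.contextmanager(temp_repo)
with clone('https://github.com/OCA/web.git', branch='12.0') as folder:
    print('cloned into', folder)  # the folder is removed when the block exits normally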
Numigi/gitoo | src/core.py | force_move | def force_move(source, destination):
""" Force the move of the source inside the destination even if the destination has already a folder with the
name inside. In the case, the folder will be replaced.
:param string source: path of the source to move.
:param string destination: path of the folder to move the source to.
"""
if not os.path.exists(destination):
raise RuntimeError(
'The code could not be moved to {destination} '
'because the folder does not exist'.format(destination=destination))
destination_folder = os.path.join(destination, os.path.split(source)[-1])
if os.path.exists(destination_folder):
shutil.rmtree(destination_folder)
shutil.move(source, destination) | python | def force_move(source, destination):
""" Force the move of the source inside the destination even if the destination has already a folder with the
name inside. In the case, the folder will be replaced.
:param string source: path of the source to move.
:param string destination: path of the folder to move the source to.
"""
if not os.path.exists(destination):
raise RuntimeError(
'The code could not be moved to {destination} '
'because the folder does not exist'.format(destination=destination))
destination_folder = os.path.join(destination, os.path.split(source)[-1])
if os.path.exists(destination_folder):
shutil.rmtree(destination_folder)
shutil.move(source, destination) | [
"def",
"force_move",
"(",
"source",
",",
"destination",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"destination",
")",
":",
"raise",
"RuntimeError",
"(",
"'The code could not be moved to {destination} '",
"'because the folder does not exist'",
".",
... | Force the move of the source inside the destination even if the destination already has a folder with the
same name inside. In that case, the folder will be replaced.
:param string source: path of the source to move.
:param string destination: path of the folder to move the source to. | [
"Force",
"the",
"move",
"of",
"the",
"source",
"inside",
"the",
"destination",
"even",
"if",
"the",
"destination",
"has",
"already",
"a",
"folder",
"with",
"the",
"name",
"inside",
".",
"In",
"the",
"case",
"the",
"folder",
"will",
"be",
"replaced",
"."
] | 0921f5fb8a948021760bb0373a40f9fbe8a4a2e5 | https://github.com/Numigi/gitoo/blob/0921f5fb8a948021760bb0373a40f9fbe8a4a2e5/src/core.py#L38-L54 | train | 51,134 |
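A self-contained sketch of the overwrite behaviour of force_move, using throwaway temporary folders:

import os
import tempfile

src = tempfile.mkdtemp(prefix='gitoo_src_')
dst = tempfile.mkdtemp(prefix='gitoo_dst_')
os.makedirs(os.path.join(dst, os.path.basename(src)))  # pre-existing name clash
force_move(src, dst)  # the clashing folder is deleted first, then src is moved in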
Numigi/gitoo | src/core.py | _run_command_inside_folder | def _run_command_inside_folder(command, folder):
"""Run a command inside the given folder.
:param string command: the command to execute.
:param string folder: the folder where to execute the command.
:return: the return code of the process and its captured stdout.
:rtype: Tuple[int, str]
"""
logger.debug("command: %s", command)
# avoid usage of shell = True
# see https://docs.openstack.org/bandit/latest/plugins/subprocess_popen_with_shell_equals_true.html
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE, cwd=folder)
stream_data = process.communicate()[0]
logger.debug("%s stdout: %s (RC %s)", command, stream_data, process.returncode)
return process.returncode, stream_data | python | def _run_command_inside_folder(command, folder):
"""Run a command inside the given folder.
:param string command: the command to execute.
:param string folder: the folder where to execute the command.
:return: the return code of the process and its captured stdout.
:rtype: Tuple[int, str]
"""
logger.debug("command: %s", command)
# avoid usage of shell = True
# see https://docs.openstack.org/bandit/latest/plugins/subprocess_popen_with_shell_equals_true.html
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE, cwd=folder)
stream_data = process.communicate()[0]
logger.debug("%s stdout: %s (RC %s)", command, stream_data, process.returncode)
return process.returncode, stream_data | [
"def",
"_run_command_inside_folder",
"(",
"command",
",",
"folder",
")",
":",
"logger",
".",
"debug",
"(",
"\"command: %s\"",
",",
"command",
")",
"# avoid usage of shell = True",
"# see https://docs.openstack.org/bandit/latest/plugins/subprocess_popen_with_shell_equals_true.html",... | Run a command inside the given folder.
:param string command: the command to execute.
:param string folder: the folder where to execute the command.
:return: the return code of the process and its captured stdout.
:rtype: Tuple[int, str] | [
"Run",
"a",
"command",
"inside",
"the",
"given",
"folder",
"."
] | 0921f5fb8a948021760bb0373a40f9fbe8a4a2e5 | https://github.com/Numigi/gitoo/blob/0921f5fb8a948021760bb0373a40f9fbe8a4a2e5/src/core.py#L163-L177 | train | 51,135 |
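A quick call sketch for _run_command_inside_folder (requires git on the PATH):

rc, out = _run_command_inside_folder('git status', '.')
if rc:
    print('command failed:', out)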
Numigi/gitoo | src/core.py | parse_url | def parse_url(url):
""" Parse the given url and update it with environment value if required.
:param basestring url:
:rtype: basestring
:raise: KeyError if environment variable is needed but not found.
"""
# the url has to be a unicode by pystache's design, but the unicode concept has been revamped in py3
# we use a try except to make the code compatible with py2 and py3
try:
url = unicode(url)
except NameError:
url = url
parsed = pystache.parse(url)
# pylint: disable=protected-access
variables = (element.key for element in parsed._parse_tree if isinstance(element, _EscapeNode))
return pystache.render(url, {variable: os.environ[variable] for variable in variables}) | python | def parse_url(url):
""" Parse the given url and update it with environment value if required.
:param basestring url:
:rtype: basestring
:raise: KeyError if environment variable is needed but not found.
"""
# the url has to be a unicode by pystache's design, but the unicode concept has been revamped in py3
# we use a try except to make the code compatible with py2 and py3
try:
url = unicode(url)
except NameError:
url = url
parsed = pystache.parse(url)
# pylint: disable=protected-access
variables = (element.key for element in parsed._parse_tree if isinstance(element, _EscapeNode))
return pystache.render(url, {variable: os.environ[variable] for variable in variables}) | [
"def",
"parse_url",
"(",
"url",
")",
":",
"# the url has to be a unicode by pystache's design, but the unicode concept has been rewamped in py3",
"# we use a try except to make the code compatible with py2 and py3",
"try",
":",
"url",
"=",
"unicode",
"(",
"url",
")",
"except",
"Nam... | Parse the given url and update it with environment value if required.
:param basestring url:
:rtype: basestring
:raise: KeyError if environment variable is needed but not found. | [
"Parse",
"the",
"given",
"url",
"and",
"update",
"it",
"with",
"environment",
"value",
"if",
"required",
"."
] | 0921f5fb8a948021760bb0373a40f9fbe8a4a2e5 | https://github.com/Numigi/gitoo/blob/0921f5fb8a948021760bb0373a40f9fbe8a4a2e5/src/core.py#L242-L259 | train | 51,136 |
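A sketch of the mustache-style substitution parse_url performs; the environment variable name is made up for the example:

import os

os.environ['GITOO_TOKEN'] = 'dummy-token'  # hypothetical variable
print(parse_url('https://{{GITOO_TOKEN}}@github.com/acme/private.git'))
# -> https://dummy-token@github.com/acme/private.git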
Numigi/gitoo | src/core.py | Addon._move_modules | def _move_modules(self, temp_repo, destination):
"""Move modules froom the temp directory to the destination.
:param string temp_repo: the folder containing the code.
:param string destination: the folder where the add-on should end up at.
"""
folders = self._get_module_folders(temp_repo)
for folder in folders:
force_move(folder, destination) | python | def _move_modules(self, temp_repo, destination):
"""Move modules froom the temp directory to the destination.
:param string temp_repo: the folder containing the code.
:param string destination: the folder where the add-on should end up at.
"""
folders = self._get_module_folders(temp_repo)
for folder in folders:
force_move(folder, destination) | [
"def",
"_move_modules",
"(",
"self",
",",
"temp_repo",
",",
"destination",
")",
":",
"folders",
"=",
"self",
".",
"_get_module_folders",
"(",
"temp_repo",
")",
"for",
"folder",
"in",
"folders",
":",
"force_move",
"(",
"folder",
",",
"destination",
")"
] | Move modules from the temp directory to the destination.
:param string temp_repo: the folder containing the code.
:param string destination: the folder where the add-on should end up at. | [
"Move",
"modules",
"froom",
"the",
"temp",
"directory",
"to",
"the",
"destination",
"."
] | 0921f5fb8a948021760bb0373a40f9fbe8a4a2e5 | https://github.com/Numigi/gitoo/blob/0921f5fb8a948021760bb0373a40f9fbe8a4a2e5/src/core.py#L103-L111 | train | 51,137 |
Numigi/gitoo | src/core.py | Addon._get_module_folders | def _get_module_folders(self, temp_repo):
"""Get a list of module paths contained in a temp directory.
:param string temp_repo: the folder containing the modules.
"""
paths = (
os.path.join(temp_repo, path) for path in os.listdir(temp_repo)
if self._is_module_included(path)
)
return (path for path in paths if os.path.isdir(path)) | python | def _get_module_folders(self, temp_repo):
"""Get a list of module paths contained in a temp directory.
:param string temp_repo: the folder containing the modules.
"""
paths = (
os.path.join(temp_repo, path) for path in os.listdir(temp_repo)
if self._is_module_included(path)
)
return (path for path in paths if os.path.isdir(path)) | [
"def",
"_get_module_folders",
"(",
"self",
",",
"temp_repo",
")",
":",
"paths",
"=",
"(",
"os",
".",
"path",
".",
"join",
"(",
"temp_repo",
",",
"path",
")",
"for",
"path",
"in",
"os",
".",
"listdir",
"(",
"temp_repo",
")",
"if",
"self",
".",
"_is_mo... | Get a list of module paths contained in a temp directory.
:param string temp_repo: the folder containing the modules. | [
"Get",
"a",
"list",
"of",
"module",
"paths",
"contained",
"in",
"a",
"temp",
"directory",
"."
] | 0921f5fb8a948021760bb0373a40f9fbe8a4a2e5 | https://github.com/Numigi/gitoo/blob/0921f5fb8a948021760bb0373a40f9fbe8a4a2e5/src/core.py#L113-L122 | train | 51,138 |
Numigi/gitoo | src/core.py | Addon._is_module_included | def _is_module_included(self, module):
"""Evaluate if the module must be included in the Odoo addons.
:param string module: the name of the module
:rtype: bool
"""
if module in self.exclude_modules:
return False
if self.include_modules is None:
return True
return module in self.include_modules | python | def _is_module_included(self, module):
"""Evaluate if the module must be included in the Odoo addons.
:param string module: the name of the module
:rtype: bool
"""
if module in self.exclude_modules:
return False
if self.include_modules is None:
return True
return module in self.include_modules | [
"def",
"_is_module_included",
"(",
"self",
",",
"module",
")",
":",
"if",
"module",
"in",
"self",
".",
"exclude_modules",
":",
"return",
"False",
"if",
"self",
".",
"include_modules",
"is",
"None",
":",
"return",
"True",
"return",
"module",
"in",
"self",
"... | Evaluate if the module must be included in the Odoo addons.
:param string module: the name of the module
:rtype: bool | [
"Evaluate",
"if",
"the",
"module",
"must",
"be",
"included",
"in",
"the",
"Odoo",
"addons",
"."
] | 0921f5fb8a948021760bb0373a40f9fbe8a4a2e5 | https://github.com/Numigi/gitoo/blob/0921f5fb8a948021760bb0373a40f9fbe8a4a2e5/src/core.py#L124-L136 | train | 51,139 |
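The include/exclude precedence of _is_module_included can be sketched standalone (semantics mirrored from the method above: exclusion wins, and include_modules=None means everything is included):

def is_module_included(module, include_modules=None, exclude_modules=()):
    if module in exclude_modules:
        return False  # exclusion always wins
    if include_modules is None:
        return True   # no whitelist means everything passes
    return module in include_modules

assert is_module_included('sale')
assert not is_module_included('sale', exclude_modules=['sale'])
assert not is_module_included('sale', include_modules=['crm'])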
Numigi/gitoo | src/core.py | Base._move_modules | def _move_modules(self, temp_repo, destination):
"""Move odoo modules from the temp directory to the destination.
This step is different from a standard repository. In the base code
of Odoo, the modules are contained in an addons folder at the root
of the git repository. However, when deploying the application,
those modules are placed inside the folder odoo/addons.
1- Move modules from addons/ to odoo/addons/ (with the base module).
2- Move the whole odoo folder to the destination location.
"""
tmp_addons = os.path.join(temp_repo, 'addons')
tmp_odoo_addons = os.path.join(temp_repo, 'odoo/addons')
folders = self._get_module_folders(tmp_addons)
for folder in folders:
force_move(folder, tmp_odoo_addons)
tmp_odoo = os.path.join(temp_repo, 'odoo')
force_move(tmp_odoo, destination) | python | def _move_modules(self, temp_repo, destination):
"""Move odoo modules from the temp directory to the destination.
This step is different from a standard repository. In the base code
of Odoo, the modules are contained in an addons folder at the root
of the git repository. However, when deploying the application,
those modules are placed inside the folder odoo/addons.
1- Move modules from addons/ to odoo/addons/ (with the base module).
2- Move the whole odoo folder to the destination location.
"""
tmp_addons = os.path.join(temp_repo, 'addons')
tmp_odoo_addons = os.path.join(temp_repo, 'odoo/addons')
folders = self._get_module_folders(tmp_addons)
for folder in folders:
force_move(folder, tmp_odoo_addons)
tmp_odoo = os.path.join(temp_repo, 'odoo')
force_move(tmp_odoo, destination) | [
"def",
"_move_modules",
"(",
"self",
",",
"temp_repo",
",",
"destination",
")",
":",
"tmp_addons",
"=",
"os",
".",
"path",
".",
"join",
"(",
"temp_repo",
",",
"'addons'",
")",
"tmp_odoo_addons",
"=",
"os",
".",
"path",
".",
"join",
"(",
"temp_repo",
",",... | Move odoo modules from the temp directory to the destination.
This step is different from a standard repository. In the base code
of Odoo, the modules are contained in a addons folder at the root
of the git repository. However, when deploying the application,
those modules are placed inside the folder odoo/addons.
1- Move modules from addons/ to odoo/addons/ (with the base module).
2- Move the whole odoo folder to the destination location. | [
"Move",
"odoo",
"modules",
"from",
"the",
"temp",
"directory",
"to",
"the",
"destination",
"."
] | 0921f5fb8a948021760bb0373a40f9fbe8a4a2e5 | https://github.com/Numigi/gitoo/blob/0921f5fb8a948021760bb0373a40f9fbe8a4a2e5/src/core.py#L142-L160 | train | 51,140 |
Numigi/gitoo | src/core.py | Patch.apply | def apply(self, folder):
""" Merge code from the given repo url to the git repo contained in the given folder.
:param string folder: path of the folder where the git repo is cloned.
:raise: RuntimeError if the patch could not be applied.
"""
logger.info("Apply Patch %s@%s (commit %s)", self.url, self.branch, self.commit)
remote_name = 'patch'
commands = [
"git remote add {} {}".format(remote_name, self.url),
"git fetch {} {}".format(remote_name, self.branch),
'git merge {} -m "patch"'.format(self.commit),
"git remote remove {}".format(remote_name),
]
for command in commands:
return_code, stream_data = _run_command_inside_folder(command, folder)
if return_code:
msg = "Could not apply patch from {}@{}: {}. Error: {}".format(
self.url, self.branch, command, stream_data)
logger.error(msg)
raise RuntimeError(msg) | python | def apply(self, folder):
""" Merge code from the given repo url to the git repo contained in the given folder.
:param string folder: path of the folder where the git repo is cloned.
:raise: RuntimeError if the patch could not be applied.
"""
logger.info("Apply Patch %s@%s (commit %s)", self.url, self.branch, self.commit)
remote_name = 'patch'
commands = [
"git remote add {} {}".format(remote_name, self.url),
"git fetch {} {}".format(remote_name, self.branch),
'git merge {} -m "patch"'.format(self.commit),
"git remote remove {}".format(remote_name),
]
for command in commands:
return_code, stream_data = _run_command_inside_folder(command, folder)
if return_code:
msg = "Could not apply patch from {}@{}: {}. Error: {}".format(
self.url, self.branch, command, stream_data)
logger.error(msg)
raise RuntimeError(msg) | [
"def",
"apply",
"(",
"self",
",",
"folder",
")",
":",
"logger",
".",
"info",
"(",
"\"Apply Patch %s@%s (commit %s)\"",
",",
"self",
".",
"url",
",",
"self",
".",
"branch",
",",
"self",
".",
"commit",
")",
"remote_name",
"=",
"'patch'",
"commands",
"=",
"... | Merge code from the given repo url to the git repo contained in the given folder.
:param string folder: path of the folder where the git repo is cloned.
:raise: RuntimeError if the patch could not be applied. | [
"Merge",
"code",
"from",
"the",
"given",
"repo",
"url",
"to",
"the",
"git",
"repo",
"contained",
"in",
"the",
"given",
"folder",
"."
] | 0921f5fb8a948021760bb0373a40f9fbe8a4a2e5 | https://github.com/Numigi/gitoo/blob/0921f5fb8a948021760bb0373a40f9fbe8a4a2e5/src/core.py#L193-L213 | train | 51,141 |
Numigi/gitoo | src/core.py | FilePatch.apply | def apply(self, folder):
"""Apply a patch from a git patch file.
:param string folder: path of the folder where the git repo is cloned.
:raise: RuntimeError if the patch could not be applied.
"""
logger.info("Apply Patch File %s", self.file_path)
command = "git apply {}".format(self.file_path)
return_code, stream_data = _run_command_inside_folder(command, folder)
if return_code:
msg = "Could not apply patch file at {}. Error: {}".format(self.file_path, stream_data)
logger.error(msg)
raise RuntimeError(msg) | python | def apply(self, folder):
"""Apply a patch from a git patch file.
:param string folder: path of the folder where the git repo is cloned.
:raise: RuntimeError if the patch could not be applied.
"""
logger.info("Apply Patch File %s", self.file_path)
command = "git apply {}".format(self.file_path)
return_code, stream_data = _run_command_inside_folder(command, folder)
if return_code:
msg = "Could not apply patch file at {}. Error: {}".format(self.file_path, stream_data)
logger.error(msg)
raise RuntimeError(msg) | [
"def",
"apply",
"(",
"self",
",",
"folder",
")",
":",
"logger",
".",
"info",
"(",
"\"Apply Patch File %s\"",
",",
"self",
".",
"file_path",
")",
"command",
"=",
"\"git apply {}\"",
".",
"format",
"(",
"self",
".",
"file_path",
")",
"return_code",
",",
"str... | Apply a patch from a git patch file.
:param string folder: path of the folder where the git repo is cloned.
:raise: RuntimeError if the patch could not be applied. | [
"Apply",
"a",
"patch",
"from",
"a",
"git",
"patch",
"file",
"."
] | 0921f5fb8a948021760bb0373a40f9fbe8a4a2e5 | https://github.com/Numigi/gitoo/blob/0921f5fb8a948021760bb0373a40f9fbe8a4a2e5/src/core.py#L226-L239 | train | 51,142 |
beregond/super_state_machine | super_state_machine/machines.py | StateMachineMetaclass._set_up_context | def _set_up_context(cls):
"""Create context to keep all needed variables in."""
cls.context = AttributeDict()
cls.context.new_meta = {}
cls.context.new_transitions = {}
cls.context.new_methods = {} | python | def _set_up_context(cls):
"""Create context to keep all needed variables in."""
cls.context = AttributeDict()
cls.context.new_meta = {}
cls.context.new_transitions = {}
cls.context.new_methods = {} | [
"def",
"_set_up_context",
"(",
"cls",
")",
":",
"cls",
".",
"context",
"=",
"AttributeDict",
"(",
")",
"cls",
".",
"context",
".",
"new_meta",
"=",
"{",
"}",
"cls",
".",
"context",
".",
"new_transitions",
"=",
"{",
"}",
"cls",
".",
"context",
".",
"n... | Create context to keep all needed variables in. | [
"Create",
"context",
"to",
"keep",
"all",
"needed",
"variables",
"in",
"."
] | 31ad527f4e6b7a01e315ce865735ca18957c223e | https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/machines.py#L64-L69 | train | 51,143 |
beregond/super_state_machine | super_state_machine/machines.py | StateMachineMetaclass._check_states_enum | def _check_states_enum(cls):
"""Check if states enum exists and is proper one."""
states_enum_name = cls.context.get_config('states_enum_name')
try:
cls.context['states_enum'] = getattr(
cls.context.new_class, states_enum_name)
except AttributeError:
raise ValueError('No states enum given!')
proper = True
try:
if not issubclass(cls.context.states_enum, Enum):
proper = False
except TypeError:
proper = False
if not proper:
raise ValueError(
'Please provide an enum instance to define available states.') | python | def _check_states_enum(cls):
"""Check if states enum exists and is proper one."""
states_enum_name = cls.context.get_config('states_enum_name')
try:
cls.context['states_enum'] = getattr(
cls.context.new_class, states_enum_name)
except AttributeError:
raise ValueError('No states enum given!')
proper = True
try:
if not issubclass(cls.context.states_enum, Enum):
proper = False
except TypeError:
proper = False
if not proper:
raise ValueError(
'Please provide an enum instance to define available states.') | [
"def",
"_check_states_enum",
"(",
"cls",
")",
":",
"states_enum_name",
"=",
"cls",
".",
"context",
".",
"get_config",
"(",
"'states_enum_name'",
")",
"try",
":",
"cls",
".",
"context",
"[",
"'states_enum'",
"]",
"=",
"getattr",
"(",
"cls",
".",
"context",
... | Check if states enum exists and is a proper one. | [
"Check",
"if",
"states",
"enum",
"exists",
"and",
"is",
"proper",
"one",
"."
] | 31ad527f4e6b7a01e315ce865735ca18957c223e | https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/machines.py#L72-L90 | train | 51,144 |
beregond/super_state_machine | super_state_machine/machines.py | StateMachineMetaclass._check_if_states_are_strings | def _check_if_states_are_strings(cls):
"""Check if all states are strings."""
for item in list(cls.context.states_enum):
if not isinstance(item.value, six.string_types):
raise ValueError(
'Item {name} is not a string. Only strings are allowed.'
.format(name=item.name)
) | python | def _check_if_states_are_strings(cls):
"""Check if all states are strings."""
for item in list(cls.context.states_enum):
if not isinstance(item.value, six.string_types):
raise ValueError(
'Item {name} is not a string. Only strings are allowed.'
.format(name=item.name)
) | [
"def",
"_check_if_states_are_strings",
"(",
"cls",
")",
":",
"for",
"item",
"in",
"list",
"(",
"cls",
".",
"context",
".",
"states_enum",
")",
":",
"if",
"not",
"isinstance",
"(",
"item",
".",
"value",
",",
"six",
".",
"string_types",
")",
":",
"raise",
... | Check if all states are strings. | [
"Check",
"if",
"all",
"states",
"are",
"strings",
"."
] | 31ad527f4e6b7a01e315ce865735ca18957c223e | https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/machines.py#L93-L100 | train | 51,145 |
beregond/super_state_machine | super_state_machine/machines.py | StateMachineMetaclass._check_state_value | def _check_state_value(cls):
"""Check initial state value - if is proper and translate it.
Initial state is required.
"""
state_value = cls.context.get_config('initial_state', None)
state_value = state_value or getattr(
cls.context.new_class, cls.context.state_name, None
)
if not state_value:
raise ValueError(
"Empty state is disallowed, yet no initial state is given!"
)
state_value = (
cls.context
.new_meta['translator']
.translate(state_value)
)
cls.context.state_value = state_value | python | def _check_state_value(cls):
"""Check initial state value - if is proper and translate it.
Initial state is required.
"""
state_value = cls.context.get_config('initial_state', None)
state_value = state_value or getattr(
cls.context.new_class, cls.context.state_name, None
)
if not state_value:
raise ValueError(
"Empty state is disallowed, yet no initial state is given!"
)
state_value = (
cls.context
.new_meta['translator']
.translate(state_value)
)
cls.context.state_value = state_value | [
"def",
"_check_state_value",
"(",
"cls",
")",
":",
"state_value",
"=",
"cls",
".",
"context",
".",
"get_config",
"(",
"'initial_state'",
",",
"None",
")",
"state_value",
"=",
"state_value",
"or",
"getattr",
"(",
"cls",
".",
"context",
".",
"new_class",
",",
... | Check initial state value - if it is proper and translate it.
Initial state is required. | [
"Check",
"initial",
"state",
"value",
"-",
"if",
"is",
"proper",
"and",
"translate",
"it",
"."
] | 31ad527f4e6b7a01e315ce865735ca18957c223e | https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/machines.py#L103-L122 | train | 51,146 |
beregond/super_state_machine | super_state_machine/machines.py | StateMachineMetaclass._add_standard_attributes | def _add_standard_attributes(cls):
"""Add attributes common to all state machines.
These are methods for setting and checking state etc.
"""
setattr(
cls.context.new_class,
cls.context.new_meta['state_attribute_name'],
cls.context.state_value)
setattr(
cls.context.new_class,
cls.context.state_name,
utils.state_property)
setattr(cls.context.new_class, 'is_', utils.is_)
setattr(cls.context.new_class, 'can_be_', utils.can_be_)
setattr(cls.context.new_class, 'set_', utils.set_) | python | def _add_standard_attributes(cls):
"""Add attributes common to all state machines.
These are methods for setting and checking state etc.
"""
setattr(
cls.context.new_class,
cls.context.new_meta['state_attribute_name'],
cls.context.state_value)
setattr(
cls.context.new_class,
cls.context.state_name,
utils.state_property)
setattr(cls.context.new_class, 'is_', utils.is_)
setattr(cls.context.new_class, 'can_be_', utils.can_be_)
setattr(cls.context.new_class, 'set_', utils.set_) | [
"def",
"_add_standard_attributes",
"(",
"cls",
")",
":",
"setattr",
"(",
"cls",
".",
"context",
".",
"new_class",
",",
"cls",
".",
"context",
".",
"new_meta",
"[",
"'state_attribute_name'",
"]",
",",
"cls",
".",
"context",
".",
"state_value",
")",
"setattr",... | Add attributes common to all state machines.
These are methods for setting and checking state etc. | [
"Add",
"attributes",
"common",
"to",
"all",
"state",
"machines",
"."
] | 31ad527f4e6b7a01e315ce865735ca18957c223e | https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/machines.py#L125-L142 | train | 51,147 |
beregond/super_state_machine | super_state_machine/machines.py | StateMachineMetaclass._generate_standard_transitions | def _generate_standard_transitions(cls):
"""Generate methods used for transitions."""
allowed_transitions = cls.context.get_config('transitions', {})
for key, transitions in allowed_transitions.items():
key = cls.context.new_meta['translator'].translate(key)
new_transitions = set()
for trans in transitions:
if not isinstance(trans, Enum):
trans = cls.context.new_meta['translator'].translate(trans)
new_transitions.add(trans)
cls.context.new_transitions[key] = new_transitions
for state in cls.context.states_enum:
if state not in cls.context.new_transitions:
cls.context.new_transitions[state] = set() | python | def _generate_standard_transitions(cls):
"""Generate methods used for transitions."""
allowed_transitions = cls.context.get_config('transitions', {})
for key, transitions in allowed_transitions.items():
key = cls.context.new_meta['translator'].translate(key)
new_transitions = set()
for trans in transitions:
if not isinstance(trans, Enum):
trans = cls.context.new_meta['translator'].translate(trans)
new_transitions.add(trans)
cls.context.new_transitions[key] = new_transitions
for state in cls.context.states_enum:
if state not in cls.context.new_transitions:
cls.context.new_transitions[state] = set() | [
"def",
"_generate_standard_transitions",
"(",
"cls",
")",
":",
"allowed_transitions",
"=",
"cls",
".",
"context",
".",
"get_config",
"(",
"'transitions'",
",",
"{",
"}",
")",
"for",
"key",
",",
"transitions",
"in",
"allowed_transitions",
".",
"items",
"(",
")"... | Generate methods used for transitions. | [
"Generate",
"methods",
"used",
"for",
"transitions",
"."
] | 31ad527f4e6b7a01e315ce865735ca18957c223e | https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/machines.py#L145-L161 | train | 51,148 |
beregond/super_state_machine | super_state_machine/machines.py | StateMachineMetaclass._generate_standard_methods | def _generate_standard_methods(cls):
"""Generate standard setters, getters and checkers."""
for state in cls.context.states_enum:
getter_name = 'is_{name}'.format(name=state.value)
cls.context.new_methods[getter_name] = utils.generate_getter(state)
setter_name = 'set_{name}'.format(name=state.value)
cls.context.new_methods[setter_name] = utils.generate_setter(state)
checker_name = 'can_be_{name}'.format(name=state.value)
checker = utils.generate_checker(state)
cls.context.new_methods[checker_name] = checker
cls.context.new_methods['actual_state'] = utils.actual_state
cls.context.new_methods['as_enum'] = utils.as_enum
cls.context.new_methods['force_set'] = utils.force_set | python | def _generate_standard_methods(cls):
"""Generate standard setters, getters and checkers."""
for state in cls.context.states_enum:
getter_name = 'is_{name}'.format(name=state.value)
cls.context.new_methods[getter_name] = utils.generate_getter(state)
setter_name = 'set_{name}'.format(name=state.value)
cls.context.new_methods[setter_name] = utils.generate_setter(state)
checker_name = 'can_be_{name}'.format(name=state.value)
checker = utils.generate_checker(state)
cls.context.new_methods[checker_name] = checker
cls.context.new_methods['actual_state'] = utils.actual_state
cls.context.new_methods['as_enum'] = utils.as_enum
cls.context.new_methods['force_set'] = utils.force_set | [
"def",
"_generate_standard_methods",
"(",
"cls",
")",
":",
"for",
"state",
"in",
"cls",
".",
"context",
".",
"states_enum",
":",
"getter_name",
"=",
"'is_{name}'",
".",
"format",
"(",
"name",
"=",
"state",
".",
"value",
")",
"cls",
".",
"context",
".",
"... | Generate standard setters, getters and checkers. | [
"Generate",
"standard",
"setters",
"getters",
"and",
"checkers",
"."
] | 31ad527f4e6b7a01e315ce865735ca18957c223e | https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/machines.py#L164-L179 | train | 51,149 |
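A standalone sketch of the names _generate_standard_methods derives from a states enum (no dependency on the library itself):

from enum import Enum

class States(Enum):
    DRAFT = 'draft'
    DONE = 'done'

names = []
for state in States:
    names += ['is_' + state.value, 'set_' + state.value, 'can_be_' + state.value]
print(names)
# ['is_draft', 'set_draft', 'can_be_draft', 'is_done', 'set_done', 'can_be_done']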
beregond/super_state_machine | super_state_machine/machines.py | StateMachineMetaclass._add_new_methods | def _add_new_methods(cls):
"""Add all generated methods to result class."""
for name, method in cls.context.new_methods.items():
if hasattr(cls.context.new_class, name):
raise ValueError(
"Name collision in state machine class - '{name}'."
.format(name=name)
)
setattr(cls.context.new_class, name, method) | python | def _add_new_methods(cls):
"""Add all generated methods to result class."""
for name, method in cls.context.new_methods.items():
if hasattr(cls.context.new_class, name):
raise ValueError(
"Name collision in state machine class - '{name}'."
.format(name=name)
)
setattr(cls.context.new_class, name, method) | [
"def",
"_add_new_methods",
"(",
"cls",
")",
":",
"for",
"name",
",",
"method",
"in",
"cls",
".",
"context",
".",
"new_methods",
".",
"items",
"(",
")",
":",
"if",
"hasattr",
"(",
"cls",
".",
"context",
".",
"new_class",
",",
"name",
")",
":",
"raise"... | Add all generated methods to result class. | [
"Add",
"all",
"generated",
"methods",
"to",
"result",
"class",
"."
] | 31ad527f4e6b7a01e315ce865735ca18957c223e | https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/machines.py#L235-L244 | train | 51,150 |
beregond/super_state_machine | super_state_machine/machines.py | StateMachineMetaclass._set_complete_option | def _set_complete_option(cls):
"""Check and set complete option."""
get_config = cls.context.get_config
complete = get_config('complete', None)
if complete is None:
conditions = [
get_config('transitions', False),
get_config('named_transitions', False),
]
complete = not any(conditions)
cls.context.new_meta['complete'] = complete | python | def _set_complete_option(cls):
"""Check and set complete option."""
get_config = cls.context.get_config
complete = get_config('complete', None)
if complete is None:
conditions = [
get_config('transitions', False),
get_config('named_transitions', False),
]
complete = not any(conditions)
cls.context.new_meta['complete'] = complete | [
"def",
"_set_complete_option",
"(",
"cls",
")",
":",
"get_config",
"=",
"cls",
".",
"context",
".",
"get_config",
"complete",
"=",
"get_config",
"(",
"'complete'",
",",
"None",
")",
"if",
"complete",
"is",
"None",
":",
"conditions",
"=",
"[",
"get_config",
... | Check and set complete option. | [
"Check",
"and",
"set",
"complete",
"option",
"."
] | 31ad527f4e6b7a01e315ce865735ca18957c223e | https://github.com/beregond/super_state_machine/blob/31ad527f4e6b7a01e315ce865735ca18957c223e/super_state_machine/machines.py#L247-L258 | train | 51,151 |
BlueBrain/nat | nat/utils.py | data_directory | def data_directory():
"""Return the absolute path to the directory containing the package data."""
package_directory = os.path.abspath(os.path.dirname(__file__))
return os.path.join(package_directory, "data") | python | def data_directory():
"""Return the absolute path to the directory containing the package data."""
package_directory = os.path.abspath(os.path.dirname(__file__))
return os.path.join(package_directory, "data") | [
"def",
"data_directory",
"(",
")",
":",
"package_directory",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"package_directory",
",",
"\"data\"",
... | Return the absolute path to the directory containing the package data. | [
"Return",
"the",
"absolute",
"path",
"to",
"the",
"directory",
"containing",
"the",
"package",
"data",
"."
] | 0934f06e48e6efedf55a9617b15becae0d7b277c | https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/utils.py#L16-L19 | train | 51,152 |
ttinies/sc2gameMapRepo | sc2maptool/functions.py | filterMapAttrs | def filterMapAttrs(records=getIndex(), **tags):
"""matches available maps if their attributes match as specified"""
if len(tags) == 0: return records # if no tags are specified, all given records match
ret = []
for record in records: # attempt to match attributes
if matchRecordAttrs(record, tags):
ret.append(record)
return ret | python | def filterMapAttrs(records=getIndex(), **tags):
"""matches available maps if their attributes match as specified"""
if len(tags) == 0: return records # if no tags are specified, all given records match
ret = []
for record in records: # attempt to match attributes
if matchRecordAttrs(record, tags):
ret.append(record)
return ret | [
"def",
"filterMapAttrs",
"(",
"records",
"=",
"getIndex",
"(",
")",
",",
"*",
"*",
"tags",
")",
":",
"if",
"len",
"(",
"tags",
")",
"==",
"0",
":",
"return",
"records",
"# otherwise if unspecified, all given records match",
"ret",
"=",
"[",
"]",
"for",
"re... | matches available maps if their attributes match as specified | [
"matches",
"available",
"maps",
"if",
"their",
"attributes",
"match",
"as",
"specified"
] | 3a215067fae8f86f6a3ffe37272fbd7a5461cfab | https://github.com/ttinies/sc2gameMapRepo/blob/3a215067fae8f86f6a3ffe37272fbd7a5461cfab/sc2maptool/functions.py#L29-L36 | train | 51,153 |
ttinies/sc2gameMapRepo | sc2maptool/functions.py | matchRecordAttrs | def matchRecordAttrs(mapobj, attrs):
"""attempt to match given attributes against a single map object's attributes"""
for k,v in iteritems(attrs):
try: val = getattr(mapobj, k)
except AttributeError: # k isn't an attr of record
if bool(v): return False # if k doesn't exist in mapobj but was required, no match
else: continue # otherwise ignore attributes that aren't defined for the given map record
if val != v: return False # a single mismatched criterion means the record doesn't match
return True | python | def matchRecordAttrs(mapobj, attrs):
"""attempt to match given attributes against a single map object's attributes"""
for k,v in iteritems(attrs):
try: val = getattr(mapobj, k)
except AttributeError: # k isn't an attr of record
if bool(v): return False # if k doesn't exist in mapobj but was required, no match
else: continue # otherwise ignore attributes that aren't defined for the given map record
if val != v: return False # a single mismatched criterion means the record doesn't match
return True | [
"def",
"matchRecordAttrs",
"(",
"mapobj",
",",
"attrs",
")",
":",
"for",
"k",
",",
"v",
"in",
"iteritems",
"(",
"attrs",
")",
":",
"try",
":",
"val",
"=",
"getattr",
"(",
"mapobj",
",",
"k",
")",
"except",
"AttributeError",
":",
"# k isn't an attr of rec... | attempt to match given attributes against a single map object's attributes | [
"attempt",
"to",
"match",
"given",
"attributes",
"against",
"a",
"single",
"map",
"object",
"s",
"attributes"
] | 3a215067fae8f86f6a3ffe37272fbd7a5461cfab | https://github.com/ttinies/sc2gameMapRepo/blob/3a215067fae8f86f6a3ffe37272fbd7a5461cfab/sc2maptool/functions.py#L40-L48 | train | 51,154 |
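A small self-contained sketch of the attribute matching above; the record class and its attribute names are invented for the demo:

class MapRecord(object):
    def __init__(self, name, players):
        self.name, self.players = name, players

records = [MapRecord('Acolyte', 2), MapRecord('Odyssey', 4)]
two_player = filterMapAttrs(records, players=2)
print([m.name for m in two_player])  # ['Acolyte']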
rapidpro/expressions | python/temba_expressions/conversions.py | to_boolean | def to_boolean(value, ctx):
"""
Tries conversion of any value to a boolean
"""
if isinstance(value, bool):
return value
elif isinstance(value, int):
return value != 0
elif isinstance(value, Decimal):
return value != Decimal(0)
elif isinstance(value, str):
value = value.lower()
if value == 'true':
return True
elif value == 'false':
return False
elif isinstance(value, datetime.date) or isinstance(value, datetime.time):
return True
raise EvaluationError("Can't convert '%s' to a boolean" % str(value)) | python | def to_boolean(value, ctx):
"""
Tries conversion of any value to a boolean
"""
if isinstance(value, bool):
return value
elif isinstance(value, int):
return value != 0
elif isinstance(value, Decimal):
return value != Decimal(0)
elif isinstance(value, str):
value = value.lower()
if value == 'true':
return True
elif value == 'false':
return False
elif isinstance(value, datetime.date) or isinstance(value, datetime.time):
return True
raise EvaluationError("Can't convert '%s' to a boolean" % str(value)) | [
"def",
"to_boolean",
"(",
"value",
",",
"ctx",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"return",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"int",
")",
":",
"return",
"value",
"!=",
"0",
"elif",
"isinstance",
"(",
"v... | Tries conversion of any value to a boolean | [
"Tries",
"conversion",
"of",
"any",
"value",
"to",
"a",
"boolean"
] | b03d91ec58fc328960bce90ecb5fa49dcf467627 | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/conversions.py#L7-L26 | train | 51,155 |
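A quick sketch for to_boolean; ctx is unused for these inputs, so None suffices:

from decimal import Decimal

print(to_boolean('TRUE', ctx=None))        # True: the string check is case-insensitive
print(to_boolean(Decimal('0'), ctx=None))  # False: zero is falsy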
rapidpro/expressions | python/temba_expressions/conversions.py | to_integer | def to_integer(value, ctx):
"""
Tries conversion of any value to an integer
"""
if isinstance(value, bool):
return 1 if value else 0
elif isinstance(value, int):
return value
elif isinstance(value, Decimal):
try:
val = int(value.to_integral_exact(ROUND_HALF_UP))
if isinstance(val, int):
return val
except ArithmeticError:
pass
elif isinstance(value, str):
try:
return int(value)
except ValueError:
pass
raise EvaluationError("Can't convert '%s' to an integer" % str(value)) | python | def to_integer(value, ctx):
"""
Tries conversion of any value to an integer
"""
if isinstance(value, bool):
return 1 if value else 0
elif isinstance(value, int):
return value
elif isinstance(value, Decimal):
try:
val = int(value.to_integral_exact(ROUND_HALF_UP))
if isinstance(val, int):
return val
except ArithmeticError:
pass
elif isinstance(value, str):
try:
return int(value)
except ValueError:
pass
raise EvaluationError("Can't convert '%s' to an integer" % str(value)) | [
"def",
"to_integer",
"(",
"value",
",",
"ctx",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"return",
"1",
"if",
"value",
"else",
"0",
"elif",
"isinstance",
"(",
"value",
",",
"int",
")",
":",
"return",
"value",
"elif",
"isinsta... | Tries conversion of any value to an integer | [
"Tries",
"conversion",
"of",
"any",
"value",
"to",
"an",
"integer"
] | b03d91ec58fc328960bce90ecb5fa49dcf467627 | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/conversions.py#L29-L50 | train | 51,156 |
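The rounding detail in to_integer is worth pinning down: to_integral_exact(ROUND_HALF_UP) rounds halves away from zero:

from decimal import Decimal, ROUND_HALF_UP

print(int(Decimal('2.5').to_integral_exact(ROUND_HALF_UP)))   # 3
print(int(Decimal('-2.5').to_integral_exact(ROUND_HALF_UP)))  # -3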
rapidpro/expressions | python/temba_expressions/conversions.py | to_decimal | def to_decimal(value, ctx):
"""
Tries conversion of any value to a decimal
"""
if isinstance(value, bool):
return Decimal(1) if value else Decimal(0)
elif isinstance(value, int):
return Decimal(value)
elif isinstance(value, Decimal):
return value
elif isinstance(value, str):
try:
return Decimal(value)
except Exception:
pass
raise EvaluationError("Can't convert '%s' to a decimal" % str(value)) | python | def to_decimal(value, ctx):
"""
Tries conversion of any value to a decimal
"""
if isinstance(value, bool):
return Decimal(1) if value else Decimal(0)
elif isinstance(value, int):
return Decimal(value)
elif isinstance(value, Decimal):
return value
elif isinstance(value, str):
try:
return Decimal(value)
except Exception:
pass
raise EvaluationError("Can't convert '%s' to a decimal" % str(value)) | [
"def",
"to_decimal",
"(",
"value",
",",
"ctx",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"return",
"Decimal",
"(",
"1",
")",
"if",
"value",
"else",
"Decimal",
"(",
"0",
")",
"elif",
"isinstance",
"(",
"value",
",",
"int",
"... | Tries conversion of any value to a decimal | [
"Tries",
"conversion",
"of",
"any",
"value",
"to",
"a",
"decimal"
] | b03d91ec58fc328960bce90ecb5fa49dcf467627 | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/conversions.py#L53-L69 | train | 51,157 |
rapidpro/expressions | python/temba_expressions/conversions.py | to_string | def to_string(value, ctx):
"""
Tries conversion of any value to a string
"""
if isinstance(value, bool):
return "TRUE" if value else "FALSE"
elif isinstance(value, int):
return str(value)
elif isinstance(value, Decimal):
return format_decimal(value)
elif isinstance(value, str):
return value
elif type(value) == datetime.date:
return value.strftime(ctx.get_date_format(False))
elif isinstance(value, datetime.time):
return value.strftime('%H:%M')
elif isinstance(value, datetime.datetime):
return value.astimezone(ctx.timezone).isoformat()
raise EvaluationError("Can't convert '%s' to a string" % str(value)) | python | def to_string(value, ctx):
"""
Tries conversion of any value to a string
"""
if isinstance(value, bool):
return "TRUE" if value else "FALSE"
elif isinstance(value, int):
return str(value)
elif isinstance(value, Decimal):
return format_decimal(value)
elif isinstance(value, str):
return value
elif type(value) == datetime.date:
return value.strftime(ctx.get_date_format(False))
elif isinstance(value, datetime.time):
return value.strftime('%H:%M')
elif isinstance(value, datetime.datetime):
return value.astimezone(ctx.timezone).isoformat()
raise EvaluationError("Can't convert '%s' to a string" % str(value)) | [
"def",
"to_string",
"(",
"value",
",",
"ctx",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"return",
"\"TRUE\"",
"if",
"value",
"else",
"\"FALSE\"",
"elif",
"isinstance",
"(",
"value",
",",
"int",
")",
":",
"return",
"str",
"(",
... | Tries conversion of any value to a string | [
"Tries",
"conversion",
"of",
"any",
"value",
"to",
"a",
"string"
] | b03d91ec58fc328960bce90ecb5fa49dcf467627 | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/conversions.py#L72-L91 | train | 51,158 |
rapidpro/expressions | python/temba_expressions/conversions.py | to_date | def to_date(value, ctx):
"""
Tries conversion of any value to a date
"""
if isinstance(value, str):
temporal = ctx.get_date_parser().auto(value)
if temporal is not None:
return to_date(temporal, ctx)
elif type(value) == datetime.date:
return value
elif isinstance(value, datetime.datetime):
return value.date() # discard time
raise EvaluationError("Can't convert '%s' to a date" % str(value)) | python | def to_date(value, ctx):
"""
Tries conversion of any value to a date
"""
if isinstance(value, str):
temporal = ctx.get_date_parser().auto(value)
if temporal is not None:
return to_date(temporal, ctx)
elif type(value) == datetime.date:
return value
elif isinstance(value, datetime.datetime):
return value.date() # discard time
raise EvaluationError("Can't convert '%s' to a date" % str(value)) | [
"def",
"to_date",
"(",
"value",
",",
"ctx",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"temporal",
"=",
"ctx",
".",
"get_date_parser",
"(",
")",
".",
"auto",
"(",
"value",
")",
"if",
"temporal",
"is",
"not",
"None",
":",
"ret... | Tries conversion of any value to a date | [
"Tries",
"conversion",
"of",
"any",
"value",
"to",
"a",
"date"
] | b03d91ec58fc328960bce90ecb5fa49dcf467627 | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/conversions.py#L94-L107 | train | 51,159 |
rapidpro/expressions | python/temba_expressions/conversions.py | to_datetime | def to_datetime(value, ctx):
"""
Tries conversion of any value to a datetime
"""
if isinstance(value, str):
temporal = ctx.get_date_parser().auto(value)
if temporal is not None:
return to_datetime(temporal, ctx)
elif type(value) == datetime.date:
return ctx.timezone.localize(datetime.datetime.combine(value, datetime.time(0, 0)))
elif isinstance(value, datetime.datetime):
return value.astimezone(ctx.timezone)
raise EvaluationError("Can't convert '%s' to a datetime" % str(value)) | python | def to_datetime(value, ctx):
"""
Tries conversion of any value to a datetime
"""
if isinstance(value, str):
temporal = ctx.get_date_parser().auto(value)
if temporal is not None:
return to_datetime(temporal, ctx)
elif type(value) == datetime.date:
return ctx.timezone.localize(datetime.datetime.combine(value, datetime.time(0, 0)))
elif isinstance(value, datetime.datetime):
return value.astimezone(ctx.timezone)
raise EvaluationError("Can't convert '%s' to a datetime" % str(value)) | [
"def",
"to_datetime",
"(",
"value",
",",
"ctx",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"temporal",
"=",
"ctx",
".",
"get_date_parser",
"(",
")",
".",
"auto",
"(",
"value",
")",
"if",
"temporal",
"is",
"not",
"None",
":",
... | Tries conversion of any value to a datetime | [
"Tries",
"conversion",
"of",
"any",
"value",
"to",
"a",
"datetime"
] | b03d91ec58fc328960bce90ecb5fa49dcf467627 | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/conversions.py#L110-L123 | train | 51,160 |
rapidpro/expressions | python/temba_expressions/conversions.py | to_date_or_datetime | def to_date_or_datetime(value, ctx):
"""
Tries conversion of any value to a date or datetime
"""
if isinstance(value, str):
temporal = ctx.get_date_parser().auto(value)
if temporal is not None:
return temporal
elif type(value) == datetime.date:
return value
elif isinstance(value, datetime.datetime):
return value.astimezone(ctx.timezone)
raise EvaluationError("Can't convert '%s' to a date or datetime" % str(value)) | python | def to_date_or_datetime(value, ctx):
"""
Tries conversion of any value to a date or datetime
"""
if isinstance(value, str):
temporal = ctx.get_date_parser().auto(value)
if temporal is not None:
return temporal
elif type(value) == datetime.date:
return value
elif isinstance(value, datetime.datetime):
return value.astimezone(ctx.timezone)
raise EvaluationError("Can't convert '%s' to a date or datetime" % str(value)) | [
"def",
"to_date_or_datetime",
"(",
"value",
",",
"ctx",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"temporal",
"=",
"ctx",
".",
"get_date_parser",
"(",
")",
".",
"auto",
"(",
"value",
")",
"if",
"temporal",
"is",
"not",
"None",
... | Tries conversion of any value to a date or datetime | [
"Tries",
"conversion",
"of",
"any",
"value",
"to",
"a",
"date",
"or",
"datetime"
] | b03d91ec58fc328960bce90ecb5fa49dcf467627 | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/conversions.py#L126-L139 | train | 51,161 |
rapidpro/expressions | python/temba_expressions/conversions.py | to_time | def to_time(value, ctx):
"""
Tries conversion of any value to a time
"""
if isinstance(value, str):
time = ctx.get_date_parser().time(value)
if time is not None:
return time
elif isinstance(value, datetime.time):
return value
elif isinstance(value, datetime.datetime):
return value.astimezone(ctx.timezone).time()
raise EvaluationError("Can't convert '%s' to a time" % str(value)) | python | def to_time(value, ctx):
"""
Tries conversion of any value to a time
"""
if isinstance(value, str):
time = ctx.get_date_parser().time(value)
if time is not None:
return time
elif isinstance(value, datetime.time):
return value
elif isinstance(value, datetime.datetime):
return value.astimezone(ctx.timezone).time()
raise EvaluationError("Can't convert '%s' to a time" % str(value)) | [
"def",
"to_time",
"(",
"value",
",",
"ctx",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"time",
"=",
"ctx",
".",
"get_date_parser",
"(",
")",
".",
"time",
"(",
"value",
")",
"if",
"time",
"is",
"not",
"None",
":",
"return",
... | Tries conversion of any value to a time | [
"Tries",
"conversion",
"of",
"any",
"value",
"to",
"a",
"time"
] | b03d91ec58fc328960bce90ecb5fa49dcf467627 | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/conversions.py#L142-L155 | train | 51,162 |
rapidpro/expressions | python/temba_expressions/conversions.py | to_same | def to_same(value1, value2, ctx):
"""
Converts a pair of arguments to their most-likely types. This deviates from Excel which doesn't auto convert values
but is necessary for us to intuitively handle contact fields which don't use the correct value type
"""
if type(value1) == type(value2):
return value1, value2
try:
# try converting to two decimals
return to_decimal(value1, ctx), to_decimal(value2, ctx)
except EvaluationError:
pass
try:
# try converting to two dates
d1, d2 = to_date_or_datetime(value1, ctx), to_date_or_datetime(value2, ctx)
# if either one is a datetime, then the other needs to become a datetime
if type(value1) != type(value2):
d1, d2 = to_datetime(d1, ctx), to_datetime(d2, ctx)
return d1, d2
except EvaluationError:
pass
# try converting to two strings
return to_string(value1, ctx), to_string(value2, ctx) | python | def to_same(value1, value2, ctx):
"""
Converts a pair of arguments to their most-likely types. This deviates from Excel which doesn't auto convert values
but is necessary for us to intuitively handle contact fields which don't use the correct value type
"""
if type(value1) == type(value2):
return value1, value2
try:
# try converting to two decimals
return to_decimal(value1, ctx), to_decimal(value2, ctx)
except EvaluationError:
pass
try:
# try converting to two dates
d1, d2 = to_date_or_datetime(value1, ctx), to_date_or_datetime(value2, ctx)
# if either one is a datetime, then the other needs to become a datetime
if type(value1) != type(value2):
d1, d2 = to_datetime(d1, ctx), to_datetime(d2, ctx)
return d1, d2
except EvaluationError:
pass
# try converting to two strings
return to_string(value1, ctx), to_string(value2, ctx) | [
"def",
"to_same",
"(",
"value1",
",",
"value2",
",",
"ctx",
")",
":",
"if",
"type",
"(",
"value1",
")",
"==",
"type",
"(",
"value2",
")",
":",
"return",
"value1",
",",
"value2",
"try",
":",
"# try converting to two decimals",
"return",
"to_decimal",
"(",
... | Converts a pair of arguments to their most-likely types. This deviates from Excel which doesn't auto convert values
but is necessary for us to intuitively handle contact fields which don't use the correct value type | [
"Converts",
"a",
"pair",
"of",
"arguments",
"to",
"their",
"most",
"-",
"likely",
"types",
".",
"This",
"deviates",
"from",
"Excel",
"which",
"doesn",
"t",
"auto",
"convert",
"values",
"but",
"is",
"necessary",
"for",
"us",
"to",
"intuitively",
"handle",
"... | b03d91ec58fc328960bce90ecb5fa49dcf467627 | https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/conversions.py#L158-L184 | train | 51,163 |
lablup/backend.ai-common | src/ai/backend/common/identity.py | is_containerized | def is_containerized() -> bool:
'''
Check if I am running inside a Linux container.
'''
try:
cginfo = Path('/proc/self/cgroup').read_text()
if '/docker/' in cginfo or '/lxc/' in cginfo:
            return True
        return False
except IOError:
return False | python | def is_containerized() -> bool:
'''
Check if I am running inside a Linux container.
'''
try:
cginfo = Path('/proc/self/cgroup').read_text()
if '/docker/' in cginfo or '/lxc/' in cginfo:
            return True
        return False
except IOError:
return False | [
"def",
"is_containerized",
"(",
")",
"->",
"bool",
":",
"try",
":",
"cginfo",
"=",
"Path",
"(",
"'/proc/self/cgroup'",
")",
".",
"read_text",
"(",
")",
"if",
"'/docker/'",
"in",
"cginfo",
"or",
"'/lxc/'",
"in",
"cginfo",
":",
"return",
"True",
"except",
... | Check if I am running inside a Linux container. | [
"Check",
"if",
"I",
"am",
"running",
"inside",
"a",
"Linux",
"container",
"."
] | 20b3a2551ee5bb3b88e7836471bc244a70ad0ae6 | https://github.com/lablup/backend.ai-common/blob/20b3a2551ee5bb3b88e7836471bc244a70ad0ae6/src/ai/backend/common/identity.py#L24-L33 | train | 51,164 |
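The same cgroup heuristic as `is_containerized` above, restated as a standalone snippet (Linux-specific; `running_in_container` is an illustrative name). It makes the `False` fallback explicit when neither marker is present:

from pathlib import Path

def running_in_container() -> bool:
    # /proc/self/cgroup mentions /docker/ or /lxc/ inside those containers
    try:
        cginfo = Path('/proc/self/cgroup').read_text()
    except IOError:
        return False
    return '/docker/' in cginfo or '/lxc/' in cginfo

print(running_in_container())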
lablup/backend.ai-common | src/ai/backend/common/identity.py | detect_cloud | def detect_cloud() -> str:
'''
Detect the cloud provider where I am running on.
'''
# NOTE: Contributions are welcome!
# Please add other cloud providers such as Rackspace, IBM BlueMix, etc.
if sys.platform.startswith('linux'):
# Google Cloud Platform or Amazon AWS (hvm)
try:
# AWS Nitro-based instances
mb = Path('/sys/devices/virtual/dmi/id/board_vendor').read_text().lower()
if 'amazon' in mb:
return 'amazon'
except IOError:
pass
try:
bios = Path('/sys/devices/virtual/dmi/id/bios_version').read_text().lower()
if 'google' in bios:
return 'google'
if 'amazon' in bios:
return 'amazon'
except IOError:
pass
# Microsoft Azure
# https://gallery.technet.microsoft.com/scriptcenter/Detect-Windows-Azure-aed06d51
# TODO: this only works with Debian/Ubuntu instances.
# TODO: this does not work inside containers.
try:
dhcp = Path('/var/lib/dhcp/dhclient.eth0.leases').read_text()
if 'unknown-245' in dhcp:
return 'azure'
# alternative method is to read /var/lib/waagent/GoalState.1.xml
# but it requires sudo privilege.
except IOError:
pass
else:
        log.warning('Cloud detection is currently implemented for Linux only.')
return None | python | def detect_cloud() -> str:
'''
Detect the cloud provider where I am running on.
'''
# NOTE: Contributions are welcome!
# Please add other cloud providers such as Rackspace, IBM BlueMix, etc.
if sys.platform.startswith('linux'):
# Google Cloud Platform or Amazon AWS (hvm)
try:
# AWS Nitro-based instances
mb = Path('/sys/devices/virtual/dmi/id/board_vendor').read_text().lower()
if 'amazon' in mb:
return 'amazon'
except IOError:
pass
try:
bios = Path('/sys/devices/virtual/dmi/id/bios_version').read_text().lower()
if 'google' in bios:
return 'google'
if 'amazon' in bios:
return 'amazon'
except IOError:
pass
# Microsoft Azure
# https://gallery.technet.microsoft.com/scriptcenter/Detect-Windows-Azure-aed06d51
# TODO: this only works with Debian/Ubuntu instances.
# TODO: this does not work inside containers.
try:
dhcp = Path('/var/lib/dhcp/dhclient.eth0.leases').read_text()
if 'unknown-245' in dhcp:
return 'azure'
# alternative method is to read /var/lib/waagent/GoalState.1.xml
# but it requires sudo privilege.
except IOError:
pass
else:
        log.warning('Cloud detection is currently implemented for Linux only.')
return None | [
"def",
"detect_cloud",
"(",
")",
"->",
"str",
":",
"# NOTE: Contributions are welcome!",
"# Please add other cloud providers such as Rackspace, IBM BlueMix, etc.",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"'linux'",
")",
":",
"# Google Cloud Platform or Amazon AWS ... | Detect the cloud provider where I am running on. | [
"Detect",
"the",
"cloud",
"provider",
"where",
"I",
"am",
"running",
"on",
"."
] | 20b3a2551ee5bb3b88e7836471bc244a70ad0ae6 | https://github.com/lablup/backend.ai-common/blob/20b3a2551ee5bb3b88e7836471bc244a70ad0ae6/src/ai/backend/common/identity.py#L36-L73 | train | 51,165 |
RI-imaging/nrefocus | nrefocus/_propagate.py | refocus | def refocus(field, d, nm, res, method="helmholtz", num_cpus=1, padding=True):
"""Refocus a 1D or 2D field
Parameters
----------
field : 1d or 2d array
1D or 2D background corrected electric field (Ex/BEx)
d : float
Distance to be propagated in pixels (negative for backwards)
nm : float
Refractive index of medium
res : float
Wavelenth in pixels
method : str
Defines the method of propagation;
one of
- "helmholtz" : the optical transfer function `exp(idkₘ(M-1))`
- "fresnel" : paraxial approximation `exp(idk²/kₘ)`
num_cpus : int
Not implemented. Only one CPU is used.
padding : bool
perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionadded:: 0.1.4
Returns
-------
Electric field at `d`.
"""
# FFT of field
fshape = len(field.shape)
assert fshape in [1, 2], "Dimension of `field` must be 1 or 2."
func = fft_propagate
names = func.__code__.co_varnames[:func.__code__.co_argcount]
loc = locals()
vardict = dict()
for name in names:
if name in loc:
vardict[name] = loc[name]
if padding:
field = pad.pad_add(field)
vardict["fftfield"] = np.fft.fftn(field)
refoc = func(**vardict)
if padding:
refoc = pad.pad_rem(refoc)
return refoc | python | def refocus(field, d, nm, res, method="helmholtz", num_cpus=1, padding=True):
"""Refocus a 1D or 2D field
Parameters
----------
field : 1d or 2d array
1D or 2D background corrected electric field (Ex/BEx)
d : float
Distance to be propagated in pixels (negative for backwards)
nm : float
Refractive index of medium
res : float
        Wavelength in pixels
method : str
Defines the method of propagation;
one of
- "helmholtz" : the optical transfer function `exp(idkₘ(M-1))`
- "fresnel" : paraxial approximation `exp(idk²/kₘ)`
num_cpus : int
Not implemented. Only one CPU is used.
padding : bool
perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionadded:: 0.1.4
Returns
-------
Electric field at `d`.
"""
# FFT of field
fshape = len(field.shape)
assert fshape in [1, 2], "Dimension of `field` must be 1 or 2."
func = fft_propagate
names = func.__code__.co_varnames[:func.__code__.co_argcount]
loc = locals()
vardict = dict()
for name in names:
if name in loc:
vardict[name] = loc[name]
if padding:
field = pad.pad_add(field)
vardict["fftfield"] = np.fft.fftn(field)
refoc = func(**vardict)
if padding:
refoc = pad.pad_rem(refoc)
return refoc | [
"def",
"refocus",
"(",
"field",
",",
"d",
",",
"nm",
",",
"res",
",",
"method",
"=",
"\"helmholtz\"",
",",
"num_cpus",
"=",
"1",
",",
"padding",
"=",
"True",
")",
":",
"# FFT of field",
"fshape",
"=",
"len",
"(",
"field",
".",
"shape",
")",
"assert",... | Refocus a 1D or 2D field
Parameters
----------
field : 1d or 2d array
1D or 2D background corrected electric field (Ex/BEx)
d : float
Distance to be propagated in pixels (negative for backwards)
nm : float
Refractive index of medium
res : float
        Wavelength in pixels
method : str
Defines the method of propagation;
one of
- "helmholtz" : the optical transfer function `exp(idkₘ(M-1))`
- "fresnel" : paraxial approximation `exp(idk²/kₘ)`
num_cpus : int
Not implemented. Only one CPU is used.
padding : bool
perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionadded:: 0.1.4
Returns
-------
Electric field at `d`. | [
"Refocus",
"a",
"1D",
"or",
"2D",
"field"
] | ad09aeecace609ab8f9effcb662d2b7d50826080 | https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/_propagate.py#L12-L69 | train | 51,166 |
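A usage sketch for `refocus`, imported from the module path shown in this record. The field and parameter values are toy assumptions (in practice the field comes from background-corrected holography data):

import numpy as np
from nrefocus._propagate import refocus

rng = np.random.RandomState(0)
field = np.exp(1j * 0.2 * rng.rand(64, 64))  # plane wave with a weak random phase

# propagate 5 px forward in water (nm=1.333) at a wavelength of 5 px
refocused = refocus(field, d=5.0, nm=1.333, res=5.0,
                    method='helmholtz', padding=True)
print(refocused.shape, refocused.dtype)  # (64, 64) complex128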
RI-imaging/nrefocus | nrefocus/_propagate.py | refocus_stack | def refocus_stack(fieldstack, d, nm, res, method="helmholtz",
num_cpus=_cpu_count, copy=True, padding=True):
"""Refocus a stack of 1D or 2D fields
Parameters
----------
fieldstack : 2d or 3d array
Stack of 1D or 2D background corrected electric fields (Ex/BEx).
The first axis iterates through the individual fields.
d : float
Distance to be propagated in pixels (negative for backwards)
nm : float
Refractive index of medium
res : float
        Wavelength in pixels
method : str
Defines the method of propagation;
one of
- "helmholtz" : the optical transfer function `exp(idkₘ(M-1))`
- "fresnel" : paraxial approximation `exp(idk²/kₘ)`
num_cpus : str
Defines the number of CPUs to be used for refocusing.
copy : bool
If False, overwrites input stack.
padding : bool
Perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionadded:: 0.1.4
Returns
-------
Electric field stack at `d`.
"""
func = refocus
names = func.__code__.co_varnames[:func.__code__.co_argcount]
loc = locals()
vardict = dict()
for name in names:
if name in loc.keys():
vardict[name] = loc[name]
# default keyword arguments
func_def = func.__defaults__[::-1]
# child processes should only use one cpu
vardict["num_cpus"] = 1
vardict["padding"] = padding
M = fieldstack.shape[0]
stackargs = list()
# Create individual arglists for all fields
for m in range(M):
kwarg = vardict.copy()
kwarg["field"] = fieldstack[m]
# now we turn the kwarg into an arglist
args = list()
for i, a in enumerate(names[::-1]):
# first set default
if i < len(func_def):
val = func_def[i]
if a in kwarg:
val = kwarg[a]
args.append(val)
stackargs.append(args[::-1])
p = mp.Pool(num_cpus)
result = p.map_async(_refocus_wrapper, stackargs).get()
p.close()
p.terminate()
p.join()
if copy:
data = np.zeros(fieldstack.shape, dtype=result[0].dtype)
else:
data = fieldstack
for m in range(M):
data[m] = result[m]
return data | python | def refocus_stack(fieldstack, d, nm, res, method="helmholtz",
num_cpus=_cpu_count, copy=True, padding=True):
"""Refocus a stack of 1D or 2D fields
Parameters
----------
fieldstack : 2d or 3d array
Stack of 1D or 2D background corrected electric fields (Ex/BEx).
The first axis iterates through the individual fields.
d : float
Distance to be propagated in pixels (negative for backwards)
nm : float
Refractive index of medium
res : float
        Wavelength in pixels
method : str
Defines the method of propagation;
one of
- "helmholtz" : the optical transfer function `exp(idkₘ(M-1))`
- "fresnel" : paraxial approximation `exp(idk²/kₘ)`
num_cpus : str
Defines the number of CPUs to be used for refocusing.
copy : bool
If False, overwrites input stack.
padding : bool
Perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionadded:: 0.1.4
Returns
-------
Electric field stack at `d`.
"""
func = refocus
names = func.__code__.co_varnames[:func.__code__.co_argcount]
loc = locals()
vardict = dict()
for name in names:
if name in loc.keys():
vardict[name] = loc[name]
# default keyword arguments
func_def = func.__defaults__[::-1]
# child processes should only use one cpu
vardict["num_cpus"] = 1
vardict["padding"] = padding
M = fieldstack.shape[0]
stackargs = list()
# Create individual arglists for all fields
for m in range(M):
kwarg = vardict.copy()
kwarg["field"] = fieldstack[m]
# now we turn the kwarg into an arglist
args = list()
for i, a in enumerate(names[::-1]):
# first set default
if i < len(func_def):
val = func_def[i]
if a in kwarg:
val = kwarg[a]
args.append(val)
stackargs.append(args[::-1])
p = mp.Pool(num_cpus)
result = p.map_async(_refocus_wrapper, stackargs).get()
p.close()
p.terminate()
p.join()
if copy:
data = np.zeros(fieldstack.shape, dtype=result[0].dtype)
else:
data = fieldstack
for m in range(M):
data[m] = result[m]
return data | [
"def",
"refocus_stack",
"(",
"fieldstack",
",",
"d",
",",
"nm",
",",
"res",
",",
"method",
"=",
"\"helmholtz\"",
",",
"num_cpus",
"=",
"_cpu_count",
",",
"copy",
"=",
"True",
",",
"padding",
"=",
"True",
")",
":",
"func",
"=",
"refocus",
"names",
"=",
... | Refocus a stack of 1D or 2D fields
Parameters
----------
fieldstack : 2d or 3d array
Stack of 1D or 2D background corrected electric fields (Ex/BEx).
The first axis iterates through the individual fields.
d : float
Distance to be propagated in pixels (negative for backwards)
nm : float
Refractive index of medium
res : float
        Wavelength in pixels
method : str
Defines the method of propagation;
one of
- "helmholtz" : the optical transfer function `exp(idkₘ(M-1))`
- "fresnel" : paraxial approximation `exp(idk²/kₘ)`
num_cpus : str
Defines the number of CPUs to be used for refocusing.
copy : bool
If False, overwrites input stack.
padding : bool
Perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionadded:: 0.1.4
Returns
-------
Electric field stack at `d`. | [
"Refocus",
"a",
"stack",
"of",
"1D",
"or",
"2D",
"fields"
] | ad09aeecace609ab8f9effcb662d2b7d50826080 | https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/_propagate.py#L72-L157 | train | 51,167 |
RI-imaging/nrefocus | nrefocus/_propagate.py | fft_propagate | def fft_propagate(fftfield, d, nm, res, method="helmholtz",
ret_fft=False):
"""Propagates a 1D or 2D Fourier transformed field
Parameters
----------
fftfield : 1-dimensional or 2-dimensional ndarray
        Fourier transform of the 1D or 2D electric field component
d : float
Distance to be propagated in pixels (negative for backwards)
nm : float
Refractive index of medium
res : float
Wavelength in pixels
method : str
Defines the method of propagation;
one of
- "helmholtz" : the optical transfer function `exp(idkₘ(M-1))`
- "fresnel" : paraxial approximation `exp(idk²/kₘ)`
ret_fft : bool
Do not perform an inverse Fourier transform and return the field
in Fourier space.
Returns
-------
Electric field at `d`. If `ret_fft` is True, then the
Fourier transform of the electric field will be returned (faster).
"""
fshape = len(fftfield.shape)
assert fshape in [1, 2], "Dimension of `fftfield` must be 1 or 2."
if fshape == 1:
func = fft_propagate_2d
else:
func = fft_propagate_3d
names = func.__code__.co_varnames[:func.__code__.co_argcount]
loc = locals()
vardict = dict()
for name in names:
vardict[name] = loc[name]
return func(**vardict) | python | def fft_propagate(fftfield, d, nm, res, method="helmholtz",
ret_fft=False):
"""Propagates a 1D or 2D Fourier transformed field
Parameters
----------
fftfield : 1-dimensional or 2-dimensional ndarray
        Fourier transform of the 1D or 2D electric field component
d : float
Distance to be propagated in pixels (negative for backwards)
nm : float
Refractive index of medium
res : float
Wavelength in pixels
method : str
Defines the method of propagation;
one of
- "helmholtz" : the optical transfer function `exp(idkₘ(M-1))`
- "fresnel" : paraxial approximation `exp(idk²/kₘ)`
ret_fft : bool
Do not perform an inverse Fourier transform and return the field
in Fourier space.
Returns
-------
Electric field at `d`. If `ret_fft` is True, then the
Fourier transform of the electric field will be returned (faster).
"""
fshape = len(fftfield.shape)
assert fshape in [1, 2], "Dimension of `fftfield` must be 1 or 2."
if fshape == 1:
func = fft_propagate_2d
else:
func = fft_propagate_3d
names = func.__code__.co_varnames[:func.__code__.co_argcount]
loc = locals()
vardict = dict()
for name in names:
vardict[name] = loc[name]
return func(**vardict) | [
"def",
"fft_propagate",
"(",
"fftfield",
",",
"d",
",",
"nm",
",",
"res",
",",
"method",
"=",
"\"helmholtz\"",
",",
"ret_fft",
"=",
"False",
")",
":",
"fshape",
"=",
"len",
"(",
"fftfield",
".",
"shape",
")",
"assert",
"fshape",
"in",
"[",
"1",
",",
... | Propagates a 1D or 2D Fourier transformed field
Parameters
----------
fftfield : 1-dimensional or 2-dimensional ndarray
        Fourier transform of the 1D or 2D electric field component
d : float
Distance to be propagated in pixels (negative for backwards)
nm : float
Refractive index of medium
res : float
Wavelength in pixels
method : str
Defines the method of propagation;
one of
- "helmholtz" : the optical transfer function `exp(idkₘ(M-1))`
- "fresnel" : paraxial approximation `exp(idk²/kₘ)`
ret_fft : bool
Do not perform an inverse Fourier transform and return the field
in Fourier space.
Returns
-------
Electric field at `d`. If `ret_fft` is True, then the
Fourier transform of the electric field will be returned (faster). | [
"Propagates",
"a",
"1D",
"or",
"2D",
"Fourier",
"transformed",
"field"
] | ad09aeecace609ab8f9effcb662d2b7d50826080 | https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/_propagate.py#L160-L207 | train | 51,168 |
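A round-trip check for `fft_propagate` (module-path import as in this record): propagating a plane wave forward by `d` and back by `-d` recovers it, because the evanescent-mode filter does not touch the zero-frequency component:

import numpy as np
from nrefocus._propagate import fft_propagate

field = np.ones((32, 32), dtype=complex)  # plane wave
ft = np.fft.fftn(field)
fwd = fft_propagate(ft, d=7.0, nm=1.0, res=4.0, ret_fft=True)
back = np.fft.ifftn(fft_propagate(fwd, d=-7.0, nm=1.0, res=4.0, ret_fft=True))
print(np.allclose(back, field))  # True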
RI-imaging/nrefocus | nrefocus/_propagate.py | fft_propagate_2d | def fft_propagate_2d(fftfield, d, nm, res, method="helmholtz",
ret_fft=False):
"""Propagate a 1D Fourier transformed field in 2D
Parameters
----------
fftfield : 1d array
Fourier transform of 1D Electric field component
d : float
Distance to be propagated in pixels (negative for backwards)
nm : float
Refractive index of medium
res : float
Wavelength in pixels
method : str
Defines the method of propagation;
one of
- "helmholtz" : the optical transfer function `exp(idkₘ(M-1))`
- "fresnel" : paraxial approximation `exp(idk²/kₘ)`
ret_fft : bool
Do not perform an inverse Fourier transform and return the field
in Fourier space.
Returns
-------
Electric field at `d`. If `ret_fft` is True, then the
Fourier transform of the electric field will be returned (faster).
"""
assert len(fftfield.shape) == 1, "Dimension of `fftfield` must be 1."
km = (2 * np.pi * nm) / res
kx = np.fft.fftfreq(len(fftfield)) * 2 * np.pi
# free space propagator is
if method == "helmholtz":
# exp(i*sqrt(km²-kx²)*d)
# Also subtract incoming plane wave. We are only considering
# the scattered field here.
root_km = km**2 - kx**2
rt0 = (root_km > 0)
# multiply by rt0 (filter in Fourier space)
fstemp = np.exp(1j * (np.sqrt(root_km * rt0) - km) * d) * rt0
elif method == "fresnel":
# exp(i*d*(km-kx²/(2*km))
# fstemp = np.exp(-1j * d * (kx**2/(2*km)))
fstemp = np.exp(-1j * d * (kx**2/(2*km)))
else:
raise ValueError("Unknown method: {}".format(method))
if ret_fft:
return fftfield * fstemp
else:
return np.fft.ifft(fftfield * fstemp) | python | def fft_propagate_2d(fftfield, d, nm, res, method="helmholtz",
ret_fft=False):
"""Propagate a 1D Fourier transformed field in 2D
Parameters
----------
fftfield : 1d array
Fourier transform of 1D Electric field component
d : float
Distance to be propagated in pixels (negative for backwards)
nm : float
Refractive index of medium
res : float
Wavelength in pixels
method : str
Defines the method of propagation;
one of
- "helmholtz" : the optical transfer function `exp(idkₘ(M-1))`
- "fresnel" : paraxial approximation `exp(idk²/kₘ)`
ret_fft : bool
Do not perform an inverse Fourier transform and return the field
in Fourier space.
Returns
-------
Electric field at `d`. If `ret_fft` is True, then the
Fourier transform of the electric field will be returned (faster).
"""
assert len(fftfield.shape) == 1, "Dimension of `fftfield` must be 1."
km = (2 * np.pi * nm) / res
kx = np.fft.fftfreq(len(fftfield)) * 2 * np.pi
# free space propagator is
if method == "helmholtz":
# exp(i*sqrt(km²-kx²)*d)
# Also subtract incoming plane wave. We are only considering
# the scattered field here.
root_km = km**2 - kx**2
rt0 = (root_km > 0)
# multiply by rt0 (filter in Fourier space)
fstemp = np.exp(1j * (np.sqrt(root_km * rt0) - km) * d) * rt0
elif method == "fresnel":
# exp(i*d*(km-kx²/(2*km))
# fstemp = np.exp(-1j * d * (kx**2/(2*km)))
fstemp = np.exp(-1j * d * (kx**2/(2*km)))
else:
raise ValueError("Unknown method: {}".format(method))
if ret_fft:
return fftfield * fstemp
else:
return np.fft.ifft(fftfield * fstemp) | [
"def",
"fft_propagate_2d",
"(",
"fftfield",
",",
"d",
",",
"nm",
",",
"res",
",",
"method",
"=",
"\"helmholtz\"",
",",
"ret_fft",
"=",
"False",
")",
":",
"assert",
"len",
"(",
"fftfield",
".",
"shape",
")",
"==",
"1",
",",
"\"Dimension of `fftfield` must b... | Propagate a 1D Fourier transformed field in 2D
Parameters
----------
fftfield : 1d array
Fourier transform of 1D Electric field component
d : float
Distance to be propagated in pixels (negative for backwards)
nm : float
Refractive index of medium
res : float
Wavelength in pixels
method : str
Defines the method of propagation;
one of
- "helmholtz" : the optical transfer function `exp(idkₘ(M-1))`
- "fresnel" : paraxial approximation `exp(idk²/kₘ)`
ret_fft : bool
Do not perform an inverse Fourier transform and return the field
in Fourier space.
Returns
-------
Electric field at `d`. If `ret_fft` is True, then the
Fourier transform of the electric field will be returned (faster). | [
"Propagate",
"a",
"1D",
"Fourier",
"transformed",
"field",
"in",
"2D"
] | ad09aeecace609ab8f9effcb662d2b7d50826080 | https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/_propagate.py#L210-L265 | train | 51,169 |
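A quick sanity check for `fft_propagate_2d`: a pure 1D plane wave only populates the k_x = 0 mode, where the Helmholtz propagator (with the incident wave subtracted) equals 1, so the field comes back unchanged:

import numpy as np
from nrefocus._propagate import fft_propagate_2d

line = np.ones(128, dtype=complex)  # 1D plane wave
ft = np.fft.fft(line)
prop = fft_propagate_2d(ft, d=10.0, nm=1.0, res=6.0)
print(np.allclose(prop, line))  # True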
RI-imaging/nrefocus | nrefocus/_propagate.py | fft_propagate_3d | def fft_propagate_3d(fftfield, d, nm, res, method="helmholtz",
ret_fft=False):
"""Propagate a 2D Fourier transformed field in 3D
Parameters
----------
fftfield : 2d array
Fourier transform of 2D Electric field component
d : float
Distance to be propagated in pixels (negative for backwards)
nm : float
Refractive index of medium
res : float
Wavelength in pixels
method : str
Defines the method of propagation;
one of
- "helmholtz" : the optical transfer function `exp(idkₘ(M-1))`
- "fresnel" : paraxial approximation `exp(idk²/kₘ)`
ret_fft : bool
Do not perform an inverse Fourier transform and return the field
in Fourier space.
Returns
-------
Electric field at `d`. If `ret_fft` is True, then the
Fourier transform of the electric field will be returned (faster).
"""
assert len(fftfield.shape) == 2, "Dimension of `fftfield` must be 2."
# if fftfield.shape[0] != fftfield.shape[1]:
# raise NotImplementedError("Field must be square shaped.")
# free space propagator is
# exp(i*sqrt(km**2-kx**2-ky**2)*d)
km = (2 * np.pi * nm) / res
kx = (np.fft.fftfreq(fftfield.shape[0]) * 2 * np.pi).reshape(-1, 1)
ky = (np.fft.fftfreq(fftfield.shape[1]) * 2 * np.pi).reshape(1, -1)
if method == "helmholtz":
# exp(i*sqrt(km²-kx²-ky²)*d)
root_km = km**2 - kx**2 - ky**2
rt0 = (root_km > 0)
# multiply by rt0 (filter in Fourier space)
fstemp = np.exp(1j * (np.sqrt(root_km * rt0) - km) * d) * rt0
elif method == "fresnel":
# exp(i*d*(km-(kx²+ky²)/(2*km))
# fstemp = np.exp(-1j * d * (kx**2+ky**2)/(2*km))
fstemp = np.exp(-1j * d * (kx**2 + ky**2)/(2*km))
else:
raise ValueError("Unknown method: {}".format(method))
# fstemp[np.where(np.isnan(fstemp))] = 0
# Also subtract incoming plane wave. We are only considering
# the scattered field here.
if ret_fft:
return fftfield * fstemp
else:
return np.fft.ifft2(fftfield * fstemp) | python | def fft_propagate_3d(fftfield, d, nm, res, method="helmholtz",
ret_fft=False):
"""Propagate a 2D Fourier transformed field in 3D
Parameters
----------
fftfield : 2d array
Fourier transform of 2D Electric field component
d : float
Distance to be propagated in pixels (negative for backwards)
nm : float
Refractive index of medium
res : float
Wavelength in pixels
method : str
Defines the method of propagation;
one of
- "helmholtz" : the optical transfer function `exp(idkₘ(M-1))`
- "fresnel" : paraxial approximation `exp(idk²/kₘ)`
ret_fft : bool
Do not perform an inverse Fourier transform and return the field
in Fourier space.
Returns
-------
Electric field at `d`. If `ret_fft` is True, then the
Fourier transform of the electric field will be returned (faster).
"""
assert len(fftfield.shape) == 2, "Dimension of `fftfield` must be 2."
# if fftfield.shape[0] != fftfield.shape[1]:
# raise NotImplementedError("Field must be square shaped.")
# free space propagator is
# exp(i*sqrt(km**2-kx**2-ky**2)*d)
km = (2 * np.pi * nm) / res
kx = (np.fft.fftfreq(fftfield.shape[0]) * 2 * np.pi).reshape(-1, 1)
ky = (np.fft.fftfreq(fftfield.shape[1]) * 2 * np.pi).reshape(1, -1)
if method == "helmholtz":
# exp(i*sqrt(km²-kx²-ky²)*d)
root_km = km**2 - kx**2 - ky**2
rt0 = (root_km > 0)
# multiply by rt0 (filter in Fourier space)
fstemp = np.exp(1j * (np.sqrt(root_km * rt0) - km) * d) * rt0
elif method == "fresnel":
# exp(i*d*(km-(kx²+ky²)/(2*km))
# fstemp = np.exp(-1j * d * (kx**2+ky**2)/(2*km))
fstemp = np.exp(-1j * d * (kx**2 + ky**2)/(2*km))
else:
raise ValueError("Unknown method: {}".format(method))
# fstemp[np.where(np.isnan(fstemp))] = 0
# Also subtract incoming plane wave. We are only considering
# the scattered field here.
if ret_fft:
return fftfield * fstemp
else:
return np.fft.ifft2(fftfield * fstemp) | [
"def",
"fft_propagate_3d",
"(",
"fftfield",
",",
"d",
",",
"nm",
",",
"res",
",",
"method",
"=",
"\"helmholtz\"",
",",
"ret_fft",
"=",
"False",
")",
":",
"assert",
"len",
"(",
"fftfield",
".",
"shape",
")",
"==",
"2",
",",
"\"Dimension of `fftfield` must b... | Propagate a 2D Fourier transformed field in 3D
Parameters
----------
fftfield : 2d array
Fourier transform of 2D Electric field component
d : float
Distance to be propagated in pixels (negative for backwards)
nm : float
Refractive index of medium
res : float
Wavelength in pixels
method : str
Defines the method of propagation;
one of
- "helmholtz" : the optical transfer function `exp(idkₘ(M-1))`
- "fresnel" : paraxial approximation `exp(idk²/kₘ)`
ret_fft : bool
Do not perform an inverse Fourier transform and return the field
in Fourier space.
Returns
-------
Electric field at `d`. If `ret_fft` is True, then the
Fourier transform of the electric field will be returned (faster). | [
"Propagate",
"a",
"2D",
"Fourier",
"transformed",
"field",
"in",
"3D"
] | ad09aeecace609ab8f9effcb662d2b7d50826080 | https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/_propagate.py#L268-L326 | train | 51,170 |
RI-imaging/nrefocus | nrefocus/_autofocus.py | autofocus | def autofocus(field, nm, res, ival, roi=None,
metric="average gradient", padding=True,
ret_d=False, ret_grad=False, num_cpus=1):
"""Numerical autofocusing of a field using the Helmholtz equation.
Parameters
----------
field : 1d or 2d ndarray
Electric field is BG-Corrected, i.e. field = EX/BEx
nm : float
Refractive index of medium.
res : float
Size of wavelength in pixels.
ival : tuple of floats
Approximate interval to search for optimal focus in px.
roi : rectangular region of interest (x1, y1, x2, y2)
Region of interest of `field` for which the metric will be
minimized. If not given, the entire `field` will be used.
metric : str
- "average gradient" : average gradient metric of amplitude
- "rms contrast" : RMS contrast of phase data
- "spectrum" : sum of filtered Fourier coefficients
padding: bool
Perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionchanged:: 0.1.4
improved padding value and padding location
    ret_d : bool
Return the autofocusing distance in pixels. Defaults to False.
    ret_grad : bool
Return the computed gradients as a list.
num_cpus : int
Not implemented.
Returns
-------
field, [d, [grad]]
The focused field and optionally, the optimal focusing distance and
the computed gradients.
"""
if metric == "average gradient":
def metric_func(x): return metrics.average_gradient(np.abs(x))
elif metric == "rms contrast":
def metric_func(x): return -metrics.contrast_rms(np.angle(x))
elif metric == "spectrum":
def metric_func(x): return metrics.spectral(np.abs(x), res)
else:
raise ValueError("No such metric: {}".format(metric))
field, d, grad = minimize_metric(field, metric_func, nm, res, ival,
roi=roi, padding=padding)
ret_list = [field]
if ret_d:
ret_list += [d]
if ret_grad:
ret_list += [grad]
if len(ret_list) == 1:
return ret_list[0]
else:
return tuple(ret_list) | python | def autofocus(field, nm, res, ival, roi=None,
metric="average gradient", padding=True,
ret_d=False, ret_grad=False, num_cpus=1):
"""Numerical autofocusing of a field using the Helmholtz equation.
Parameters
----------
field : 1d or 2d ndarray
Electric field is BG-Corrected, i.e. field = EX/BEx
nm : float
Refractive index of medium.
res : float
Size of wavelength in pixels.
ival : tuple of floats
Approximate interval to search for optimal focus in px.
roi : rectangular region of interest (x1, y1, x2, y2)
Region of interest of `field` for which the metric will be
minimized. If not given, the entire `field` will be used.
metric : str
- "average gradient" : average gradient metric of amplitude
- "rms contrast" : RMS contrast of phase data
- "spectrum" : sum of filtered Fourier coefficients
padding: bool
Perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionchanged:: 0.1.4
improved padding value and padding location
    ret_d : bool
Return the autofocusing distance in pixels. Defaults to False.
    ret_grad : bool
Return the computed gradients as a list.
num_cpus : int
Not implemented.
Returns
-------
field, [d, [grad]]
The focused field and optionally, the optimal focusing distance and
the computed gradients.
"""
if metric == "average gradient":
def metric_func(x): return metrics.average_gradient(np.abs(x))
elif metric == "rms contrast":
def metric_func(x): return -metrics.contrast_rms(np.angle(x))
elif metric == "spectrum":
def metric_func(x): return metrics.spectral(np.abs(x), res)
else:
raise ValueError("No such metric: {}".format(metric))
field, d, grad = minimize_metric(field, metric_func, nm, res, ival,
roi=roi, padding=padding)
ret_list = [field]
if ret_d:
ret_list += [d]
if ret_grad:
ret_list += [grad]
if len(ret_list) == 1:
return ret_list[0]
else:
return tuple(ret_list) | [
"def",
"autofocus",
"(",
"field",
",",
"nm",
",",
"res",
",",
"ival",
",",
"roi",
"=",
"None",
",",
"metric",
"=",
"\"average gradient\"",
",",
"padding",
"=",
"True",
",",
"ret_d",
"=",
"False",
",",
"ret_grad",
"=",
"False",
",",
"num_cpus",
"=",
"... | Numerical autofocusing of a field using the Helmholtz equation.
Parameters
----------
field : 1d or 2d ndarray
Electric field is BG-Corrected, i.e. field = EX/BEx
nm : float
Refractive index of medium.
res : float
Size of wavelength in pixels.
ival : tuple of floats
Approximate interval to search for optimal focus in px.
roi : rectangular region of interest (x1, y1, x2, y2)
Region of interest of `field` for which the metric will be
minimized. If not given, the entire `field` will be used.
metric : str
- "average gradient" : average gradient metric of amplitude
- "rms contrast" : RMS contrast of phase data
- "spectrum" : sum of filtered Fourier coefficients
padding: bool
Perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionchanged:: 0.1.4
improved padding value and padding location
    ret_d : bool
Return the autofocusing distance in pixels. Defaults to False.
    ret_grad : bool
Return the computed gradients as a list.
num_cpus : int
Not implemented.
Returns
-------
field, [d, [grad]]
The focused field and optionally, the optimal focusing distance and
the computed gradients. | [
"Numerical",
"autofocusing",
"of",
"a",
"field",
"using",
"the",
"Helmholtz",
"equation",
"."
] | ad09aeecace609ab8f9effcb662d2b7d50826080 | https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/_autofocus.py#L19-L83 | train | 51,171 |
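A usage sketch for `autofocus` (module-path import per this record). The toy random field only exercises the interface; the recovered distance carries no physical meaning here:

import numpy as np
from nrefocus._autofocus import autofocus

rng = np.random.RandomState(1)
field = np.exp(1j * 0.1 * rng.rand(64, 64))
refocused, d_opt = autofocus(field, nm=1.333, res=5.0, ival=(-10, 10),
                             metric='average gradient', ret_d=True)
print(refocused.shape, d_opt)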
RI-imaging/nrefocus | nrefocus/_autofocus.py | autofocus_stack | def autofocus_stack(fieldstack, nm, res, ival, roi=None,
metric="average gradient", padding=True,
same_dist=False, ret_ds=False, ret_grads=False,
num_cpus=_cpu_count, copy=True):
"""Numerical autofocusing of a stack using the Helmholtz equation.
Parameters
----------
fieldstack : 2d or 3d ndarray
Electric field is BG-Corrected, i.e. Field = EX/BEx
nm : float
Refractive index of medium.
res : float
Size of wavelength in pixels.
ival : tuple of floats
Approximate interval to search for optimal focus in px.
metric : str
see `autofocus_field`.
padding : bool
Perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionchanged:: 0.1.4
improved padding value and padding location
ret_dopt : bool
Return optimized distance and gradient plotting data.
same_dist : bool
Refocus entire sinogram with one distance.
    ret_ds : bool
        Return the autofocusing distances in pixels. Defaults to False.
        If same_dist is True, still returns the autofocusing distances
        of the first pass; the refocusing distance actually used is their
        average.
    ret_grads : bool
Return the computed gradients as a list.
copy : bool
If False, overwrites input array.
Returns
-------
    The focused field (plus the refocusing distances and gradient data if requested)
"""
dopt = list()
grad = list()
M = fieldstack.shape[0]
# setup arguments
stackargs = list()
for s in range(M):
        stackargs.append([fieldstack[s].copy() if copy else fieldstack[s],
                          nm, res, ival,
roi, metric, padding, True, True, 1])
# perform first pass
p = mp.Pool(num_cpus)
result = p.map_async(_autofocus_wrapper, stackargs).get()
p.close()
p.terminate()
p.join()
# result = []
# for arg in stackargs:
# result += _autofocus_wrapper(arg)
newstack = np.zeros(fieldstack.shape, dtype=fieldstack.dtype)
for s in range(M):
field, ds, gs = result[s]
dopt.append(ds)
grad.append(gs)
newstack[s] = field
# perform second pass if `same_dist` is True
if same_dist:
# find average dopt
davg = np.average(dopt)
newstack = refocus_stack(fieldstack, davg, nm, res,
num_cpus=num_cpus, copy=copy,
padding=padding)
ret_list = [newstack]
if ret_ds:
ret_list += [dopt]
if ret_grads:
ret_list += [grad]
if len(ret_list) == 1:
return ret_list[0]
else:
return tuple(ret_list) | python | def autofocus_stack(fieldstack, nm, res, ival, roi=None,
metric="average gradient", padding=True,
same_dist=False, ret_ds=False, ret_grads=False,
num_cpus=_cpu_count, copy=True):
"""Numerical autofocusing of a stack using the Helmholtz equation.
Parameters
----------
fieldstack : 2d or 3d ndarray
Electric field is BG-Corrected, i.e. Field = EX/BEx
nm : float
Refractive index of medium.
res : float
Size of wavelength in pixels.
ival : tuple of floats
Approximate interval to search for optimal focus in px.
metric : str
see `autofocus_field`.
padding : bool
Perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionchanged:: 0.1.4
improved padding value and padding location
ret_dopt : bool
Return optimized distance and gradient plotting data.
same_dist : bool
Refocus entire sinogram with one distance.
    ret_ds : bool
        Return the autofocusing distances in pixels. Defaults to False.
        If same_dist is True, still returns the autofocusing distances
        of the first pass; the refocusing distance actually used is their
        average.
    ret_grads : bool
Return the computed gradients as a list.
copy : bool
If False, overwrites input array.
Returns
-------
    The focused field (plus the refocusing distances and gradient data if requested)
"""
dopt = list()
grad = list()
M = fieldstack.shape[0]
# setup arguments
stackargs = list()
for s in range(M):
        stackargs.append([fieldstack[s].copy() if copy else fieldstack[s],
                          nm, res, ival,
roi, metric, padding, True, True, 1])
# perform first pass
p = mp.Pool(num_cpus)
result = p.map_async(_autofocus_wrapper, stackargs).get()
p.close()
p.terminate()
p.join()
# result = []
# for arg in stackargs:
# result += _autofocus_wrapper(arg)
newstack = np.zeros(fieldstack.shape, dtype=fieldstack.dtype)
for s in range(M):
field, ds, gs = result[s]
dopt.append(ds)
grad.append(gs)
newstack[s] = field
# perform second pass if `same_dist` is True
if same_dist:
# find average dopt
davg = np.average(dopt)
newstack = refocus_stack(fieldstack, davg, nm, res,
num_cpus=num_cpus, copy=copy,
padding=padding)
ret_list = [newstack]
if ret_ds:
ret_list += [dopt]
if ret_grads:
ret_list += [grad]
if len(ret_list) == 1:
return ret_list[0]
else:
return tuple(ret_list) | [
"def",
"autofocus_stack",
"(",
"fieldstack",
",",
"nm",
",",
"res",
",",
"ival",
",",
"roi",
"=",
"None",
",",
"metric",
"=",
"\"average gradient\"",
",",
"padding",
"=",
"True",
",",
"same_dist",
"=",
"False",
",",
"ret_ds",
"=",
"False",
",",
"ret_grad... | Numerical autofocusing of a stack using the Helmholtz equation.
Parameters
----------
fieldstack : 2d or 3d ndarray
Electric field is BG-Corrected, i.e. Field = EX/BEx
nm : float
Refractive index of medium.
res : float
Size of wavelength in pixels.
ival : tuple of floats
Approximate interval to search for optimal focus in px.
metric : str
see `autofocus_field`.
padding : bool
Perform padding with linear ramp from edge to average
to reduce ringing artifacts.
.. versionchanged:: 0.1.4
improved padding value and padding location
ret_dopt : bool
Return optimized distance and gradient plotting data.
same_dist : bool
Refocus entire sinogram with one distance.
    ret_ds : bool
        Return the autofocusing distances in pixels. Defaults to False.
        If same_dist is True, still returns the autofocusing distances
        of the first pass; the refocusing distance actually used is their
        average.
    ret_grads : bool
Return the computed gradients as a list.
copy : bool
If False, overwrites input array.
Returns
-------
    The focused field (plus the refocusing distances and gradient data if requested) | [
"Numerical",
"autofocusing",
"of",
"a",
"stack",
"using",
"the",
"Helmholtz",
"equation",
"."
] | ad09aeecace609ab8f9effcb662d2b7d50826080 | https://github.com/RI-imaging/nrefocus/blob/ad09aeecace609ab8f9effcb662d2b7d50826080/nrefocus/_autofocus.py#L86-L175 | train | 51,172 |
ttinies/sc2gameMapRepo | sc2maptool/index.py | getIndex | def getIndex(folderPath=None):
"""parse the 'Maps' subfolder directory divining criteria for valid maps"""
try: return cache.structure
except AttributeError: pass # if it doesn't exist, generate and cache the map file data
    if folderPath is None:
from sc2maptool.startup import setup
folderPath = setup()
############################################################################
def folderSearch(path, attrList=[]):
ret = []
for item in glob(os.path.join(path, '*')):
if item == os.sep: continue
itemName = os.path.basename(item)
if os.path.isdir(item): ret += folderSearch(item, attrList + [itemName])
elif itemName.endswith(c.SC2_MAP_EXT): ret.append( MapRecord(itemName, item, attrList) )
return ret
############################################################################
cache.structure = folderSearch(folderPath)
return cache.structure | python | def getIndex(folderPath=None):
"""parse the 'Maps' subfolder directory divining criteria for valid maps"""
try: return cache.structure
except AttributeError: pass # if it doesn't exist, generate and cache the map file data
    if folderPath is None:
from sc2maptool.startup import setup
folderPath = setup()
############################################################################
def folderSearch(path, attrList=[]):
ret = []
for item in glob(os.path.join(path, '*')):
if item == os.sep: continue
itemName = os.path.basename(item)
if os.path.isdir(item): ret += folderSearch(item, attrList + [itemName])
elif itemName.endswith(c.SC2_MAP_EXT): ret.append( MapRecord(itemName, item, attrList) )
return ret
############################################################################
cache.structure = folderSearch(folderPath)
return cache.structure | [
"def",
"getIndex",
"(",
"folderPath",
"=",
"None",
")",
":",
"try",
":",
"return",
"cache",
".",
"structure",
"except",
"AttributeError",
":",
"pass",
"# if it doesn't exist, generate and cache the map file data",
"if",
"folderPath",
"==",
"None",
":",
"from",
"sc2m... | parse the 'Maps' subfolder directory divining criteria for valid maps | [
"parse",
"the",
"Maps",
"subfolder",
"directory",
"divining",
"criteria",
"for",
"valid",
"maps"
] | 3a215067fae8f86f6a3ffe37272fbd7a5461cfab | https://github.com/ttinies/sc2gameMapRepo/blob/3a215067fae8f86f6a3ffe37272fbd7a5461cfab/sc2maptool/index.py#L16-L34 | train | 51,173 |
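A standalone version of the nested `folderSearch` helper above, without the module-level cache or the `MapRecord` wrapper (`find_by_ext` is an illustrative name):

import os
from glob import glob

def find_by_ext(root, ext, attrs=()):
    # walk `root` recursively, collecting (filename, path, folder-derived
    # attributes) for every file that ends with `ext`
    found = []
    for item in glob(os.path.join(root, '*')):
        name = os.path.basename(item)
        if os.path.isdir(item):
            found += find_by_ext(item, ext, attrs + (name,))
        elif name.endswith(ext):
            found.append((name, item, attrs))
    return found

# e.g. find_by_ext('Maps', '.SC2Map') mirrors getIndex() minus the caching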
lablup/backend.ai-common | src/ai/backend/common/types.py | _stringify_number | def _stringify_number(v):
'''
Stringify a number, preventing unwanted scientific notations.
'''
if isinstance(v, (float, Decimal)):
if math.isinf(v) and v > 0:
v = 'Infinity'
elif math.isinf(v) and v < 0:
v = '-Infinity'
else:
v = '{:f}'.format(v)
elif isinstance(v, BinarySize):
v = '{:d}'.format(int(v))
elif isinstance(v, int):
v = '{:d}'.format(v)
else:
v = str(v)
return v | python | def _stringify_number(v):
'''
Stringify a number, preventing unwanted scientific notations.
'''
if isinstance(v, (float, Decimal)):
if math.isinf(v) and v > 0:
v = 'Infinity'
elif math.isinf(v) and v < 0:
v = '-Infinity'
else:
v = '{:f}'.format(v)
elif isinstance(v, BinarySize):
v = '{:d}'.format(int(v))
elif isinstance(v, int):
v = '{:d}'.format(v)
else:
v = str(v)
return v | [
"def",
"_stringify_number",
"(",
"v",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"(",
"float",
",",
"Decimal",
")",
")",
":",
"if",
"math",
".",
"isinf",
"(",
"v",
")",
"and",
"v",
">",
"0",
":",
"v",
"=",
"'Infinity'",
"elif",
"math",
".",
"... | Stringify a number, preventing unwanted scientific notations. | [
"Stringify",
"a",
"number",
"preventing",
"unwanted",
"scientific",
"notations",
"."
] | 20b3a2551ee5bb3b88e7836471bc244a70ad0ae6 | https://github.com/lablup/backend.ai-common/blob/20b3a2551ee5bb3b88e7836471bc244a70ad0ae6/src/ai/backend/common/types.py#L692-L709 | train | 51,174 |
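A simplified, runnable sketch of `_stringify_number` with the `BinarySize` branch omitted (`stringify` is an illustrative name); it shows why fixed-point formatting is used instead of `str`:

import math
from decimal import Decimal

def stringify(v):
    if isinstance(v, (float, Decimal)):
        if math.isinf(v):
            return 'Infinity' if v > 0 else '-Infinity'
        return '{:f}'.format(v)   # fixed point, no scientific notation
    if isinstance(v, int):
        return '{:d}'.format(v)
    return str(v)

print(stringify(Decimal('1E-8')))  # 0.00000001 (str() would give 1E-8)
print(stringify(float('inf')))     # Infinity
print(stringify(3))                # 3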
lablup/backend.ai-common | src/ai/backend/common/types.py | ImageRef.resolve_alias | async def resolve_alias(cls, alias_key: str, etcd: etcd.AsyncEtcd):
'''
Resolve the tag using etcd so that the current instance indicates
a concrete, latest image.
Note that alias resolving does not take the registry component into
account.
'''
alias_target = None
repeats = 0
while repeats < 8:
prev_alias_key = alias_key
alias_key = await etcd.get(f'images/_aliases/{alias_key}')
if alias_key is None:
alias_target = prev_alias_key
break
repeats += 1
else:
raise AliasResolutionFailed('Could not resolve the given image name!')
known_registries = await get_known_registries(etcd)
return cls(alias_target, known_registries) | python | async def resolve_alias(cls, alias_key: str, etcd: etcd.AsyncEtcd):
'''
Resolve the tag using etcd so that the current instance indicates
a concrete, latest image.
Note that alias resolving does not take the registry component into
account.
'''
alias_target = None
repeats = 0
while repeats < 8:
prev_alias_key = alias_key
alias_key = await etcd.get(f'images/_aliases/{alias_key}')
if alias_key is None:
alias_target = prev_alias_key
break
repeats += 1
else:
raise AliasResolutionFailed('Could not resolve the given image name!')
known_registries = await get_known_registries(etcd)
return cls(alias_target, known_registries) | [
"async",
"def",
"resolve_alias",
"(",
"cls",
",",
"alias_key",
":",
"str",
",",
"etcd",
":",
"etcd",
".",
"AsyncEtcd",
")",
":",
"alias_target",
"=",
"None",
"repeats",
"=",
"0",
"while",
"repeats",
"<",
"8",
":",
"prev_alias_key",
"=",
"alias_key",
"ali... | Resolve the tag using etcd so that the current instance indicates
a concrete, latest image.
Note that alias resolving does not take the registry component into
account. | [
"Resolve",
"the",
"tag",
"using",
"etcd",
"so",
"that",
"the",
"current",
"instance",
"indicates",
"a",
"concrete",
"latest",
"image",
"."
] | 20b3a2551ee5bb3b88e7836471bc244a70ad0ae6 | https://github.com/lablup/backend.ai-common/blob/20b3a2551ee5bb3b88e7836471bc244a70ad0ae6/src/ai/backend/common/types.py#L249-L269 | train | 51,175 |
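The bounded alias-chasing loop from `resolve_alias`, restated with a dict-backed stub in place of etcd (`follow_aliases` and the table contents are illustrative):

import asyncio

async def follow_aliases(get, key, max_hops=8):
    for _ in range(max_hops):
        nxt = await get(f'images/_aliases/{key}')
        if nxt is None:
            return key  # `key` is now a concrete image name
        key = nxt
    raise RuntimeError('alias chain too long')

table = {'images/_aliases/python': 'python:3.6',
         'images/_aliases/python:3.6': 'python:3.6-ubuntu18.04'}

async def stub_get(k):
    return table.get(k)

print(asyncio.run(follow_aliases(stub_get, 'python')))
# -> 'python:3.6-ubuntu18.04'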
anteater/anteater | anteater/main.py | _init_logging | def _init_logging(anteater_log):
""" Setup root logger for package """
LOG.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - '
'%(levelname)s - %(message)s')
ch.setFormatter(formatter)
ch.setLevel(logging.DEBUG)
# create the directory if it does not exist
path = os.path.dirname(anteater_log)
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
handler = logging.FileHandler(anteater_log)
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
del logging.root.handlers[:]
logging.root.addHandler(ch)
logging.root.addHandler(handler) | python | def _init_logging(anteater_log):
""" Setup root logger for package """
LOG.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - '
'%(levelname)s - %(message)s')
ch.setFormatter(formatter)
ch.setLevel(logging.DEBUG)
# create the directory if it does not exist
path = os.path.dirname(anteater_log)
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
handler = logging.FileHandler(anteater_log)
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
del logging.root.handlers[:]
logging.root.addHandler(ch)
logging.root.addHandler(handler) | [
"def",
"_init_logging",
"(",
"anteater_log",
")",
":",
"LOG",
".",
"setLevel",
"(",
"logging",
".",
"DEBUG",
")",
"ch",
"=",
"logging",
".",
"StreamHandler",
"(",
")",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"'%(asctime)s - %(name)s - '",
"'%(level... | Setup root logger for package | [
"Setup",
"root",
"logger",
"for",
"package"
] | a980adbed8563ef92494f565acd371e91f50f155 | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/main.py#L43-L66 | train | 51,176 |
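Assuming `_init_logging` above is in scope, a minimal invocation sketch; the log path is a throwaway temp location, and the function creates the parent directory itself:

import logging
import os
import tempfile

log_path = os.path.join(tempfile.mkdtemp(), 'anteater', 'anteater.log')
_init_logging(log_path)
logging.getLogger('demo').debug('written to the console and to %s', log_path)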
anteater/anteater | anteater/main.py | check_dir | def check_dir():
""" Creates a directory for scan reports """
try:
os.makedirs(reports_dir)
logger.info('Creating reports directory: %s', reports_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise | python | def check_dir():
""" Creates a directory for scan reports """
try:
os.makedirs(reports_dir)
logger.info('Creating reports directory: %s', reports_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise | [
"def",
"check_dir",
"(",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"reports_dir",
")",
"logger",
".",
"info",
"(",
"'Creating reports directory: %s'",
",",
"reports_dir",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"!=",
"e... | Creates a directory for scan reports | [
"Creates",
"a",
"directory",
"for",
"scan",
"reports"
] | a980adbed8563ef92494f565acd371e91f50f155 | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/main.py#L69-L76 | train | 51,177 |
anteater/anteater | anteater/main.py | main | def main():
""" Main function, mostly for passing arguments """
_init_logging(config.get('config', 'anteater_log'))
check_dir()
arguments = docopt(__doc__, version=__version__)
if arguments['<patchset>']:
prepare_patchset(arguments['<project>'], arguments['<patchset>'],
arguments['--binaries'], arguments['--ips'], arguments['--urls'])
elif arguments['<project_path>']:
prepare_project(arguments['<project>'], arguments['<project_path>'],
arguments['--binaries'], arguments['--ips'], arguments['--urls']) | python | def main():
""" Main function, mostly for passing arguments """
_init_logging(config.get('config', 'anteater_log'))
check_dir()
arguments = docopt(__doc__, version=__version__)
if arguments['<patchset>']:
prepare_patchset(arguments['<project>'], arguments['<patchset>'],
arguments['--binaries'], arguments['--ips'], arguments['--urls'])
elif arguments['<project_path>']:
prepare_project(arguments['<project>'], arguments['<project_path>'],
arguments['--binaries'], arguments['--ips'], arguments['--urls']) | [
"def",
"main",
"(",
")",
":",
"_init_logging",
"(",
"config",
".",
"get",
"(",
"'config'",
",",
"'anteater_log'",
")",
")",
"check_dir",
"(",
")",
"arguments",
"=",
"docopt",
"(",
"__doc__",
",",
"version",
"=",
"__version__",
")",
"if",
"arguments",
"["... | Main function, mostly for passing arguments | [
"Main",
"function",
"mostly",
"for",
"passing",
"arguments"
] | a980adbed8563ef92494f565acd371e91f50f155 | https://github.com/anteater/anteater/blob/a980adbed8563ef92494f565acd371e91f50f155/anteater/main.py#L79-L90 | train | 51,178 |
hammerlab/stanity | stanity/fit.py | fit | def fit(model_code, *args, **kwargs):
"""
Fit a Stan model. Caches the compiled model.
*args and **kwargs are passed to the pystan.stan function.
Arguments you most likely want to pass: data, init, iter, chains.
Unlike pystan.stan, if the n_jobs kwarg is not specified, it defaults to
-1.
Parameters
-------------------
model_code : string
Stan model
Returns
-------------------
pystan StanFit4Model instance : the fit model
"""
kwargs = dict(kwargs)
kwargs['model_code'] = model_code
if 'n_jobs' not in kwargs:
kwargs['n_jobs'] = -1
if model_code in FIT_CACHE:
print("Reusing model.")
kwargs['fit'] = FIT_CACHE[model_code]
else:
print("NOT reusing model.")
start = time.time()
FIT_CACHE[model_code] = pystan.stan(*args, **kwargs)
print("Ran in %0.3f sec." % (time.time() - start))
return FIT_CACHE[model_code] | python | def fit(model_code, *args, **kwargs):
"""
Fit a Stan model. Caches the compiled model.
*args and **kwargs are passed to the pystan.stan function.
Arguments you most likely want to pass: data, init, iter, chains.
Unlike pystan.stan, if the n_jobs kwarg is not specified, it defaults to
-1.
Parameters
-------------------
model_code : string
Stan model
Returns
-------------------
pystan StanFit4Model instance : the fit model
"""
kwargs = dict(kwargs)
kwargs['model_code'] = model_code
if 'n_jobs' not in kwargs:
kwargs['n_jobs'] = -1
if model_code in FIT_CACHE:
print("Reusing model.")
kwargs['fit'] = FIT_CACHE[model_code]
else:
print("NOT reusing model.")
start = time.time()
FIT_CACHE[model_code] = pystan.stan(*args, **kwargs)
print("Ran in %0.3f sec." % (time.time() - start))
return FIT_CACHE[model_code] | [
"def",
"fit",
"(",
"model_code",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"=",
"dict",
"(",
"kwargs",
")",
"kwargs",
"[",
"'model_code'",
"]",
"=",
"model_code",
"if",
"'n_jobs'",
"not",
"in",
"kwargs",
":",
"kwargs",
"[",
"'n_j... | Fit a Stan model. Caches the compiled model.
*args and **kwargs are passed to the pystan.stan function.
Arguments you most likely want to pass: data, init, iter, chains.
Unlike pystan.stan, if the n_jobs kwarg is not specified, it defaults to
-1.
Parameters
-------------------
model_code : string
Stan model
Returns
-------------------
pystan StanFit4Model instance : the fit model | [
"Fit",
"a",
"Stan",
"model",
".",
"Caches",
"the",
"compiled",
"model",
"."
] | 6c36abc207c4ce94f78968501dab839a56f35a41 | https://github.com/hammerlab/stanity/blob/6c36abc207c4ce94f78968501dab839a56f35a41/stanity/fit.py#L6-L39 | train | 51,179 |
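A usage sketch for `fit` (requires pystan; the model and data are toy values). The second call with identical model code hits `FIT_CACHE` and skips recompilation:

normal_model = '''
data { int<lower=0> N; vector[N] y; }
parameters { real mu; }
model { y ~ normal(mu, 1); }
'''
data = {'N': 3, 'y': [0.1, -0.2, 0.4]}
fit1 = fit(normal_model, data=data, iter=500, chains=2)  # compiles, then samples
fit2 = fit(normal_model, data=data, iter=500, chains=2)  # prints "Reusing model."
print(fit2.extract()['mu'].mean())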
rainwoodman/kdcount | kdcount/__init__.py | KDNode.count | def count(self, other, r, attrs=None, info={}):
""" Gray & Moore based fast dual tree counting.
r is the edge of bins:
-inf or r[i-1] < count[i] <= r[i]
attrs: None or tuple
if tuple, attrs = (attr_self, attr_other)
Returns: count,
            count, weight if attrs is not None
"""
r = numpy.array(r, dtype='f8')
return _core.KDNode.count(self, other, r, attrs, info=info) | python | def count(self, other, r, attrs=None, info={}):
""" Gray & Moore based fast dual tree counting.
r is the edge of bins:
-inf or r[i-1] < count[i] <= r[i]
attrs: None or tuple
if tuple, attrs = (attr_self, attr_other)
Returns: count,
            count, weight if attrs is not None
"""
r = numpy.array(r, dtype='f8')
return _core.KDNode.count(self, other, r, attrs, info=info) | [
"def",
"count",
"(",
"self",
",",
"other",
",",
"r",
",",
"attrs",
"=",
"None",
",",
"info",
"=",
"{",
"}",
")",
":",
"r",
"=",
"numpy",
".",
"array",
"(",
"r",
",",
"dtype",
"=",
"'f8'",
")",
"return",
"_core",
".",
"KDNode",
".",
"count",
"... | Gray & Moore based fast dual tree counting.
r is the edge of bins:
-inf or r[i-1] < count[i] <= r[i]
attrs: None or tuple
if tuple, attrs = (attr_self, attr_other)
Returns: count,
        count, weight if attrs is not None
"Gray",
"&",
"Moore",
"based",
"fast",
"dual",
"tree",
"counting",
"."
] | 483548f6d27a4f245cd5d98880b5f4edd6cc8dc1 | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/__init__.py#L61-L77 | train | 51,180 |
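A pair-counting sketch for `KDNode.count`. It assumes kdcount exposes a `KDTree` constructor whose `.root` is a `KDNode`; adjust the entry point if your build differs:

import numpy as np
from kdcount import KDTree

pos = np.random.uniform(size=(1000, 3)).astype('f8')
root = KDTree(pos).root
edges = np.array([0.05, 0.1, 0.2])
counts = root.count(root, edges)  # auto pair counts per the bin-edge convention above
print(counts)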
rainwoodman/kdcount | kdcount/__init__.py | KDNode.fof | def fof(self, linkinglength, out=None, method='splay'):
""" Friend-of-Friend clustering with linking length.
Returns: the label
"""
if out is None:
out = numpy.empty(self.size, dtype='intp')
return _core.KDNode.fof(self, linkinglength, out, method) | python | def fof(self, linkinglength, out=None, method='splay'):
""" Friend-of-Friend clustering with linking length.
Returns: the label
"""
if out is None:
out = numpy.empty(self.size, dtype='intp')
return _core.KDNode.fof(self, linkinglength, out, method) | [
"def",
"fof",
"(",
"self",
",",
"linkinglength",
",",
"out",
"=",
"None",
",",
"method",
"=",
"'splay'",
")",
":",
"if",
"out",
"is",
"None",
":",
"out",
"=",
"numpy",
".",
"empty",
"(",
"self",
".",
"size",
",",
"dtype",
"=",
"'intp'",
")",
"ret... | Friend-of-Friend clustering with linking length.
Returns: the label | [
"Friend",
"-",
"of",
"-",
"Friend",
"clustering",
"with",
"linking",
"length",
"."
] | 483548f6d27a4f245cd5d98880b5f4edd6cc8dc1 | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/__init__.py#L79-L86 | train | 51,181 |
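A friends-of-friends sketch, with the same assumed KDTree construction as above:

import numpy
from kdcount import KDTree

pos = numpy.random.uniform(size=(10000, 3))
root = KDTree(pos).root

labels = root.fof(0.01)               # one integer group label per point
ngroups = len(numpy.unique(labels))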
rainwoodman/kdcount | kdcount/__init__.py | KDNode.integrate | def integrate(self, min, max, attr=None, info={}):
""" Calculate the total number of points between [min, max).
If attr is given, also calculate the sum of the weight.
This is a M log(N) operation, where M is the number of min/max
queries and N is number of points.
"""
if numpy.isscalar(min):
min = [min for i in range(self.ndims)]
if numpy.isscalar(max):
max = [max for i in range(self.ndims)]
min = numpy.array(min, dtype='f8', order='C')
max = numpy.array(max, dtype='f8', order='C')
if (min).shape[-1] != self.ndims:
raise ValueError("dimension of min does not match Node")
if (max).shape[-1] != self.ndims:
raise ValueError("dimension of max does not match Node")
min, max = broadcast_arrays(min, max)
return _core.KDNode.integrate(self, min, max, attr, info) | python | def integrate(self, min, max, attr=None, info={}):
""" Calculate the total number of points between [min, max).
If attr is given, also calculate the sum of the weight.
This is a M log(N) operation, where M is the number of min/max
queries and N is number of points.
"""
if numpy.isscalar(min):
min = [min for i in range(self.ndims)]
if numpy.isscalar(max):
max = [max for i in range(self.ndims)]
min = numpy.array(min, dtype='f8', order='C')
max = numpy.array(max, dtype='f8', order='C')
if (min).shape[-1] != self.ndims:
raise ValueError("dimension of min does not match Node")
if (max).shape[-1] != self.ndims:
raise ValueError("dimension of max does not match Node")
min, max = broadcast_arrays(min, max)
return _core.KDNode.integrate(self, min, max, attr, info) | [
"def",
"integrate",
"(",
"self",
",",
"min",
",",
"max",
",",
"attr",
"=",
"None",
",",
"info",
"=",
"{",
"}",
")",
":",
"if",
"numpy",
".",
"isscalar",
"(",
"min",
")",
":",
"min",
"=",
"[",
"min",
"for",
"i",
"in",
"range",
"(",
"self",
"."... | Calculate the total number of points between [min, max).
If attr is given, also calculate the sum of the weight.
This is a M log(N) operation, where M is the number of min/max
queries and N is number of points. | [
"Calculate",
"the",
"total",
"number",
"of",
"points",
"between",
"[",
"min",
"max",
")",
"."
] | 483548f6d27a4f245cd5d98880b5f4edd6cc8dc1 | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/__init__.py#L88-L110 | train | 51,182 |
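A box-counting sketch for integrate; scalar min/max are expanded to every dimension and array inputs are broadcast against each other, exactly as the code above does:

import numpy
from kdcount import KDTree

pos = numpy.random.uniform(size=(1000, 2))
root = KDTree(pos).root

n = root.integrate(0.25, 0.75)          # points with 0.25 <= x_i < 0.75 in both dims

mins = numpy.array([[0.0, 0.0], [0.5, 0.5]])
maxs = numpy.array([[0.5, 0.5], [1.0, 1.0]])
n_per_box = root.integrate(mins, maxs)  # one count per [min, max) box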
rainwoodman/kdcount | kdcount/__init__.py | KDNode.make_forest | def make_forest(self, chunksize):
""" Divide a tree branch to a forest,
each subtree of size at most chunksize """
heap = []
heappush(heap, (-self.size, self))
while True:
w, x = heappop(heap)
if w == 0:
heappush(heap, (0, x))
break
if x.less is None \
or (x.size < chunksize):
heappush(heap, (0, x))
continue
heappush(heap, (x.less.size, x.less))
heappush(heap, (x.greater.size, x.greater))
for w, x in heap:
yield x | python | def make_forest(self, chunksize):
""" Divide a tree branch to a forest,
each subtree of size at most chunksize """
heap = []
heappush(heap, (-self.size, self))
while True:
w, x = heappop(heap)
if w == 0:
heappush(heap, (0, x))
break
if x.less is None \
or (x.size < chunksize):
heappush(heap, (0, x))
continue
heappush(heap, (x.less.size, x.less))
heappush(heap, (x.greater.size, x.greater))
for w, x in heap:
yield x | [
"def",
"make_forest",
"(",
"self",
",",
"chunksize",
")",
":",
"heap",
"=",
"[",
"]",
"heappush",
"(",
"heap",
",",
"(",
"-",
"self",
".",
"size",
",",
"self",
")",
")",
"while",
"True",
":",
"w",
",",
"x",
"=",
"heappop",
"(",
"heap",
")",
"if... | Divide a tree branch to a forest,
each subtree of size at most chunksize | [
"Divide",
"a",
"tree",
"branch",
"to",
"a",
"forest",
"each",
"subtree",
"of",
"size",
"at",
"most",
"chunksize"
] | 483548f6d27a4f245cd5d98880b5f4edd6cc8dc1 | https://github.com/rainwoodman/kdcount/blob/483548f6d27a4f245cd5d98880b5f4edd6cc8dc1/kdcount/__init__.py#L112-L129 | train | 51,183 |
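A traversal sketch for make_forest; the generator semantics follow directly from the code above, and process() stands in for a hypothetical per-chunk worker:

import numpy
from kdcount import KDTree

root = KDTree(numpy.random.uniform(size=(100000, 3))).root

# break the tree into subtrees of at most ~10000 points each
for subtree in root.make_forest(chunksize=10000):
    process(subtree)                  # hypothetical worker, e.g. dispatched to a pool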
Numigi/gitoo | src/cli.py | _install_all | def _install_all(destination='', conf_file=''):
"""Use the conf file to list all the third party Odoo add-ons that will be installed
and the patches that should be applied.
:param string destination: the folder where add-ons should end up.
Default: pwd/3rd
:param string conf_file: path to a conf file that describes the add-ons to install.
Default: pwd/third_party_addons.yaml
"""
dir_path = os.path.dirname(os.path.realpath(__file__))
destination = destination or os.path.join(dir_path, '..', '3rd')
conf_file = conf_file or os.path.join(dir_path, '..', "third_party_addons.yaml")
work_directory = os.path.dirname(os.path.realpath(conf_file))
with open(conf_file, "r") as conf_data:
data = yaml.load(conf_data)
for addons in data:
_install_one(
addons['url'],
addons['branch'],
os.path.abspath(destination),
commit=addons.get('commit'),
patches=addons.get('patches'),
exclude_modules=addons.get('excludes'),
include_modules=addons.get('includes'),
base=addons.get('base'),
work_directory=work_directory,
) | python | def _install_all(destination='', conf_file=''):
"""Use the conf file to list all the third party Odoo add-ons that will be installed
and the patches that should be applied.
:param string destination: the folder where add-ons should end up.
Default: pwd/3rd
:param string conf_file: path to a conf file that describes the add-ons to install.
Default: pwd/third_party_addons.yaml
"""
dir_path = os.path.dirname(os.path.realpath(__file__))
destination = destination or os.path.join(dir_path, '..', '3rd')
conf_file = conf_file or os.path.join(dir_path, '..', "third_party_addons.yaml")
work_directory = os.path.dirname(os.path.realpath(conf_file))
with open(conf_file, "r") as conf_data:
data = yaml.load(conf_data)
for addons in data:
_install_one(
addons['url'],
addons['branch'],
os.path.abspath(destination),
commit=addons.get('commit'),
patches=addons.get('patches'),
exclude_modules=addons.get('excludes'),
include_modules=addons.get('includes'),
base=addons.get('base'),
work_directory=work_directory,
) | [
"def",
"_install_all",
"(",
"destination",
"=",
"''",
",",
"conf_file",
"=",
"''",
")",
":",
"dir_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"realpath",
"(",
"__file__",
")",
")",
"destination",
"=",
"destination",
"or"... | Use the conf file to list all the third party Odoo add-ons that will be installed
and the patches that should be applied.
:param string destination: the folder where add-ons should end up.
Default: pwd/3rd
:param string conf_file: path to a conf file that describes the add-ons to install.
Default: pwd/third_party_addons.yaml | [
"Use",
"the",
"conf",
"file",
"to",
"list",
"all",
"the",
"third",
"party",
"Odoo",
"add",
"-",
"ons",
"that",
"will",
"be",
"installed",
"and",
"the",
"patches",
"that",
"should",
"be",
"applied",
"."
] | 0921f5fb8a948021760bb0373a40f9fbe8a4a2e5 | https://github.com/Numigi/gitoo/blob/0921f5fb8a948021760bb0373a40f9fbe8a4a2e5/src/cli.py#L69-L96 | train | 51,184 |
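A sketch of the conf file consumed above; url and branch are the keys the loop requires, while commit, patches, excludes, includes and base are optional. The repositories and module names here are illustrative only:

- url: https://github.com/OCA/web
  branch: "12.0"
- url: https://github.com/OCA/server-tools
  branch: "12.0"
  commit: 0123abc                # pin a specific commit (illustrative)
  excludes:
    - some_module_to_skip        # hypothetical module name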
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | find_lt | def find_lt(a, x):
"""Find rightmost value less than x"""
i = bisect.bisect_left(a, x)
if i:
return a[i-1]
raise ValueError | python | def find_lt(a, x):
"""Find rightmost value less than x"""
i = bisect.bisect_left(a, x)
if i:
return a[i-1]
raise ValueError | [
"def",
"find_lt",
"(",
"a",
",",
"x",
")",
":",
"i",
"=",
"bisect",
".",
"bisect_left",
"(",
"a",
",",
"x",
")",
"if",
"i",
":",
"return",
"a",
"[",
"i",
"-",
"1",
"]",
"raise",
"ValueError"
] | Find rightmost value less than x | [
"Find",
"rightmost",
"value",
"less",
"than",
"x"
] | fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27 | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L36-L41 | train | 51,185 |
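A worked example of the bisection above; a must already be sorted for bisect to behave:

from bcbio.isatab.parser import find_lt

a = [1, 3, 5, 7]
find_lt(a, 6)    # -> 5, the rightmost value strictly less than 6
find_lt(a, 5)    # -> 3, since the comparison is strict
find_lt(a, 1)    # raises ValueError: no value is less than 1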
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | parse | def parse(isatab_ref):
"""Entry point to parse an ISA-Tab directory.
isatab_ref can point to a directory of ISA-Tab data, in which case we
search for the investigator file, or be a reference to the high level
investigation file.
"""
if os.path.isdir(isatab_ref):
fnames = glob.glob(os.path.join(isatab_ref, "i_*.txt")) + \
glob.glob(os.path.join(isatab_ref, "*.idf.txt"))
assert len(fnames) == 1
isatab_ref = fnames[0]
assert os.path.exists(isatab_ref), "Did not find investigation file: %s" % isatab_ref
i_parser = InvestigationParser()
with open(isatab_ref, "rU") as in_handle:
rec = i_parser.parse(in_handle)
s_parser = StudyAssayParser(isatab_ref)
rec = s_parser.parse(rec)
return rec | python | def parse(isatab_ref):
"""Entry point to parse an ISA-Tab directory.
isatab_ref can point to a directory of ISA-Tab data, in which case we
search for the investigator file, or be a reference to the high level
investigation file.
"""
if os.path.isdir(isatab_ref):
fnames = glob.glob(os.path.join(isatab_ref, "i_*.txt")) + \
glob.glob(os.path.join(isatab_ref, "*.idf.txt"))
assert len(fnames) == 1
isatab_ref = fnames[0]
assert os.path.exists(isatab_ref), "Did not find investigation file: %s" % isatab_ref
i_parser = InvestigationParser()
with open(isatab_ref, "rU") as in_handle:
rec = i_parser.parse(in_handle)
s_parser = StudyAssayParser(isatab_ref)
rec = s_parser.parse(rec)
return rec | [
"def",
"parse",
"(",
"isatab_ref",
")",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"isatab_ref",
")",
":",
"fnames",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"isatab_ref",
",",
"\"i_*.txt\"",
")",
")",
"+",
"glob",
... | Entry point to parse an ISA-Tab directory.
isatab_ref can point to a directory of ISA-Tab data, in which case we
search for the investigator file, or be a reference to the high level
investigation file. | [
"Entry",
"point",
"to",
"parse",
"an",
"ISA",
"-",
"Tab",
"directory",
".",
"isatab_ref",
"can",
"point",
"to",
"a",
"directory",
"of",
"ISA",
"-",
"Tab",
"data",
"in",
"which",
"case",
"we",
"search",
"for",
"the",
"investigator",
"file",
"or",
"be",
... | fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27 | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L51-L68 | train | 51,186 |
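A usage sketch for the entry point (the path is illustrative):

from bcbio.isatab.parser import parse

rec = parse("/path/to/isatab_directory")      # or pass the i_*.txt file directly
print(rec.metadata)                           # investigation-level key/values
for study in rec.studies:
    print(study.metadata["Study File Name"], len(study.assays))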
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | InvestigationParser._parse_region | def _parse_region(self, rec, line_iter):
"""Parse a section of an ISA-Tab, assigning information to a supplied record.
"""
had_info = False
keyvals, section = self._parse_keyvals(line_iter)
if keyvals:
rec.metadata = keyvals[0]
while section and section[0] != "STUDY":
had_info = True
keyvals, next_section = self._parse_keyvals(line_iter)
attr_name = self._sections[section[0]]
if attr_name in self._nolist:
try:
keyvals = keyvals[0]
except IndexError:
keyvals = {}
setattr(rec, attr_name, keyvals)
section = next_section
return rec, had_info | python | def _parse_region(self, rec, line_iter):
"""Parse a section of an ISA-Tab, assigning information to a supplied record.
"""
had_info = False
keyvals, section = self._parse_keyvals(line_iter)
if keyvals:
rec.metadata = keyvals[0]
while section and section[0] != "STUDY":
had_info = True
keyvals, next_section = self._parse_keyvals(line_iter)
attr_name = self._sections[section[0]]
if attr_name in self._nolist:
try:
keyvals = keyvals[0]
except IndexError:
keyvals = {}
setattr(rec, attr_name, keyvals)
section = next_section
return rec, had_info | [
"def",
"_parse_region",
"(",
"self",
",",
"rec",
",",
"line_iter",
")",
":",
"had_info",
"=",
"False",
"keyvals",
",",
"section",
"=",
"self",
".",
"_parse_keyvals",
"(",
"line_iter",
")",
"if",
"keyvals",
":",
"rec",
".",
"metadata",
"=",
"keyvals",
"["... | Parse a section of an ISA-Tab, assigning information to a supplied record. | [
"Parse",
"a",
"section",
"of",
"an",
"ISA",
"-",
"Tab",
"assigning",
"information",
"to",
"a",
"supplied",
"record",
"."
] | fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27 | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L109-L129 | train | 51,187 |
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | InvestigationParser._line_iter | def _line_iter(self, in_handle):
"""Read tab delimited file, handling ISA-Tab special case headers.
"""
reader = csv.reader(in_handle, dialect="excel-tab")
for line in reader:
if len(line) > 0 and line[0]:
# check for section headers; all uppercase and a single value
if line[0].upper() == line[0] and "".join(line[1:]) == "":
line = [line[0]]
yield line | python | def _line_iter(self, in_handle):
"""Read tab delimited file, handling ISA-Tab special case headers.
"""
reader = csv.reader(in_handle, dialect="excel-tab")
for line in reader:
if len(line) > 0 and line[0]:
# check for section headers; all uppercase and a single value
if line[0].upper() == line[0] and "".join(line[1:]) == "":
line = [line[0]]
yield line | [
"def",
"_line_iter",
"(",
"self",
",",
"in_handle",
")",
":",
"reader",
"=",
"csv",
".",
"reader",
"(",
"in_handle",
",",
"dialect",
"=",
"\"excel-tab\"",
")",
"for",
"line",
"in",
"reader",
":",
"if",
"len",
"(",
"line",
")",
">",
"0",
"and",
"line"... | Read tab delimited file, handling ISA-Tab special case headers. | [
"Read",
"tab",
"delimited",
"file",
"handling",
"ISA",
"-",
"Tab",
"special",
"case",
"headers",
"."
] | fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27 | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L131-L140 | train | 51,188 |
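A small demonstration of the section-header collapsing in _line_iter, fed from an in-memory file; InvestigationParser() takes no constructor arguments, as seen in parse() above:

import io
from bcbio.isatab.parser import InvestigationParser

handle = io.StringIO("STUDY\t\t\nStudy Title\tMy study\t\n")
for line in InvestigationParser()._line_iter(handle):
    print(line)
# ['STUDY']                          <- all-uppercase with empty tail: section header
# ['Study Title', 'My study', '']   <- ordinary key/value row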
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | StudyAssayParser.parse | def parse(self, rec):
"""Retrieve row data from files associated with the ISATabRecord.
"""
final_studies = []
for study in rec.studies:
source_data = self._parse_study(study.metadata["Study File Name"],
["Source Name", "Sample Name", "Comment[ENA_SAMPLE]"])
if source_data:
study.nodes = source_data
final_assays = []
for assay in study.assays:
cur_assay = ISATabAssayRecord(assay)
assay_data = self._parse_study(assay["Study Assay File Name"],
["Sample Name","Extract Name","Raw Data File","Derived Data File", "Image File", "Acquisition Parameter Data File", "Free Induction Decay Data File"])
cur_assay.nodes = assay_data
self._get_process_nodes(assay["Study Assay File Name"], cur_assay)
final_assays.append(cur_assay)
study.assays = final_assays
#get process nodes
self._get_process_nodes(study.metadata["Study File Name"], study)
final_studies.append(study)
rec.studies = final_studies
return rec | python | def parse(self, rec):
"""Retrieve row data from files associated with the ISATabRecord.
"""
final_studies = []
for study in rec.studies:
source_data = self._parse_study(study.metadata["Study File Name"],
["Source Name", "Sample Name", "Comment[ENA_SAMPLE]"])
if source_data:
study.nodes = source_data
final_assays = []
for assay in study.assays:
cur_assay = ISATabAssayRecord(assay)
assay_data = self._parse_study(assay["Study Assay File Name"],
["Sample Name","Extract Name","Raw Data File","Derived Data File", "Image File", "Acquisition Parameter Data File", "Free Induction Decay Data File"])
cur_assay.nodes = assay_data
self._get_process_nodes(assay["Study Assay File Name"], cur_assay)
final_assays.append(cur_assay)
study.assays = final_assays
#get process nodes
self._get_process_nodes(study.metadata["Study File Name"], study)
final_studies.append(study)
rec.studies = final_studies
return rec | [
"def",
"parse",
"(",
"self",
",",
"rec",
")",
":",
"final_studies",
"=",
"[",
"]",
"for",
"study",
"in",
"rec",
".",
"studies",
":",
"source_data",
"=",
"self",
".",
"_parse_study",
"(",
"study",
".",
"metadata",
"[",
"\"Study File Name\"",
"]",
",",
"... | Retrieve row data from files associated with the ISATabRecord. | [
"Retrieve",
"row",
"data",
"from",
"files",
"associated",
"with",
"the",
"ISATabRecord",
"."
] | fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27 | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L193-L216 | train | 51,189 |
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | StudyAssayParser._parse_study | def _parse_study(self, fname, node_types):
"""Parse study or assay row oriented file around the supplied base node.
"""
if not os.path.exists(os.path.join(self._dir, fname)):
return None
nodes = {}
with open(os.path.join(self._dir, fname), "rU") as in_handle:
reader = csv.reader(in_handle, dialect="excel-tab")
header = self._swap_synonyms(next(reader))
hgroups = self._collapse_header(header)
htypes = self._characterize_header(header, hgroups)
for node_type in node_types:
try:
name_index = header.index(node_type)
except ValueError:
name_index = None
if name_index is None:
#print "Could not find standard header name: %s in %s" \
# % (node_type, header)
continue
in_handle.seek(0, 0)
for line in reader:
name = line[name_index]
#to deal with same name used for different node types (e.g. Source Name and Sample Name using the same string)
node_index = self._build_node_index(node_type,name)
#skip the header line and empty lines
if name in header:
continue
if (not name):
continue
try:
node = nodes[node_index]
except KeyError:
#print("creating node ", name, " index", node_index)
node = NodeRecord(name, node_type)
node.metadata = collections.defaultdict(set)
nodes[node_index] = node
attrs = self._line_keyvals(line, header, hgroups, htypes, node.metadata)
nodes[node_index].metadata = attrs
return dict([(k, self._finalize_metadata(v)) for k, v in nodes.items()]) | python | def _parse_study(self, fname, node_types):
"""Parse study or assay row oriented file around the supplied base node.
"""
if not os.path.exists(os.path.join(self._dir, fname)):
return None
nodes = {}
with open(os.path.join(self._dir, fname), "rU") as in_handle:
reader = csv.reader(in_handle, dialect="excel-tab")
header = self._swap_synonyms(next(reader))
hgroups = self._collapse_header(header)
htypes = self._characterize_header(header, hgroups)
for node_type in node_types:
try:
name_index = header.index(node_type)
except ValueError:
name_index = None
if name_index is None:
#print "Could not find standard header name: %s in %s" \
# % (node_type, header)
continue
in_handle.seek(0, 0)
for line in reader:
name = line[name_index]
#to deal with same name used for different node types (e.g. Source Name and Sample Name using the same string)
node_index = self._build_node_index(node_type,name)
#skip the header line and empty lines
if name in header:
continue
if (not name):
continue
try:
node = nodes[node_index]
except KeyError:
#print("creating node ", name, " index", node_index)
node = NodeRecord(name, node_type)
node.metadata = collections.defaultdict(set)
nodes[node_index] = node
attrs = self._line_keyvals(line, header, hgroups, htypes, node.metadata)
nodes[node_index].metadata = attrs
return dict([(k, self._finalize_metadata(v)) for k, v in nodes.items()]) | [
"def",
"_parse_study",
"(",
"self",
",",
"fname",
",",
"node_types",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_dir",
",",
"fname",
")",
")",
":",
"return",
"None",
"nodes",
"... | Parse study or assay row oriented file around the supplied base node. | [
"Parse",
"study",
"or",
"assay",
"row",
"oriented",
"file",
"around",
"the",
"supplied",
"base",
"node",
"."
] | fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27 | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L292-L335 | train | 51,190 |
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | StudyAssayParser._finalize_metadata | def _finalize_metadata(self, node):
"""Convert node metadata back into a standard dictionary and list.
"""
final = {}
for key, val in iter(node.metadata.items()):
#val = list(val)
#if isinstance(val[0], tuple):
# val = [dict(v) for v in val]
final[key] = list(val)
node.metadata = final
return node | python | def _finalize_metadata(self, node):
"""Convert node metadata back into a standard dictionary and list.
"""
final = {}
for key, val in iter(node.metadata.items()):
#val = list(val)
#if isinstance(val[0], tuple):
# val = [dict(v) for v in val]
final[key] = list(val)
node.metadata = final
return node | [
"def",
"_finalize_metadata",
"(",
"self",
",",
"node",
")",
":",
"final",
"=",
"{",
"}",
"for",
"key",
",",
"val",
"in",
"iter",
"(",
"node",
".",
"metadata",
".",
"items",
"(",
")",
")",
":",
"#val = list(val)",
"#if isinstance(val[0], tuple):",
"# val... | Convert node metadata back into a standard dictionary and list. | [
"Convert",
"node",
"metadata",
"back",
"into",
"a",
"standard",
"dictionary",
"and",
"list",
"."
] | fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27 | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L337-L347 | train | 51,191 |
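A tiny illustration of the defaultdict(set) to dict-of-lists conversion; NodeRecord comes from this module, and the StudyAssayParser path is illustrative (the constructor is assumed to only record the base directory):

import collections
from bcbio.isatab.parser import NodeRecord, StudyAssayParser

parser = StudyAssayParser("i_investigation.txt")
node = NodeRecord("sample1", "Sample Name")
node.metadata = collections.defaultdict(set)
node.metadata["Characteristics[Organism]"].add("Homo sapiens")

node = parser._finalize_metadata(node)
node.metadata    # -> {'Characteristics[Organism]': ['Homo sapiens']}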
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | StudyAssayParser._line_by_type | def _line_by_type(self, line, header, hgroups, htypes, out, want_type,
collapse_quals_fn = None):
"""Parse out key value pairs for line information based on a group of values.
"""
for index, htype in ((i, t) for i, t in enumerate(htypes) if t == want_type):
col = hgroups[index][0]
key = header[col]#self._clean_header(header[col])
if collapse_quals_fn:
val = collapse_quals_fn(line, header, hgroups[index])
else:
val = line[col]
out[key].add(val)
return out | python | def _line_by_type(self, line, header, hgroups, htypes, out, want_type,
collapse_quals_fn = None):
"""Parse out key value pairs for line information based on a group of values.
"""
for index, htype in ((i, t) for i, t in enumerate(htypes) if t == want_type):
col = hgroups[index][0]
key = header[col]#self._clean_header(header[col])
if collapse_quals_fn:
val = collapse_quals_fn(line, header, hgroups[index])
else:
val = line[col]
out[key].add(val)
return out | [
"def",
"_line_by_type",
"(",
"self",
",",
"line",
",",
"header",
",",
"hgroups",
",",
"htypes",
",",
"out",
",",
"want_type",
",",
"collapse_quals_fn",
"=",
"None",
")",
":",
"for",
"index",
",",
"htype",
"in",
"(",
"(",
"i",
",",
"t",
")",
"for",
... | Parse out key value pairs for line information based on a group of values. | [
"Parse",
"out",
"key",
"value",
"pairs",
"for",
"line",
"information",
"based",
"on",
"a",
"group",
"of",
"values",
"."
] | fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27 | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L357-L369 | train | 51,192 |
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | StudyAssayParser._collapse_attributes | def _collapse_attributes(self, line, header, indexes):
"""Combine attributes in multiple columns into single named tuple.
"""
names = []
vals = []
pat = re.compile("[\W]+")
for i in indexes:
names.append(pat.sub("_", self._clean_header(header[i])))
vals.append(line[i])
Attrs = collections.namedtuple('Attrs', names)
return Attrs(*vals) | python | def _collapse_attributes(self, line, header, indexes):
"""Combine attributes in multiple columns into single named tuple.
"""
names = []
vals = []
pat = re.compile("[\W]+")
for i in indexes:
names.append(pat.sub("_", self._clean_header(header[i])))
vals.append(line[i])
Attrs = collections.namedtuple('Attrs', names)
return Attrs(*vals) | [
"def",
"_collapse_attributes",
"(",
"self",
",",
"line",
",",
"header",
",",
"indexes",
")",
":",
"names",
"=",
"[",
"]",
"vals",
"=",
"[",
"]",
"pat",
"=",
"re",
".",
"compile",
"(",
"\"[\\W]+\"",
")",
"for",
"i",
"in",
"indexes",
":",
"names",
".... | Combine attributes in multiple columns into single named tuple. | [
"Combine",
"attributes",
"in",
"multiple",
"columns",
"into",
"single",
"named",
"tuple",
"."
] | fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27 | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L371-L381 | train | 51,193 |
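A worked illustration of the namedtuple collapse; the header and values are made up, and the exact field names depend on _clean_header, which is defined elsewhere in this module:

from bcbio.isatab.parser import StudyAssayParser

parser = StudyAssayParser("i_investigation.txt")   # illustrative path
header = ["Sample Name", "Characteristics[Organism]", "Term Source REF"]
line = ["sample1", "Homo sapiens", "NCBITAXON"]

attrs = parser._collapse_attributes(line, header, [1, 2])
# roughly Attrs(Organism='Homo sapiens', Term_Source_REF='NCBITAXON'):
# cleaned header names with non-word characters replaced by "_"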
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | StudyAssayParser._characterize_header | def _characterize_header(self, header, hgroups):
"""Characterize header groups into different data types.
"""
out = []
for h in [header[g[0]] for g in hgroups]:
this_ctype = None
for ctype, names in self._col_types.items():
if h.startswith(names):
this_ctype = ctype
break
out.append(this_ctype)
return out | python | def _characterize_header(self, header, hgroups):
"""Characterize header groups into different data types.
"""
out = []
for h in [header[g[0]] for g in hgroups]:
this_ctype = None
for ctype, names in self._col_types.items():
if h.startswith(names):
this_ctype = ctype
break
out.append(this_ctype)
return out | [
"def",
"_characterize_header",
"(",
"self",
",",
"header",
",",
"hgroups",
")",
":",
"out",
"=",
"[",
"]",
"for",
"h",
"in",
"[",
"header",
"[",
"g",
"[",
"0",
"]",
"]",
"for",
"g",
"in",
"hgroups",
"]",
":",
"this_ctype",
"=",
"None",
"for",
"ct... | Characterize header groups into different data types. | [
"Characterize",
"header",
"groups",
"into",
"different",
"data",
"types",
"."
] | fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27 | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L397-L408 | train | 51,194 |
ISA-tools/biopy-isatab | bcbio/isatab/parser.py | StudyAssayParser._collapse_header | def _collapse_header(self, header):
"""Combine header columns into related groups.
"""
out = []
for i, h in enumerate(header):
if h.startswith(self._col_quals):
out[-1].append(i)
else:
out.append([i])
return out | python | def _collapse_header(self, header):
"""Combine header columns into related groups.
"""
out = []
for i, h in enumerate(header):
if h.startswith(self._col_quals):
out[-1].append(i)
else:
out.append([i])
return out | [
"def",
"_collapse_header",
"(",
"self",
",",
"header",
")",
":",
"out",
"=",
"[",
"]",
"for",
"i",
",",
"h",
"in",
"enumerate",
"(",
"header",
")",
":",
"if",
"h",
".",
"startswith",
"(",
"self",
".",
"_col_quals",
")",
":",
"out",
"[",
"-",
"1",... | Combine header columns into related groups. | [
"Combine",
"header",
"columns",
"into",
"related",
"groups",
"."
] | fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27 | https://github.com/ISA-tools/biopy-isatab/blob/fe42c98184d5eb5f28d8c0b7c3fc63a9b9729f27/bcbio/isatab/parser.py#L410-L419 | train | 51,195 |
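An illustration of the qualifier grouping; whether a column attaches to the previous group depends on self._col_quals (defined elsewhere), so the assumption here is that "Term Source REF" and "Term Accession Number" are in it:

from bcbio.isatab.parser import StudyAssayParser

parser = StudyAssayParser("i_investigation.txt")   # illustrative path
header = ["Sample Name",
          "Characteristics[Organism]", "Term Source REF", "Term Accession Number",
          "Raw Data File"]
parser._collapse_header(header)
# -> [[0], [1, 2, 3], [4]] : qualifier columns join the preceding column's group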
MacHu-GWU/dataIO-project | dataIO/pk.py | load | def load(abspath, default=None, enable_verbose=True):
"""Load Pickle from file. If file are not exists, returns ``default``.
:param abspath: file path. use absolute path as much as you can.
extension has to be ``.pickle`` or ``.gz`` (for compressed Pickle).
:type abspath: string
:param default: default ``dict()``, if ``abspath`` not exists, return the
default Python object instead.
:param enable_verbose: default ``True``, help-message-display trigger.
:type enable_verbose: boolean
Usage::
>>> from dataIO import pk
>>> pk.load("test.pickle") # if you have a pickle file
Load from `test.pickle` ...
Complete! Elapse 0.000432 sec.
{'a': 1, 'b': 2}
**中文文档**
从Pickle文件中读取数据
:param abspath: Pickle文件绝对路径, 扩展名需为 ``.pickle`` 或 ``.gz``, 其中 ``.gz``
是被压缩后的Pickle文件
:type abspath: ``字符串``
:param default: 默认 ``dict()``, 如果文件路径不存在, 则会返回指定的默认值
:param enable_verbose: 默认 ``True``, 信息提示的开关, 批处理时建议关闭
:type enable_verbose: ``布尔值``
"""
if default is None:
default = dict()
prt("\nLoad from '%s' ..." % abspath, enable_verbose)
abspath = lower_ext(str(abspath))
is_pickle = is_pickle_file(abspath)
if not os.path.exists(abspath):
prt(" File not found, use default value: %r" % default,
enable_verbose)
return default
st = time.clock()
if is_pickle:
data = pickle.loads(textfile.readbytes(abspath))
else:
data = pickle.loads(compress.read_gzip(abspath))
prt(" Complete! Elapse %.6f sec." % (time.clock() - st), enable_verbose)
return data | python | def load(abspath, default=None, enable_verbose=True):
"""Load Pickle from file. If file are not exists, returns ``default``.
:param abspath: file path. use absolute path as much as you can.
extension has to be ``.pickle`` or ``.gz`` (for compressed Pickle).
:type abspath: string
:param default: default ``dict()``, if ``abspath`` not exists, return the
default Python object instead.
:param enable_verbose: default ``True``, help-message-display trigger.
:type enable_verbose: boolean
Usage::
>>> from dataIO import pk
>>> pk.load("test.pickle") # if you have a pickle file
Load from `test.pickle` ...
Complete! Elapse 0.000432 sec.
{'a': 1, 'b': 2}
**中文文档**
从Pickle文件中读取数据
:param abspath: Pickle文件绝对路径, 扩展名需为 ``.pickle`` 或 ``.gz``, 其中 ``.gz``
是被压缩后的Pickle文件
:type abspath: ``字符串``
:param default: 默认 ``dict()``, 如果文件路径不存在, 则会返回指定的默认值
:param enable_verbose: 默认 ``True``, 信息提示的开关, 批处理时建议关闭
:type enable_verbose: ``布尔值``
"""
if default is None:
default = dict()
prt("\nLoad from '%s' ..." % abspath, enable_verbose)
abspath = lower_ext(str(abspath))
is_pickle = is_pickle_file(abspath)
if not os.path.exists(abspath):
prt(" File not found, use default value: %r" % default,
enable_verbose)
return default
st = time.clock()
if is_pickle:
data = pickle.loads(textfile.readbytes(abspath))
else:
data = pickle.loads(compress.read_gzip(abspath))
prt(" Complete! Elapse %.6f sec." % (time.clock() - st), enable_verbose)
return data | [
"def",
"load",
"(",
"abspath",
",",
"default",
"=",
"None",
",",
"enable_verbose",
"=",
"True",
")",
":",
"if",
"default",
"is",
"None",
":",
"default",
"=",
"dict",
"(",
")",
"prt",
"(",
"\"\\nLoad from '%s' ...\"",
"%",
"abspath",
",",
"enable_verbose",
... | Load Pickle from file. If the file does not exist, returns ``default``.
:param abspath: file path. use absolute path as much as you can.
extension has to be ``.pickle`` or ``.gz`` (for compressed Pickle).
:type abspath: string
:param default: default ``dict()``, if ``abspath`` not exists, return the
default Python object instead.
:param enable_verbose: default ``True``, help-message-display trigger.
:type enable_verbose: boolean
Usage::
>>> from dataIO import pk
>>> pk.load("test.pickle") # if you have a pickle file
Load from `test.pickle` ...
Complete! Elapse 0.000432 sec.
{'a': 1, 'b': 2}
**中文文档**
从Pickle文件中读取数据
:param abspath: Pickle文件绝对路径, 扩展名需为 ``.pickle`` 或 ``.gz``, 其中 ``.gz``
是被压缩后的Pickle文件
:type abspath: ``字符串``
:param default: 默认 ``dict()``, 如果文件路径不存在, 则会返回指定的默认值
:param enable_verbose: 默认 ``True``, 信息提示的开关, 批处理时建议关闭
:type enable_verbose: ``布尔值`` | [
"Load",
"Pickle",
"from",
"file",
".",
"If",
"file",
"are",
"not",
"exists",
"returns",
"default",
"."
] | 7e1cc192b5e53426eed6dbd742918619b8fd60ab | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/pk.py#L72-L126 | train | 51,196 |
MacHu-GWU/dataIO-project | dataIO/pk.py | dump | def dump(data, abspath, pk_protocol=py23.pk_protocol,
overwrite=False, enable_verbose=True):
"""Dump picklable object to file.
Provides multiple choices to customize the behavior.
:param data: picklable python object.
:type data: dict or list
:param abspath: ``save as`` path, file extension has to be ``.pickle`` or ``.gz``
(for compressed Pickle)
:type abspath: string
:param pk_protocol: default = your python version, use 2, to make a
py2.x/3.x compatible pickle file. But 3 is faster.
:type pk_protocol: int
:param overwrite: default ``False``. If ``True``, when you dump to an existing
file, it is silently overwritten. If ``False``, an alert message is shown.
The default ``False`` is meant to prevent overwriting a file by mistake.
:type overwrite: boolean
:param enable_verbose: default True, help-message-display trigger.
:type enable_verbose: boolean
Usage::
>>> from dataIO import pk
>>> data = {"a": 1, "b": 2}
>>> dump(data, "test.pickle", overwrite=True)
Dump to `test.pickle` ...
Complete! Elapse 0.002432 sec
**中文文档**
将Python中可被序列化的"字典", "列表"以及他们的组合, 按照Json的编码方式写入文件
文件
参数列表
:param data: 可Pickle化的Python对象
:type data: ``字典`` 或 ``列表``
:param abspath: Pickle文件绝对路径, 扩展名需为 ``.pickle`` 或 ``.gz``, 其中 ``.gz``
是被压缩后的Pickle文件
:type abspath: ``字符串``
:param pk_protocol: 默认值为你的Python大版本号, 使用2可以使得Python2/3都能
兼容你的Pickle文件。不过Python3的速度更快。
:type pk_protocol: int
:param overwrite: 默认 ``False``, 当为``True``时, 如果写入路径已经存在, 则会
自动覆盖原文件。而为``False``时, 则会打印警告文件, 防止误操作覆盖源文件。
:type overwrite: "布尔值"
:param enable_verbose: 默认 ``True``, 信息提示的开关, 批处理时建议关闭
:type enable_verbose: ``布尔值``
"""
prt("\nDump to '%s' ..." % abspath, enable_verbose)
abspath = lower_ext(str(abspath))
is_pickle = is_pickle_file(abspath)
if os.path.exists(abspath):
if not overwrite: # 存在, 并且overwrite=False
prt(" Stop! File exists and overwrite is not allowed",
enable_verbose)
return
st = time.clock()
content = pickle.dumps(data, pk_protocol)
if is_pickle:
textfile.writebytes(content, abspath)
else:
compress.write_gzip(content, abspath)
prt(" Complete! Elapse %.6f sec." % (time.clock() - st), enable_verbose) | python | def dump(data, abspath, pk_protocol=py23.pk_protocol,
overwrite=False, enable_verbose=True):
"""Dump picklable object to file.
Provides multiple choices to customize the behavior.
:param data: picklable python object.
:type data: dict or list
:param abspath: ``save as`` path, file extension has to be ``.pickle`` or ``.gz``
(for compressed Pickle)
:type abspath: string
:param pk_protocol: default = your python version, use 2, to make a
py2.x/3.x compatible pickle file. But 3 is faster.
:type pk_protocol: int
:param overwrite: default ``False``. If ``True``, when you dump to an existing
file, it is silently overwritten. If ``False``, an alert message is shown.
The default ``False`` is meant to prevent overwriting a file by mistake.
:type overwrite: boolean
:param enable_verbose: default True, help-message-display trigger.
:type enable_verbose: boolean
Usage::
>>> from dataIO import pk
>>> data = {"a": 1, "b": 2}
>>> dump(data, "test.pickle", overwrite=True)
Dump to `test.pickle` ...
Complete! Elapse 0.002432 sec
**中文文档**
将Python中可被序列化的"字典", "列表"以及他们的组合, 按照Json的编码方式写入文件
文件
参数列表
:param data: 可Pickle化的Python对象
:type data: ``字典`` 或 ``列表``
:param abspath: Pickle文件绝对路径, 扩展名需为 ``.pickle`` 或 ``.gz``, 其中 ``.gz``
是被压缩后的Pickle文件
:type abspath: ``字符串``
:param pk_protocol: 默认值为你的Python大版本号, 使用2可以使得Python2/3都能
兼容你的Pickle文件。不过Python3的速度更快。
:type pk_protocol: int
:param overwrite: 默认 ``False``, 当为``True``时, 如果写入路径已经存在, 则会
自动覆盖原文件。而为``False``时, 则会打印警告文件, 防止误操作覆盖源文件。
:type overwrite: "布尔值"
:param enable_verbose: 默认 ``True``, 信息提示的开关, 批处理时建议关闭
:type enable_verbose: ``布尔值``
"""
prt("\nDump to '%s' ..." % abspath, enable_verbose)
abspath = lower_ext(str(abspath))
is_pickle = is_pickle_file(abspath)
if os.path.exists(abspath):
if not overwrite: # 存在, 并且overwrite=False
prt(" Stop! File exists and overwrite is not allowed",
enable_verbose)
return
st = time.clock()
content = pickle.dumps(data, pk_protocol)
if is_pickle:
textfile.writebytes(content, abspath)
else:
compress.write_gzip(content, abspath)
prt(" Complete! Elapse %.6f sec." % (time.clock() - st), enable_verbose) | [
"def",
"dump",
"(",
"data",
",",
"abspath",
",",
"pk_protocol",
"=",
"py23",
".",
"pk_protocol",
",",
"overwrite",
"=",
"False",
",",
"enable_verbose",
"=",
"True",
")",
":",
"prt",
"(",
"\"\\nDump to '%s' ...\"",
"%",
"abspath",
",",
"enable_verbose",
")",
... | Dump picklable object to file.
Provides multiple choices to customize the behavior.
:param data: picklable python object.
:type data: dict or list
:param abspath: ``save as`` path, file extension has to be ``.pickle`` or ``.gz``
(for compressed Pickle)
:type abspath: string
:param pk_protocol: default = your python version, use 2, to make a
py2.x/3.x compatible pickle file. But 3 is faster.
:type pk_protocol: int
:param overwrite: default ``False``. If ``True``, when you dump to an existing
file, it is silently overwritten. If ``False``, an alert message is shown.
The default ``False`` is meant to prevent overwriting a file by mistake.
:type overwrite: boolean
:param enable_verbose: default True, help-message-display trigger.
:type enable_verbose: boolean
Usage::
>>> from dataIO import pk
>>> data = {"a": 1, "b": 2}
>>> dump(data, "test.pickle", overwrite=True)
Dump to `test.pickle` ...
Complete! Elapse 0.002432 sec
**中文文档**
将Python中可被序列化的"字典", "列表"以及他们的组合, 按照Json的编码方式写入文件
文件
参数列表
:param data: 可Pickle化的Python对象
:type data: ``字典`` 或 ``列表``
:param abspath: Pickle文件绝对路径, 扩展名需为 ``.pickle`` 或 ``.gz``, 其中 ``.gz``
是被压缩后的Pickle文件
:type abspath: ``字符串``
:param pk_protocol: 默认值为你的Python大版本号, 使用2可以使得Python2/3都能
兼容你的Pickle文件。不过Python3的速度更快。
:type pk_protocol: int
:param overwrite: 默认 ``False``, 当为``True``时, 如果写入路径已经存在, 则会
自动覆盖原文件。而为``False``时, 则会打印警告文件, 防止误操作覆盖源文件。
:type overwrite: "布尔值"
:param enable_verbose: 默认 ``True``, 信息提示的开关, 批处理时建议关闭
:type enable_verbose: ``布尔值`` | [
"Dump",
"picklable",
"object",
"to",
"file",
".",
"Provides",
"multiple",
"choice",
"to",
"customize",
"the",
"behavior",
"."
] | 7e1cc192b5e53426eed6dbd742918619b8fd60ab | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/pk.py#L129-L204 | train | 51,197 |
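A round-trip sketch covering the compressed branch above:

from dataIO import pk

data = {"a": 1, "b": 2}
pk.dump(data, "test.gz", overwrite=True)    # .gz extension -> gzip-compressed pickle
assert pk.load("test.gz") == data
pk.dump(data, "test.gz")                    # overwrite=False: prints a warning, file kept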
MacHu-GWU/dataIO-project | dataIO/pk.py | obj2bytes | def obj2bytes(obj, pk_protocol=py23.pk_protocol):
"""Convert arbitrary pickable Python Object to bytes.
**中文文档**
将可Pickle化的Python对象转化为bytestr
"""
return pickle.dumps(obj, protocol=pk_protocol) | python | def obj2bytes(obj, pk_protocol=py23.pk_protocol):
"""Convert arbitrary pickable Python Object to bytes.
**中文文档**
将可Pickle化的Python对象转化为bytestr
"""
return pickle.dumps(obj, protocol=pk_protocol) | [
"def",
"obj2bytes",
"(",
"obj",
",",
"pk_protocol",
"=",
"py23",
".",
"pk_protocol",
")",
":",
"return",
"pickle",
".",
"dumps",
"(",
"obj",
",",
"protocol",
"=",
"pk_protocol",
")"
] | Convert arbitrary picklable Python object to bytes.
**中文文档**
将可Pickle化的Python对象转化为bytestr | [
"Convert",
"arbitrary",
"pickable",
"Python",
"Object",
"to",
"bytes",
"."
] | 7e1cc192b5e53426eed6dbd742918619b8fd60ab | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/pk.py#L232-L239 | train | 51,198 |
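The byte round trip can be checked with pickle.loads directly:

import pickle
from dataIO.pk import obj2bytes

b = obj2bytes([1, 2, 3])
assert pickle.loads(b) == [1, 2, 3]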
MacHu-GWU/dataIO-project | dataIO/pk.py | obj2str | def obj2str(obj, pk_protocol=py23.pk_protocol):
"""Convert arbitrary object to base64 encoded string.
**中文文档**
将可Pickle化的Python对象转化为utf-8编码的 ``纯ASCII字符串``
"""
return base64.urlsafe_b64encode(pickle.dumps(
obj, protocol=pk_protocol)).decode("utf-8") | python | def obj2str(obj, pk_protocol=py23.pk_protocol):
"""Convert arbitrary object to base64 encoded string.
**中文文档**
将可Pickle化的Python对象转化为utf-8编码的 ``纯ASCII字符串``
"""
return base64.urlsafe_b64encode(pickle.dumps(
obj, protocol=pk_protocol)).decode("utf-8") | [
"def",
"obj2str",
"(",
"obj",
",",
"pk_protocol",
"=",
"py23",
".",
"pk_protocol",
")",
":",
"return",
"base64",
".",
"urlsafe_b64encode",
"(",
"pickle",
".",
"dumps",
"(",
"obj",
",",
"protocol",
"=",
"pk_protocol",
")",
")",
".",
"decode",
"(",
"\"utf-... | Convert arbitrary object to base64 encoded string.
**中文文档**
将可Pickle化的Python对象转化为utf-8编码的 ``纯ASCII字符串`` | [
"Convert",
"arbitrary",
"object",
"to",
"base64",
"encoded",
"string",
"."
] | 7e1cc192b5e53426eed6dbd742918619b8fd60ab | https://github.com/MacHu-GWU/dataIO-project/blob/7e1cc192b5e53426eed6dbd742918619b8fd60ab/dataIO/pk.py#L252-L260 | train | 51,199 |
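A sketch of inverting obj2str by hand; the module may well ship a matching str2obj helper, but the manual decode below uses only the standard library:

import base64
import pickle
from dataIO.pk import obj2str

s = obj2str({"a": 1})                               # URL-safe base64 text
obj = pickle.loads(base64.urlsafe_b64decode(s.encode("utf-8")))
assert obj == {"a": 1}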