repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
guaix-ucm/pyemir | emirdrp/instrument/csu_configuration.py | merge_odd_even_csu_configurations | def merge_odd_even_csu_configurations(conf_odd, conf_even):
"""Merge CSU configuration using odd- and even-numbered values.
The CSU returned CSU configuration include the odd-numbered values
from 'conf_odd' and the even-numbered values from 'conf_even'.
Parameters
----------
conf_odd : CsuConfiguration instance
CSU configuration corresponding to odd-numbered slitlets.
conf_even : CsuConfiguration instance
CSU configuration corresponding to even-numbered slitlets.
Returns
-------
merged_conf : CsuConfiguration instance
CSU configuration resulting from the merging process.
"""
# initialize resulting CsuConfiguration instance using one of the
# input configuration corresponding to the odd-numbered slitlets
merged_conf = deepcopy(conf_odd)
# update the resulting configuration with the values corresponding
# to the even-numbered slitlets
for i in range(EMIR_NBARS):
ibar = i + 1
if ibar % 2 == 0:
merged_conf._csu_bar_left[i] = conf_even._csu_bar_left[i]
merged_conf._csu_bar_right[i] = conf_even._csu_bar_right[i]
merged_conf._csu_bar_slit_center[i] = \
conf_even._csu_bar_slit_center[i]
merged_conf._csu_bar_slit_width[i] = \
conf_even._csu_bar_slit_width[i]
# return merged configuration
return merged_conf | python | def merge_odd_even_csu_configurations(conf_odd, conf_even):
"""Merge CSU configuration using odd- and even-numbered values.
The CSU returned CSU configuration include the odd-numbered values
from 'conf_odd' and the even-numbered values from 'conf_even'.
Parameters
----------
conf_odd : CsuConfiguration instance
CSU configuration corresponding to odd-numbered slitlets.
conf_even : CsuConfiguration instance
CSU configuration corresponding to even-numbered slitlets.
Returns
-------
merged_conf : CsuConfiguration instance
CSU configuration resulting from the merging process.
"""
# initialize resulting CsuConfiguration instance using one of the
# input configuration corresponding to the odd-numbered slitlets
merged_conf = deepcopy(conf_odd)
# update the resulting configuration with the values corresponding
# to the even-numbered slitlets
for i in range(EMIR_NBARS):
ibar = i + 1
if ibar % 2 == 0:
merged_conf._csu_bar_left[i] = conf_even._csu_bar_left[i]
merged_conf._csu_bar_right[i] = conf_even._csu_bar_right[i]
merged_conf._csu_bar_slit_center[i] = \
conf_even._csu_bar_slit_center[i]
merged_conf._csu_bar_slit_width[i] = \
conf_even._csu_bar_slit_width[i]
# return merged configuration
return merged_conf | [
"def",
"merge_odd_even_csu_configurations",
"(",
"conf_odd",
",",
"conf_even",
")",
":",
"# initialize resulting CsuConfiguration instance using one of the",
"# input configuration corresponding to the odd-numbered slitlets",
"merged_conf",
"=",
"deepcopy",
"(",
"conf_odd",
")",
"# u... | Merge CSU configuration using odd- and even-numbered values.
The CSU returned CSU configuration include the odd-numbered values
from 'conf_odd' and the even-numbered values from 'conf_even'.
Parameters
----------
conf_odd : CsuConfiguration instance
CSU configuration corresponding to odd-numbered slitlets.
conf_even : CsuConfiguration instance
CSU configuration corresponding to even-numbered slitlets.
Returns
-------
merged_conf : CsuConfiguration instance
CSU configuration resulting from the merging process. | [
"Merge",
"CSU",
"configuration",
"using",
"odd",
"-",
"and",
"even",
"-",
"numbered",
"values",
"."
] | fef6bbabcb13f80123cafd1800a0f508a3c21702 | https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/instrument/csu_configuration.py#L210-L247 | train | 49,300 |
guaix-ucm/pyemir | emirdrp/instrument/csu_configuration.py | CsuConfiguration.define_from_header | def define_from_header(cls, image_header):
"""Define class members directly from FITS header.
Parameters
----------
image_header : instance of hdulist.header
Header content from a FITS file.
"""
self = CsuConfiguration()
# declare lists to store configuration of CSU bars
self._csu_bar_left = []
self._csu_bar_right = []
self._csu_bar_slit_center = []
self._csu_bar_slit_width = []
for i in range(EMIR_NBARS):
ibar = i + 1
keyword = 'CSUP{}'.format(ibar)
if keyword in image_header:
self._csu_bar_left.append(image_header[keyword])
else:
raise ValueError("Expected keyword " + keyword + " not found!")
keyword = 'CSUP{}'.format(ibar + EMIR_NBARS)
if keyword in image_header:
# set the same origin as the one employed for _csu_bar_left
self._csu_bar_right.append(341.5 - image_header[keyword])
else:
raise ValueError("Expected keyword " + keyword + " not found!")
self._csu_bar_slit_center.append(
(self._csu_bar_left[i] + self._csu_bar_right[i]) / 2
)
self._csu_bar_slit_width.append(
self._csu_bar_right[i] - self._csu_bar_left[i]
)
return self | python | def define_from_header(cls, image_header):
"""Define class members directly from FITS header.
Parameters
----------
image_header : instance of hdulist.header
Header content from a FITS file.
"""
self = CsuConfiguration()
# declare lists to store configuration of CSU bars
self._csu_bar_left = []
self._csu_bar_right = []
self._csu_bar_slit_center = []
self._csu_bar_slit_width = []
for i in range(EMIR_NBARS):
ibar = i + 1
keyword = 'CSUP{}'.format(ibar)
if keyword in image_header:
self._csu_bar_left.append(image_header[keyword])
else:
raise ValueError("Expected keyword " + keyword + " not found!")
keyword = 'CSUP{}'.format(ibar + EMIR_NBARS)
if keyword in image_header:
# set the same origin as the one employed for _csu_bar_left
self._csu_bar_right.append(341.5 - image_header[keyword])
else:
raise ValueError("Expected keyword " + keyword + " not found!")
self._csu_bar_slit_center.append(
(self._csu_bar_left[i] + self._csu_bar_right[i]) / 2
)
self._csu_bar_slit_width.append(
self._csu_bar_right[i] - self._csu_bar_left[i]
)
return self | [
"def",
"define_from_header",
"(",
"cls",
",",
"image_header",
")",
":",
"self",
"=",
"CsuConfiguration",
"(",
")",
"# declare lists to store configuration of CSU bars",
"self",
".",
"_csu_bar_left",
"=",
"[",
"]",
"self",
".",
"_csu_bar_right",
"=",
"[",
"]",
"sel... | Define class members directly from FITS header.
Parameters
----------
image_header : instance of hdulist.header
Header content from a FITS file. | [
"Define",
"class",
"members",
"directly",
"from",
"FITS",
"header",
"."
] | fef6bbabcb13f80123cafd1800a0f508a3c21702 | https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/instrument/csu_configuration.py#L100-L138 | train | 49,301 |
guaix-ucm/pyemir | emirdrp/instrument/csu_configuration.py | CsuConfiguration.widths_in_range_mm | def widths_in_range_mm(
self,
minwidth=EMIR_MINIMUM_SLITLET_WIDTH_MM,
maxwidth=EMIR_MAXIMUM_SLITLET_WIDTH_MM
):
"""Return list of slitlets which width is within given range
Parameters
----------
minwidth : float
Minimum slit width (mm).
maxwidth : float
Maximum slit width (mm).
Returns
-------
list_ok : list
List of booleans indicating whether the corresponding
slitlet width is within range
"""
list_ok = []
for i in range(EMIR_NBARS):
slitlet_ok = minwidth <= self._csu_bar_slit_width[i] <= maxwidth
if slitlet_ok:
list_ok.append(i + 1)
return list_ok | python | def widths_in_range_mm(
self,
minwidth=EMIR_MINIMUM_SLITLET_WIDTH_MM,
maxwidth=EMIR_MAXIMUM_SLITLET_WIDTH_MM
):
"""Return list of slitlets which width is within given range
Parameters
----------
minwidth : float
Minimum slit width (mm).
maxwidth : float
Maximum slit width (mm).
Returns
-------
list_ok : list
List of booleans indicating whether the corresponding
slitlet width is within range
"""
list_ok = []
for i in range(EMIR_NBARS):
slitlet_ok = minwidth <= self._csu_bar_slit_width[i] <= maxwidth
if slitlet_ok:
list_ok.append(i + 1)
return list_ok | [
"def",
"widths_in_range_mm",
"(",
"self",
",",
"minwidth",
"=",
"EMIR_MINIMUM_SLITLET_WIDTH_MM",
",",
"maxwidth",
"=",
"EMIR_MAXIMUM_SLITLET_WIDTH_MM",
")",
":",
"list_ok",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"EMIR_NBARS",
")",
":",
"slitlet_ok",
"=",... | Return list of slitlets which width is within given range
Parameters
----------
minwidth : float
Minimum slit width (mm).
maxwidth : float
Maximum slit width (mm).
Returns
-------
list_ok : list
List of booleans indicating whether the corresponding
slitlet width is within range | [
"Return",
"list",
"of",
"slitlets",
"which",
"width",
"is",
"within",
"given",
"range"
] | fef6bbabcb13f80123cafd1800a0f508a3c21702 | https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/instrument/csu_configuration.py#L179-L207 | train | 49,302 |
guaix-ucm/pyemir | emirdrp/recipes/aiv/procedures.py | encloses_annulus | def encloses_annulus(x_min, x_max, y_min, y_max, nx, ny, r_in, r_out):
'''Encloses function backported from old photutils'''
gout = circular_overlap_grid(x_min, x_max, y_min, y_max, nx, ny, r_out, 1, 1)
gin = circular_overlap_grid(x_min, x_max, y_min, y_max, nx, ny, r_in, 1, 1)
return gout - gin | python | def encloses_annulus(x_min, x_max, y_min, y_max, nx, ny, r_in, r_out):
'''Encloses function backported from old photutils'''
gout = circular_overlap_grid(x_min, x_max, y_min, y_max, nx, ny, r_out, 1, 1)
gin = circular_overlap_grid(x_min, x_max, y_min, y_max, nx, ny, r_in, 1, 1)
return gout - gin | [
"def",
"encloses_annulus",
"(",
"x_min",
",",
"x_max",
",",
"y_min",
",",
"y_max",
",",
"nx",
",",
"ny",
",",
"r_in",
",",
"r_out",
")",
":",
"gout",
"=",
"circular_overlap_grid",
"(",
"x_min",
",",
"x_max",
",",
"y_min",
",",
"y_max",
",",
"nx",
","... | Encloses function backported from old photutils | [
"Encloses",
"function",
"backported",
"from",
"old",
"photutils"
] | fef6bbabcb13f80123cafd1800a0f508a3c21702 | https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/recipes/aiv/procedures.py#L43-L48 | train | 49,303 |
guaix-ucm/pyemir | emirdrp/processing/wavecal/slitlet2d.py | Slitlet2D.ximshow_unrectified | def ximshow_unrectified(self, slitlet2d):
"""Display unrectified image with spectrails and frontiers.
Parameters
----------
slitlet2d : numpy array
Array containing the unrectified slitlet image.
"""
title = "Slitlet#" + str(self.islitlet)
ax = ximshow(slitlet2d, title=title,
first_pixel=(self.bb_nc1_orig, self.bb_ns1_orig),
show=False)
xdum = np.linspace(1, EMIR_NAXIS1, num=EMIR_NAXIS1)
ylower = self.list_spectrails[0](xdum)
ax.plot(xdum, ylower, 'b-')
ymiddle = self.list_spectrails[1](xdum)
ax.plot(xdum, ymiddle, 'b--')
yupper = self.list_spectrails[2](xdum)
ax.plot(xdum, yupper, 'b-')
ylower_frontier = self.list_frontiers[0](xdum)
ax.plot(xdum, ylower_frontier, 'b:')
yupper_frontier = self.list_frontiers[1](xdum)
ax.plot(xdum, yupper_frontier, 'b:')
pause_debugplot(debugplot=self.debugplot, pltshow=True) | python | def ximshow_unrectified(self, slitlet2d):
"""Display unrectified image with spectrails and frontiers.
Parameters
----------
slitlet2d : numpy array
Array containing the unrectified slitlet image.
"""
title = "Slitlet#" + str(self.islitlet)
ax = ximshow(slitlet2d, title=title,
first_pixel=(self.bb_nc1_orig, self.bb_ns1_orig),
show=False)
xdum = np.linspace(1, EMIR_NAXIS1, num=EMIR_NAXIS1)
ylower = self.list_spectrails[0](xdum)
ax.plot(xdum, ylower, 'b-')
ymiddle = self.list_spectrails[1](xdum)
ax.plot(xdum, ymiddle, 'b--')
yupper = self.list_spectrails[2](xdum)
ax.plot(xdum, yupper, 'b-')
ylower_frontier = self.list_frontiers[0](xdum)
ax.plot(xdum, ylower_frontier, 'b:')
yupper_frontier = self.list_frontiers[1](xdum)
ax.plot(xdum, yupper_frontier, 'b:')
pause_debugplot(debugplot=self.debugplot, pltshow=True) | [
"def",
"ximshow_unrectified",
"(",
"self",
",",
"slitlet2d",
")",
":",
"title",
"=",
"\"Slitlet#\"",
"+",
"str",
"(",
"self",
".",
"islitlet",
")",
"ax",
"=",
"ximshow",
"(",
"slitlet2d",
",",
"title",
"=",
"title",
",",
"first_pixel",
"=",
"(",
"self",
... | Display unrectified image with spectrails and frontiers.
Parameters
----------
slitlet2d : numpy array
Array containing the unrectified slitlet image. | [
"Display",
"unrectified",
"image",
"with",
"spectrails",
"and",
"frontiers",
"."
] | fef6bbabcb13f80123cafd1800a0f508a3c21702 | https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/slitlet2d.py#L406-L431 | train | 49,304 |
guaix-ucm/pyemir | emirdrp/processing/wavecal/slitlet2d.py | Slitlet2D.ximshow_rectified | def ximshow_rectified(self, slitlet2d_rect):
"""Display rectified image with spectrails and frontiers.
Parameters
----------
slitlet2d_rect : numpy array
Array containing the rectified slitlet image
"""
title = "Slitlet#" + str(self.islitlet) + " (rectify)"
ax = ximshow(slitlet2d_rect, title=title,
first_pixel=(self.bb_nc1_orig, self.bb_ns1_orig),
show=False)
# grid with fitted transformation: spectrum trails
xx = np.arange(0, self.bb_nc2_orig - self.bb_nc1_orig + 1,
dtype=np.float)
for spectrail in self.list_spectrails:
yy0 = self.corr_yrect_a + \
self.corr_yrect_b * spectrail(self.x0_reference)
yy = np.tile([yy0 - self.bb_ns1_orig], xx.size)
ax.plot(xx + self.bb_nc1_orig, yy + self.bb_ns1_orig, "b")
for spectrail in self.list_frontiers:
yy0 = self.corr_yrect_a +\
self.corr_yrect_b * spectrail(self.x0_reference)
yy = np.tile([yy0 - self.bb_ns1_orig], xx.size)
ax.plot(xx + self.bb_nc1_orig, yy + self.bb_ns1_orig, "b:")
# show plot
pause_debugplot(self.debugplot, pltshow=True) | python | def ximshow_rectified(self, slitlet2d_rect):
"""Display rectified image with spectrails and frontiers.
Parameters
----------
slitlet2d_rect : numpy array
Array containing the rectified slitlet image
"""
title = "Slitlet#" + str(self.islitlet) + " (rectify)"
ax = ximshow(slitlet2d_rect, title=title,
first_pixel=(self.bb_nc1_orig, self.bb_ns1_orig),
show=False)
# grid with fitted transformation: spectrum trails
xx = np.arange(0, self.bb_nc2_orig - self.bb_nc1_orig + 1,
dtype=np.float)
for spectrail in self.list_spectrails:
yy0 = self.corr_yrect_a + \
self.corr_yrect_b * spectrail(self.x0_reference)
yy = np.tile([yy0 - self.bb_ns1_orig], xx.size)
ax.plot(xx + self.bb_nc1_orig, yy + self.bb_ns1_orig, "b")
for spectrail in self.list_frontiers:
yy0 = self.corr_yrect_a +\
self.corr_yrect_b * spectrail(self.x0_reference)
yy = np.tile([yy0 - self.bb_ns1_orig], xx.size)
ax.plot(xx + self.bb_nc1_orig, yy + self.bb_ns1_orig, "b:")
# show plot
pause_debugplot(self.debugplot, pltshow=True) | [
"def",
"ximshow_rectified",
"(",
"self",
",",
"slitlet2d_rect",
")",
":",
"title",
"=",
"\"Slitlet#\"",
"+",
"str",
"(",
"self",
".",
"islitlet",
")",
"+",
"\" (rectify)\"",
"ax",
"=",
"ximshow",
"(",
"slitlet2d_rect",
",",
"title",
"=",
"title",
",",
"fir... | Display rectified image with spectrails and frontiers.
Parameters
----------
slitlet2d_rect : numpy array
Array containing the rectified slitlet image | [
"Display",
"rectified",
"image",
"with",
"spectrails",
"and",
"frontiers",
"."
] | fef6bbabcb13f80123cafd1800a0f508a3c21702 | https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/slitlet2d.py#L433-L461 | train | 49,305 |
IdentityPython/oidcendpoint | src/oidcendpoint/jwt_token.py | JWTToken.is_expired | def is_expired(self, token, when=0):
"""
Evaluate whether the token has expired or not
:param token: The token
:param when: The time against which to check the expiration
0 means now.
:return: True/False
"""
verifier = JWT(key_jar=self.key_jar, allowed_sign_algs=[self.alg])
_payload = verifier.unpack(token)
return is_expired(_payload['exp'], when) | python | def is_expired(self, token, when=0):
"""
Evaluate whether the token has expired or not
:param token: The token
:param when: The time against which to check the expiration
0 means now.
:return: True/False
"""
verifier = JWT(key_jar=self.key_jar, allowed_sign_algs=[self.alg])
_payload = verifier.unpack(token)
return is_expired(_payload['exp'], when) | [
"def",
"is_expired",
"(",
"self",
",",
"token",
",",
"when",
"=",
"0",
")",
":",
"verifier",
"=",
"JWT",
"(",
"key_jar",
"=",
"self",
".",
"key_jar",
",",
"allowed_sign_algs",
"=",
"[",
"self",
".",
"alg",
"]",
")",
"_payload",
"=",
"verifier",
".",
... | Evaluate whether the token has expired or not
:param token: The token
:param when: The time against which to check the expiration
0 means now.
:return: True/False | [
"Evaluate",
"whether",
"the",
"token",
"has",
"expired",
"or",
"not"
] | 6c1d729d51bfb6332816117fe476073df7a1d823 | https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/jwt_token.py#L67-L78 | train | 49,306 |
Fuyukai/asyncwebsockets | asyncwebsockets/client.py | open_websocket | async def open_websocket(url: str,
headers: Optional[list] = None,
subprotocols: Optional[list] = None):
"""
Opens a websocket.
"""
ws = await create_websocket(
url, headers=headers, subprotocols=subprotocols)
try:
yield ws
finally:
await ws.close() | python | async def open_websocket(url: str,
headers: Optional[list] = None,
subprotocols: Optional[list] = None):
"""
Opens a websocket.
"""
ws = await create_websocket(
url, headers=headers, subprotocols=subprotocols)
try:
yield ws
finally:
await ws.close() | [
"async",
"def",
"open_websocket",
"(",
"url",
":",
"str",
",",
"headers",
":",
"Optional",
"[",
"list",
"]",
"=",
"None",
",",
"subprotocols",
":",
"Optional",
"[",
"list",
"]",
"=",
"None",
")",
":",
"ws",
"=",
"await",
"create_websocket",
"(",
"url",... | Opens a websocket. | [
"Opens",
"a",
"websocket",
"."
] | e33e75fd51ce5ae0feac244e8407d2672c5b4745 | https://github.com/Fuyukai/asyncwebsockets/blob/e33e75fd51ce5ae0feac244e8407d2672c5b4745/asyncwebsockets/client.py#L20-L31 | train | 49,307 |
Fuyukai/asyncwebsockets | asyncwebsockets/client.py | create_websocket | async def create_websocket(url: str,
ssl: Optional[SSLContext] = None,
headers: Optional[list] = None,
subprotocols: Optional[list] = None):
"""
A more low-level form of open_websocket.
You are responsible for closing this websocket.
"""
url = yarl.URL(url)
args = {}
if headers:
args["headers"] = headers
# automatically use ssl if it's websocket secure
if ssl is None:
ssl = url.scheme == "wss"
if ssl:
if ssl is True:
ssl = SSLContext()
args["ssl_context"] = ssl
args["autostart_tls"] = True
args["tls_standard_compatible"] = False
addr = (url.host, int(url.port))
ws = Websocket()
await ws.__ainit__(
addr=addr, path=url.path_qs, subprotocols=subprotocols, **args)
return ws | python | async def create_websocket(url: str,
ssl: Optional[SSLContext] = None,
headers: Optional[list] = None,
subprotocols: Optional[list] = None):
"""
A more low-level form of open_websocket.
You are responsible for closing this websocket.
"""
url = yarl.URL(url)
args = {}
if headers:
args["headers"] = headers
# automatically use ssl if it's websocket secure
if ssl is None:
ssl = url.scheme == "wss"
if ssl:
if ssl is True:
ssl = SSLContext()
args["ssl_context"] = ssl
args["autostart_tls"] = True
args["tls_standard_compatible"] = False
addr = (url.host, int(url.port))
ws = Websocket()
await ws.__ainit__(
addr=addr, path=url.path_qs, subprotocols=subprotocols, **args)
return ws | [
"async",
"def",
"create_websocket",
"(",
"url",
":",
"str",
",",
"ssl",
":",
"Optional",
"[",
"SSLContext",
"]",
"=",
"None",
",",
"headers",
":",
"Optional",
"[",
"list",
"]",
"=",
"None",
",",
"subprotocols",
":",
"Optional",
"[",
"list",
"]",
"=",
... | A more low-level form of open_websocket.
You are responsible for closing this websocket. | [
"A",
"more",
"low",
"-",
"level",
"form",
"of",
"open_websocket",
".",
"You",
"are",
"responsible",
"for",
"closing",
"this",
"websocket",
"."
] | e33e75fd51ce5ae0feac244e8407d2672c5b4745 | https://github.com/Fuyukai/asyncwebsockets/blob/e33e75fd51ce5ae0feac244e8407d2672c5b4745/asyncwebsockets/client.py#L34-L61 | train | 49,308 |
Fuyukai/asyncwebsockets | asyncwebsockets/client.py | open_websocket_client | async def open_websocket_client(sock: anyio.abc.SocketStream,
addr,
path: str,
headers: Optional[list] = None,
subprotocols: Optional[list] = None):
"""Create a websocket on top of a socket."""
ws = await create_websocket_client(
sock, addr=addr, path=path, headers=headers, subprotocols=subprotocols)
try:
yield ws
finally:
await ws.close() | python | async def open_websocket_client(sock: anyio.abc.SocketStream,
addr,
path: str,
headers: Optional[list] = None,
subprotocols: Optional[list] = None):
"""Create a websocket on top of a socket."""
ws = await create_websocket_client(
sock, addr=addr, path=path, headers=headers, subprotocols=subprotocols)
try:
yield ws
finally:
await ws.close() | [
"async",
"def",
"open_websocket_client",
"(",
"sock",
":",
"anyio",
".",
"abc",
".",
"SocketStream",
",",
"addr",
",",
"path",
":",
"str",
",",
"headers",
":",
"Optional",
"[",
"list",
"]",
"=",
"None",
",",
"subprotocols",
":",
"Optional",
"[",
"list",
... | Create a websocket on top of a socket. | [
"Create",
"a",
"websocket",
"on",
"top",
"of",
"a",
"socket",
"."
] | e33e75fd51ce5ae0feac244e8407d2672c5b4745 | https://github.com/Fuyukai/asyncwebsockets/blob/e33e75fd51ce5ae0feac244e8407d2672c5b4745/asyncwebsockets/client.py#L65-L76 | train | 49,309 |
Fuyukai/asyncwebsockets | asyncwebsockets/client.py | create_websocket_client | async def create_websocket_client(sock: anyio.abc.SocketStream,
addr,
path: str,
headers: Optional[List] = None,
subprotocols: Optional[List[str]] = None):
"""
A more low-level form of create_websocket_client.
You are responsible for closing this websocket.
"""
ws = Websocket()
await ws.start_client(
sock, addr=addr, path=path, headers=headers, subprotocols=subprotocols)
return ws | python | async def create_websocket_client(sock: anyio.abc.SocketStream,
addr,
path: str,
headers: Optional[List] = None,
subprotocols: Optional[List[str]] = None):
"""
A more low-level form of create_websocket_client.
You are responsible for closing this websocket.
"""
ws = Websocket()
await ws.start_client(
sock, addr=addr, path=path, headers=headers, subprotocols=subprotocols)
return ws | [
"async",
"def",
"create_websocket_client",
"(",
"sock",
":",
"anyio",
".",
"abc",
".",
"SocketStream",
",",
"addr",
",",
"path",
":",
"str",
",",
"headers",
":",
"Optional",
"[",
"List",
"]",
"=",
"None",
",",
"subprotocols",
":",
"Optional",
"[",
"List"... | A more low-level form of create_websocket_client.
You are responsible for closing this websocket. | [
"A",
"more",
"low",
"-",
"level",
"form",
"of",
"create_websocket_client",
".",
"You",
"are",
"responsible",
"for",
"closing",
"this",
"websocket",
"."
] | e33e75fd51ce5ae0feac244e8407d2672c5b4745 | https://github.com/Fuyukai/asyncwebsockets/blob/e33e75fd51ce5ae0feac244e8407d2672c5b4745/asyncwebsockets/client.py#L79-L91 | train | 49,310 |
meraki-analytics/cassiopeia-datastores | cassiopeia-sqlstore/cassiopeia_sqlstore/SQLStore.py | SQLStore._one | def _one(self, query):
"""Gets one row from the query. Raises NotFoundError if there isn't a row or if there are multiple rows"""
try:
result = query.one()
if result.has_expired(self._expirations):
raise NotFoundError
return result
except (NoResultFound, MultipleResultsFound):
raise NotFoundError | python | def _one(self, query):
"""Gets one row from the query. Raises NotFoundError if there isn't a row or if there are multiple rows"""
try:
result = query.one()
if result.has_expired(self._expirations):
raise NotFoundError
return result
except (NoResultFound, MultipleResultsFound):
raise NotFoundError | [
"def",
"_one",
"(",
"self",
",",
"query",
")",
":",
"try",
":",
"result",
"=",
"query",
".",
"one",
"(",
")",
"if",
"result",
".",
"has_expired",
"(",
"self",
".",
"_expirations",
")",
":",
"raise",
"NotFoundError",
"return",
"result",
"except",
"(",
... | Gets one row from the query. Raises NotFoundError if there isn't a row or if there are multiple rows | [
"Gets",
"one",
"row",
"from",
"the",
"query",
".",
"Raises",
"NotFoundError",
"if",
"there",
"isn",
"t",
"a",
"row",
"or",
"if",
"there",
"are",
"multiple",
"rows"
] | 1919b79b8b036d48818eb648e712df41f8a1299c | https://github.com/meraki-analytics/cassiopeia-datastores/blob/1919b79b8b036d48818eb648e712df41f8a1299c/cassiopeia-sqlstore/cassiopeia_sqlstore/SQLStore.py#L112-L120 | train | 49,311 |
meraki-analytics/cassiopeia-datastores | cassiopeia-sqlstore/cassiopeia_sqlstore/SQLStore.py | SQLStore._first | def _first(self, query):
"""Gets the first row of the query. Raises NotFoundError if there isn't a row"""
result = query.first()
if result is None:
raise NotFoundError
else:
if result.has_expired(self._expirations):
raise NotFoundError
return result | python | def _first(self, query):
"""Gets the first row of the query. Raises NotFoundError if there isn't a row"""
result = query.first()
if result is None:
raise NotFoundError
else:
if result.has_expired(self._expirations):
raise NotFoundError
return result | [
"def",
"_first",
"(",
"self",
",",
"query",
")",
":",
"result",
"=",
"query",
".",
"first",
"(",
")",
"if",
"result",
"is",
"None",
":",
"raise",
"NotFoundError",
"else",
":",
"if",
"result",
".",
"has_expired",
"(",
"self",
".",
"_expirations",
")",
... | Gets the first row of the query. Raises NotFoundError if there isn't a row | [
"Gets",
"the",
"first",
"row",
"of",
"the",
"query",
".",
"Raises",
"NotFoundError",
"if",
"there",
"isn",
"t",
"a",
"row"
] | 1919b79b8b036d48818eb648e712df41f8a1299c | https://github.com/meraki-analytics/cassiopeia-datastores/blob/1919b79b8b036d48818eb648e712df41f8a1299c/cassiopeia-sqlstore/cassiopeia_sqlstore/SQLStore.py#L122-L130 | train | 49,312 |
meraki-analytics/cassiopeia-datastores | cassiopeia-sqlstore/cassiopeia_sqlstore/SQLStore.py | SQLStore._all | def _all(self, query):
"""Gets all rows of the query. Raises a NotFoundError if there are 0 rows"""
if query.count() > 0:
results = query.all()
for result in results:
if result.has_expired(self._expirations):
raise NotFoundError
return results
else:
raise NotFoundError | python | def _all(self, query):
"""Gets all rows of the query. Raises a NotFoundError if there are 0 rows"""
if query.count() > 0:
results = query.all()
for result in results:
if result.has_expired(self._expirations):
raise NotFoundError
return results
else:
raise NotFoundError | [
"def",
"_all",
"(",
"self",
",",
"query",
")",
":",
"if",
"query",
".",
"count",
"(",
")",
">",
"0",
":",
"results",
"=",
"query",
".",
"all",
"(",
")",
"for",
"result",
"in",
"results",
":",
"if",
"result",
".",
"has_expired",
"(",
"self",
".",
... | Gets all rows of the query. Raises a NotFoundError if there are 0 rows | [
"Gets",
"all",
"rows",
"of",
"the",
"query",
".",
"Raises",
"a",
"NotFoundError",
"if",
"there",
"are",
"0",
"rows"
] | 1919b79b8b036d48818eb648e712df41f8a1299c | https://github.com/meraki-analytics/cassiopeia-datastores/blob/1919b79b8b036d48818eb648e712df41f8a1299c/cassiopeia-sqlstore/cassiopeia_sqlstore/SQLStore.py#L132-L141 | train | 49,313 |
meraki-analytics/cassiopeia-datastores | cassiopeia-sqlstore/cassiopeia_sqlstore/SQLStore.py | SQLStore._put | def _put(self, item: SQLBaseObject):
"""Puts a item into the database. Updates lastUpdate column"""
if item._dto_type in self._expirations and self._expirations[item._dto_type] == 0:
# The expiration time has been set to 0 -> shoud not be cached
return
item.updated()
self._session().merge(item) | python | def _put(self, item: SQLBaseObject):
"""Puts a item into the database. Updates lastUpdate column"""
if item._dto_type in self._expirations and self._expirations[item._dto_type] == 0:
# The expiration time has been set to 0 -> shoud not be cached
return
item.updated()
self._session().merge(item) | [
"def",
"_put",
"(",
"self",
",",
"item",
":",
"SQLBaseObject",
")",
":",
"if",
"item",
".",
"_dto_type",
"in",
"self",
".",
"_expirations",
"and",
"self",
".",
"_expirations",
"[",
"item",
".",
"_dto_type",
"]",
"==",
"0",
":",
"# The expiration time has b... | Puts a item into the database. Updates lastUpdate column | [
"Puts",
"a",
"item",
"into",
"the",
"database",
".",
"Updates",
"lastUpdate",
"column"
] | 1919b79b8b036d48818eb648e712df41f8a1299c | https://github.com/meraki-analytics/cassiopeia-datastores/blob/1919b79b8b036d48818eb648e712df41f8a1299c/cassiopeia-sqlstore/cassiopeia_sqlstore/SQLStore.py#L144-L150 | train | 49,314 |
meraki-analytics/cassiopeia-datastores | cassiopeia-sqlstore/cassiopeia_sqlstore/SQLStore.py | SQLStore._put_many | def _put_many(self, items: Iterable[DtoObject], cls):
"""Puts many items into the database. Updates lastUpdate column for each of them"""
if cls._dto_type in self._expirations and self._expirations[cls._dto_type] == 0:
# The expiration time has been set to 0 -> shoud not be cached
return
session = self._session
for item in items:
item = cls(**item)
item.updated()
session.merge(item) | python | def _put_many(self, items: Iterable[DtoObject], cls):
"""Puts many items into the database. Updates lastUpdate column for each of them"""
if cls._dto_type in self._expirations and self._expirations[cls._dto_type] == 0:
# The expiration time has been set to 0 -> shoud not be cached
return
session = self._session
for item in items:
item = cls(**item)
item.updated()
session.merge(item) | [
"def",
"_put_many",
"(",
"self",
",",
"items",
":",
"Iterable",
"[",
"DtoObject",
"]",
",",
"cls",
")",
":",
"if",
"cls",
".",
"_dto_type",
"in",
"self",
".",
"_expirations",
"and",
"self",
".",
"_expirations",
"[",
"cls",
".",
"_dto_type",
"]",
"==",
... | Puts many items into the database. Updates lastUpdate column for each of them | [
"Puts",
"many",
"items",
"into",
"the",
"database",
".",
"Updates",
"lastUpdate",
"column",
"for",
"each",
"of",
"them"
] | 1919b79b8b036d48818eb648e712df41f8a1299c | https://github.com/meraki-analytics/cassiopeia-datastores/blob/1919b79b8b036d48818eb648e712df41f8a1299c/cassiopeia-sqlstore/cassiopeia_sqlstore/SQLStore.py#L153-L162 | train | 49,315 |
IdentityPython/oidcendpoint | src/oidcendpoint/user_info/__init__.py | UserInfo.filter | def filter(self, userinfo, user_info_claims=None):
"""
Return only those claims that are asked for.
It's a best effort task; if essential claims are not present
no error is flagged.
:param userinfo: A dictionary containing the available info for one user
:param user_info_claims: A dictionary specifying the asked for claims
:return: A dictionary of filtered claims.
"""
if user_info_claims is None:
return copy.copy(userinfo)
else:
result = {}
missing = []
optional = []
for key, restr in user_info_claims.items():
try:
result[key] = userinfo[key]
except KeyError:
if restr == {"essential": True}:
missing.append(key)
else:
optional.append(key)
return result | python | def filter(self, userinfo, user_info_claims=None):
"""
Return only those claims that are asked for.
It's a best effort task; if essential claims are not present
no error is flagged.
:param userinfo: A dictionary containing the available info for one user
:param user_info_claims: A dictionary specifying the asked for claims
:return: A dictionary of filtered claims.
"""
if user_info_claims is None:
return copy.copy(userinfo)
else:
result = {}
missing = []
optional = []
for key, restr in user_info_claims.items():
try:
result[key] = userinfo[key]
except KeyError:
if restr == {"essential": True}:
missing.append(key)
else:
optional.append(key)
return result | [
"def",
"filter",
"(",
"self",
",",
"userinfo",
",",
"user_info_claims",
"=",
"None",
")",
":",
"if",
"user_info_claims",
"is",
"None",
":",
"return",
"copy",
".",
"copy",
"(",
"userinfo",
")",
"else",
":",
"result",
"=",
"{",
"}",
"missing",
"=",
"[",
... | Return only those claims that are asked for.
It's a best effort task; if essential claims are not present
no error is flagged.
:param userinfo: A dictionary containing the available info for one user
:param user_info_claims: A dictionary specifying the asked for claims
:return: A dictionary of filtered claims. | [
"Return",
"only",
"those",
"claims",
"that",
"are",
"asked",
"for",
".",
"It",
"s",
"a",
"best",
"effort",
"task",
";",
"if",
"essential",
"claims",
"are",
"not",
"present",
"no",
"error",
"is",
"flagged",
"."
] | 6c1d729d51bfb6332816117fe476073df7a1d823 | https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/user_info/__init__.py#L41-L66 | train | 49,316 |
volafiled/python-volapi | volapi/chat.py | ChatMessage.from_data | def from_data(room, conn, data):
"""Construct a ChatMessage instance from raw protocol data"""
files = list()
rooms = dict()
msg = str()
for part in data["message"]:
ptype = part["type"]
if ptype == "text":
val = part["value"]
msg += val
elif ptype == "break":
msg += "\n"
elif ptype == "file":
fileid = part["id"]
fileobj = room.filedict.get(fileid)
if fileobj:
files += (fileobj,)
fileid = f"@{fileid}"
msg += fileid
elif ptype == "room":
roomid = part["id"]
rooms[roomid] = part["name"]
roomid = f"#{roomid}"
msg += roomid
elif ptype == "url":
msg += part["text"]
elif ptype == "raw":
msg += html_to_text(part["value"])
else:
import warnings
warnings.warn(f"unknown message type '{ptype}'", Warning)
nick = data.get("nick") or data.get("user")
options = data.get("options", dict())
data = data.get("data", dict())
message = ChatMessage(
room,
conn,
nick,
msg,
roles=Roles.from_options(options),
options=options,
data=data,
files=files,
rooms=rooms,
)
return message | python | def from_data(room, conn, data):
"""Construct a ChatMessage instance from raw protocol data"""
files = list()
rooms = dict()
msg = str()
for part in data["message"]:
ptype = part["type"]
if ptype == "text":
val = part["value"]
msg += val
elif ptype == "break":
msg += "\n"
elif ptype == "file":
fileid = part["id"]
fileobj = room.filedict.get(fileid)
if fileobj:
files += (fileobj,)
fileid = f"@{fileid}"
msg += fileid
elif ptype == "room":
roomid = part["id"]
rooms[roomid] = part["name"]
roomid = f"#{roomid}"
msg += roomid
elif ptype == "url":
msg += part["text"]
elif ptype == "raw":
msg += html_to_text(part["value"])
else:
import warnings
warnings.warn(f"unknown message type '{ptype}'", Warning)
nick = data.get("nick") or data.get("user")
options = data.get("options", dict())
data = data.get("data", dict())
message = ChatMessage(
room,
conn,
nick,
msg,
roles=Roles.from_options(options),
options=options,
data=data,
files=files,
rooms=rooms,
)
return message | [
"def",
"from_data",
"(",
"room",
",",
"conn",
",",
"data",
")",
":",
"files",
"=",
"list",
"(",
")",
"rooms",
"=",
"dict",
"(",
")",
"msg",
"=",
"str",
"(",
")",
"for",
"part",
"in",
"data",
"[",
"\"message\"",
"]",
":",
"ptype",
"=",
"part",
"... | Construct a ChatMessage instance from raw protocol data | [
"Construct",
"a",
"ChatMessage",
"instance",
"from",
"raw",
"protocol",
"data"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/chat.py#L80-L129 | train | 49,317 |
guaix-ucm/pyemir | emirdrp/recipes/acquisition/maskcheck.py | create_rot2d | def create_rot2d(angle):
"""Create 2D rotation matrix"""
ca = math.cos(angle)
sa = math.sin(angle)
return np.array([[ca, -sa], [sa, ca]]) | python | def create_rot2d(angle):
"""Create 2D rotation matrix"""
ca = math.cos(angle)
sa = math.sin(angle)
return np.array([[ca, -sa], [sa, ca]]) | [
"def",
"create_rot2d",
"(",
"angle",
")",
":",
"ca",
"=",
"math",
".",
"cos",
"(",
"angle",
")",
"sa",
"=",
"math",
".",
"sin",
"(",
"angle",
")",
"return",
"np",
".",
"array",
"(",
"[",
"[",
"ca",
",",
"-",
"sa",
"]",
",",
"[",
"sa",
",",
... | Create 2D rotation matrix | [
"Create",
"2D",
"rotation",
"matrix"
] | fef6bbabcb13f80123cafd1800a0f508a3c21702 | https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/recipes/acquisition/maskcheck.py#L47-L51 | train | 49,318 |
guaix-ucm/pyemir | emirdrp/recipes/acquisition/maskcheck.py | comp_centroid | def comp_centroid(data, bounding_box, debug_plot=False, plot_reference=None, logger=None):
"""Detect objects in a region and return the centroid of the brightest one"""
from matplotlib.patches import Ellipse
if logger is None:
logger = logging.getLogger(__name__)
region = bounding_box.slice
ref_x = region[1].start
ref_y = region[0].start
logger.debug('region ofset is %s, %s', ref_x, ref_y)
subimage = data[region].copy()
bkg = sep.Background(subimage)
data_sub = subimage - bkg
objects = sep.extract(data_sub, 1.5, err=bkg.globalrms)
# Select brightest object
logger.debug('%d object found', len(objects))
if len(objects) == 0:
# print('No objects')
return None
iadx = objects['flux'].argmax()
# plot background-subtracted image
maxflux = objects[iadx]
if debug_plot:
fig, ax = plt.subplots()
m, s = np.mean(data_sub), np.std(data_sub)
ax.imshow(data_sub, interpolation='nearest', cmap='gray',
vmin=m - s, vmax=m + s, origin='lower',
extent=bounding_box.extent)
if plot_reference:
e = Ellipse(xy=(plot_reference[0], plot_reference[1]),
width=6,
height=6,
angle=0)
e.set_facecolor('none')
e.set_edgecolor('green')
ax.add_artist(e)
# plot an ellipse for each object
for idx, obj in enumerate(objects):
e = Ellipse(xy=(obj['x'] + ref_x, obj['y'] + ref_y),
width=6 * obj['a'],
height=6 * obj['b'],
angle=obj['theta'] * 180. / np.pi)
e.set_facecolor('none')
if idx == iadx:
e.set_edgecolor('blue')
else:
e.set_edgecolor('red')
ax.add_artist(e)
return maxflux['x'], maxflux['y'], ax
else:
return maxflux['x'], maxflux['y'] | python | def comp_centroid(data, bounding_box, debug_plot=False, plot_reference=None, logger=None):
"""Detect objects in a region and return the centroid of the brightest one"""
from matplotlib.patches import Ellipse
if logger is None:
logger = logging.getLogger(__name__)
region = bounding_box.slice
ref_x = region[1].start
ref_y = region[0].start
logger.debug('region ofset is %s, %s', ref_x, ref_y)
subimage = data[region].copy()
bkg = sep.Background(subimage)
data_sub = subimage - bkg
objects = sep.extract(data_sub, 1.5, err=bkg.globalrms)
# Select brightest object
logger.debug('%d object found', len(objects))
if len(objects) == 0:
# print('No objects')
return None
iadx = objects['flux'].argmax()
# plot background-subtracted image
maxflux = objects[iadx]
if debug_plot:
fig, ax = plt.subplots()
m, s = np.mean(data_sub), np.std(data_sub)
ax.imshow(data_sub, interpolation='nearest', cmap='gray',
vmin=m - s, vmax=m + s, origin='lower',
extent=bounding_box.extent)
if plot_reference:
e = Ellipse(xy=(plot_reference[0], plot_reference[1]),
width=6,
height=6,
angle=0)
e.set_facecolor('none')
e.set_edgecolor('green')
ax.add_artist(e)
# plot an ellipse for each object
for idx, obj in enumerate(objects):
e = Ellipse(xy=(obj['x'] + ref_x, obj['y'] + ref_y),
width=6 * obj['a'],
height=6 * obj['b'],
angle=obj['theta'] * 180. / np.pi)
e.set_facecolor('none')
if idx == iadx:
e.set_edgecolor('blue')
else:
e.set_edgecolor('red')
ax.add_artist(e)
return maxflux['x'], maxflux['y'], ax
else:
return maxflux['x'], maxflux['y'] | [
"def",
"comp_centroid",
"(",
"data",
",",
"bounding_box",
",",
"debug_plot",
"=",
"False",
",",
"plot_reference",
"=",
"None",
",",
"logger",
"=",
"None",
")",
":",
"from",
"matplotlib",
".",
"patches",
"import",
"Ellipse",
"if",
"logger",
"is",
"None",
":... | Detect objects in a region and return the centroid of the brightest one | [
"Detect",
"objects",
"in",
"a",
"region",
"and",
"return",
"the",
"centroid",
"of",
"the",
"brightest",
"one"
] | fef6bbabcb13f80123cafd1800a0f508a3c21702 | https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/recipes/acquisition/maskcheck.py#L63-L119 | train | 49,319 |
IdentityPython/oidcendpoint | src/oidcendpoint/id_token.py | IDToken.sign_encrypt | def sign_encrypt(self, session_info, client_id, code=None,
access_token=None, user_info=None, sign=True,
encrypt=False, extra_claims=None):
"""
Signed and or encrypt a IDToken
:param session_info: Session information
:param client_id: Client ID
:param code: Access grant
:param access_token: Access Token
:param user_info: User information
:param sign: If the JWT should be signed
:param encrypt: If the JWT should be encrypted
:param extra_claims: Extra claims to be added to the ID Token
:return: IDToken as a signed and/or encrypted JWT
"""
_cntx = self.endpoint_context
client_info = _cntx.cdb[client_id]
alg_dict = get_sign_and_encrypt_algorithms(_cntx, client_info,
'id_token', sign=sign,
encrypt=encrypt)
_authn_event = session_info['authn_event']
_idt_info = self.payload(session_info,
acr=_authn_event["authn_info"],
alg=alg_dict['sign_alg'], code=code,
access_token=access_token, user_info=user_info,
auth_time=_authn_event["authn_time"],
extra_claims=extra_claims)
_jwt = JWT(_cntx.keyjar, iss=_cntx.issuer,
lifetime=_idt_info['lifetime'], **alg_dict)
return _jwt.pack(_idt_info['payload'], recv=client_id) | python | def sign_encrypt(self, session_info, client_id, code=None,
access_token=None, user_info=None, sign=True,
encrypt=False, extra_claims=None):
"""
Signed and or encrypt a IDToken
:param session_info: Session information
:param client_id: Client ID
:param code: Access grant
:param access_token: Access Token
:param user_info: User information
:param sign: If the JWT should be signed
:param encrypt: If the JWT should be encrypted
:param extra_claims: Extra claims to be added to the ID Token
:return: IDToken as a signed and/or encrypted JWT
"""
_cntx = self.endpoint_context
client_info = _cntx.cdb[client_id]
alg_dict = get_sign_and_encrypt_algorithms(_cntx, client_info,
'id_token', sign=sign,
encrypt=encrypt)
_authn_event = session_info['authn_event']
_idt_info = self.payload(session_info,
acr=_authn_event["authn_info"],
alg=alg_dict['sign_alg'], code=code,
access_token=access_token, user_info=user_info,
auth_time=_authn_event["authn_time"],
extra_claims=extra_claims)
_jwt = JWT(_cntx.keyjar, iss=_cntx.issuer,
lifetime=_idt_info['lifetime'], **alg_dict)
return _jwt.pack(_idt_info['payload'], recv=client_id) | [
"def",
"sign_encrypt",
"(",
"self",
",",
"session_info",
",",
"client_id",
",",
"code",
"=",
"None",
",",
"access_token",
"=",
"None",
",",
"user_info",
"=",
"None",
",",
"sign",
"=",
"True",
",",
"encrypt",
"=",
"False",
",",
"extra_claims",
"=",
"None"... | Signed and or encrypt a IDToken
:param session_info: Session information
:param client_id: Client ID
:param code: Access grant
:param access_token: Access Token
:param user_info: User information
:param sign: If the JWT should be signed
:param encrypt: If the JWT should be encrypted
:param extra_claims: Extra claims to be added to the ID Token
:return: IDToken as a signed and/or encrypted JWT | [
"Signed",
"and",
"or",
"encrypt",
"a",
"IDToken"
] | 6c1d729d51bfb6332816117fe476073df7a1d823 | https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/id_token.py#L135-L171 | train | 49,320 |
BreakingBytes/simkit | simkit/core/layers.py | Layer.add | def add(self, src_cls, module, package=None):
"""
Add layer class to model. This method may be overloaded by layer.
:param src_cls: layer class to add, should not start with underscores
:type src_cls: str
:param module: Python module that contains layer class
:type module: str
:param package: optional package containing module with layer class
:type package: str
:raises: :exc:`~exceptions.NotImplementedError`
"""
# import module containing the layer class
mod = importlib.import_module(module, package)
# get layer class definition from the module
self.sources[src_cls] = getattr(mod, src_cls) | python | def add(self, src_cls, module, package=None):
"""
Add layer class to model. This method may be overloaded by layer.
:param src_cls: layer class to add, should not start with underscores
:type src_cls: str
:param module: Python module that contains layer class
:type module: str
:param package: optional package containing module with layer class
:type package: str
:raises: :exc:`~exceptions.NotImplementedError`
"""
# import module containing the layer class
mod = importlib.import_module(module, package)
# get layer class definition from the module
self.sources[src_cls] = getattr(mod, src_cls) | [
"def",
"add",
"(",
"self",
",",
"src_cls",
",",
"module",
",",
"package",
"=",
"None",
")",
":",
"# import module containing the layer class",
"mod",
"=",
"importlib",
".",
"import_module",
"(",
"module",
",",
"package",
")",
"# get layer class definition from the m... | Add layer class to model. This method may be overloaded by layer.
:param src_cls: layer class to add, should not start with underscores
:type src_cls: str
:param module: Python module that contains layer class
:type module: str
:param package: optional package containing module with layer class
:type package: str
:raises: :exc:`~exceptions.NotImplementedError` | [
"Add",
"layer",
"class",
"to",
"model",
".",
"This",
"method",
"may",
"be",
"overloaded",
"by",
"layer",
"."
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/layers.py#L64-L79 | train | 49,321 |
BreakingBytes/simkit | simkit/core/layers.py | Data.add | def add(self, data_source, module, package=None):
"""
Add data_source to model. Tries to import module, then looks for data
source class definition.
:param data_source: Name of data source to add.
:type data_source: str
:param module: Module in which data source resides. Can be absolute or
relative. See :func:`importlib.import_module`
:type module: str
:param package: Optional, but must be used if module is relative.
:type package: str
.. seealso::
:func:`importlib.import_module`
"""
super(Data, self).add(data_source, module, package)
# only update layer info if it is missing!
if data_source not in self.layer:
# copy data source parameters to :attr:`Layer.layer`
self.layer[data_source] = {'module': module, 'package': package}
# add a place holder for the data source object when it's constructed
self.objects[data_source] = None | python | def add(self, data_source, module, package=None):
"""
Add data_source to model. Tries to import module, then looks for data
source class definition.
:param data_source: Name of data source to add.
:type data_source: str
:param module: Module in which data source resides. Can be absolute or
relative. See :func:`importlib.import_module`
:type module: str
:param package: Optional, but must be used if module is relative.
:type package: str
.. seealso::
:func:`importlib.import_module`
"""
super(Data, self).add(data_source, module, package)
# only update layer info if it is missing!
if data_source not in self.layer:
# copy data source parameters to :attr:`Layer.layer`
self.layer[data_source] = {'module': module, 'package': package}
# add a place holder for the data source object when it's constructed
self.objects[data_source] = None | [
"def",
"add",
"(",
"self",
",",
"data_source",
",",
"module",
",",
"package",
"=",
"None",
")",
":",
"super",
"(",
"Data",
",",
"self",
")",
".",
"add",
"(",
"data_source",
",",
"module",
",",
"package",
")",
"# only update layer info if it is missing!",
"... | Add data_source to model. Tries to import module, then looks for data
source class definition.
:param data_source: Name of data source to add.
:type data_source: str
:param module: Module in which data source resides. Can be absolute or
relative. See :func:`importlib.import_module`
:type module: str
:param package: Optional, but must be used if module is relative.
:type package: str
.. seealso::
:func:`importlib.import_module` | [
"Add",
"data_source",
"to",
"model",
".",
"Tries",
"to",
"import",
"module",
"then",
"looks",
"for",
"data",
"source",
"class",
"definition",
"."
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/layers.py#L125-L147 | train | 49,322 |
BreakingBytes/simkit | simkit/core/layers.py | Data.open | def open(self, data_source, *args, **kwargs):
"""
Open filename to get data for data_source.
:param data_source: Data source for which the file contains data.
:type data_source: str
Positional and keyword arguments can contain either the data to use for
the data source or the full path of the file which contains data for the
data source.
"""
if self.sources[data_source]._meta.data_reader.is_file_reader:
filename = kwargs.get('filename')
path = kwargs.get('path', '')
rel_path = kwargs.get('rel_path', '')
if len(args) > 0:
filename = args[0]
if len(args) > 1:
path = args[1]
if len(args) > 2:
rel_path = args[2]
args = ()
kwargs = {'filename': os.path.join(rel_path, path, filename)}
LOGGER.debug('filename: %s', kwargs['filename'])
# call constructor of data source with filename argument
self.objects[data_source] = self.sources[data_source](*args, **kwargs)
# register data and uncertainty in registry
data_src_obj = self.objects[data_source]
meta = [getattr(data_src_obj, m) for m in self.reg.meta_names]
self.reg.register(data_src_obj.data, *meta) | python | def open(self, data_source, *args, **kwargs):
"""
Open filename to get data for data_source.
:param data_source: Data source for which the file contains data.
:type data_source: str
Positional and keyword arguments can contain either the data to use for
the data source or the full path of the file which contains data for the
data source.
"""
if self.sources[data_source]._meta.data_reader.is_file_reader:
filename = kwargs.get('filename')
path = kwargs.get('path', '')
rel_path = kwargs.get('rel_path', '')
if len(args) > 0:
filename = args[0]
if len(args) > 1:
path = args[1]
if len(args) > 2:
rel_path = args[2]
args = ()
kwargs = {'filename': os.path.join(rel_path, path, filename)}
LOGGER.debug('filename: %s', kwargs['filename'])
# call constructor of data source with filename argument
self.objects[data_source] = self.sources[data_source](*args, **kwargs)
# register data and uncertainty in registry
data_src_obj = self.objects[data_source]
meta = [getattr(data_src_obj, m) for m in self.reg.meta_names]
self.reg.register(data_src_obj.data, *meta) | [
"def",
"open",
"(",
"self",
",",
"data_source",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"sources",
"[",
"data_source",
"]",
".",
"_meta",
".",
"data_reader",
".",
"is_file_reader",
":",
"filename",
"=",
"kwargs",
".",
... | Open filename to get data for data_source.
:param data_source: Data source for which the file contains data.
:type data_source: str
Positional and keyword arguments can contain either the data to use for
the data source or the full path of the file which contains data for the
data source. | [
"Open",
"filename",
"to",
"get",
"data",
"for",
"data_source",
"."
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/layers.py#L149-L178 | train | 49,323 |
BreakingBytes/simkit | simkit/core/layers.py | Data.load | def load(self, rel_path=None):
"""
Add data_sources to layer and open files with data for the data_source.
"""
for k, v in self.layer.iteritems():
self.add(k, v['module'], v.get('package'))
filename = v.get('filename')
path = v.get('path')
if filename:
# default path for data is in ../data
if not path:
path = rel_path
else:
path = os.path.join(rel_path, path)
# filename can be a list or a string, concatenate list with
# os.pathsep and append the full path to strings.
if isinstance(filename, basestring):
filename = os.path.join(path, filename)
else:
file_list = [os.path.join(path, f) for f in filename]
filename = os.path.pathsep.join(file_list)
self.open(k, filename) | python | def load(self, rel_path=None):
"""
Add data_sources to layer and open files with data for the data_source.
"""
for k, v in self.layer.iteritems():
self.add(k, v['module'], v.get('package'))
filename = v.get('filename')
path = v.get('path')
if filename:
# default path for data is in ../data
if not path:
path = rel_path
else:
path = os.path.join(rel_path, path)
# filename can be a list or a string, concatenate list with
# os.pathsep and append the full path to strings.
if isinstance(filename, basestring):
filename = os.path.join(path, filename)
else:
file_list = [os.path.join(path, f) for f in filename]
filename = os.path.pathsep.join(file_list)
self.open(k, filename) | [
"def",
"load",
"(",
"self",
",",
"rel_path",
"=",
"None",
")",
":",
"for",
"k",
",",
"v",
"in",
"self",
".",
"layer",
".",
"iteritems",
"(",
")",
":",
"self",
".",
"add",
"(",
"k",
",",
"v",
"[",
"'module'",
"]",
",",
"v",
".",
"get",
"(",
... | Add data_sources to layer and open files with data for the data_source. | [
"Add",
"data_sources",
"to",
"layer",
"and",
"open",
"files",
"with",
"data",
"for",
"the",
"data_source",
"."
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/layers.py#L180-L201 | train | 49,324 |
BreakingBytes/simkit | simkit/core/layers.py | Data.edit | def edit(self, data_src, value):
"""
Edit data layer.
:param data_src: Name of :class:`DataSource` to edit.
:type data_src: str
:param value: Values to edit.
:type value: dict
"""
# check if opening file
if 'filename' in value:
items = [k for k, v in self.reg.data_source.iteritems() if
v == data_src]
self.reg.unregister(items) # remove items from Registry
# open file and register new data
self.open(data_src, value['filename'], value.get('path'))
self.layer[data_src].update(value) | python | def edit(self, data_src, value):
"""
Edit data layer.
:param data_src: Name of :class:`DataSource` to edit.
:type data_src: str
:param value: Values to edit.
:type value: dict
"""
# check if opening file
if 'filename' in value:
items = [k for k, v in self.reg.data_source.iteritems() if
v == data_src]
self.reg.unregister(items) # remove items from Registry
# open file and register new data
self.open(data_src, value['filename'], value.get('path'))
self.layer[data_src].update(value) | [
"def",
"edit",
"(",
"self",
",",
"data_src",
",",
"value",
")",
":",
"# check if opening file",
"if",
"'filename'",
"in",
"value",
":",
"items",
"=",
"[",
"k",
"for",
"k",
",",
"v",
"in",
"self",
".",
"reg",
".",
"data_source",
".",
"iteritems",
"(",
... | Edit data layer.
:param data_src: Name of :class:`DataSource` to edit.
:type data_src: str
:param value: Values to edit.
:type value: dict | [
"Edit",
"data",
"layer",
"."
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/layers.py#L203-L219 | train | 49,325 |
BreakingBytes/simkit | simkit/core/layers.py | Data.delete | def delete(self, data_src):
"""
Delete data sources.
"""
items = self.objects[data_src].data.keys() # items to edit
self.reg.unregister(items) # remove items from Registry
self.layer.pop(data_src) # remove data source from layer
self.objects.pop(data_src) # remove data_source object
self.sources.pop(data_src) | python | def delete(self, data_src):
"""
Delete data sources.
"""
items = self.objects[data_src].data.keys() # items to edit
self.reg.unregister(items) # remove items from Registry
self.layer.pop(data_src) # remove data source from layer
self.objects.pop(data_src) # remove data_source object
self.sources.pop(data_src) | [
"def",
"delete",
"(",
"self",
",",
"data_src",
")",
":",
"items",
"=",
"self",
".",
"objects",
"[",
"data_src",
"]",
".",
"data",
".",
"keys",
"(",
")",
"# items to edit",
"self",
".",
"reg",
".",
"unregister",
"(",
"items",
")",
"# remove items from Reg... | Delete data sources. | [
"Delete",
"data",
"sources",
"."
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/layers.py#L221-L229 | train | 49,326 |
BreakingBytes/simkit | simkit/core/layers.py | Formulas.load | def load(self, _=None):
"""
Add formulas to layer.
"""
for k, v in self.layer.iteritems():
self.add(k, v['module'], v.get('package')) | python | def load(self, _=None):
"""
Add formulas to layer.
"""
for k, v in self.layer.iteritems():
self.add(k, v['module'], v.get('package')) | [
"def",
"load",
"(",
"self",
",",
"_",
"=",
"None",
")",
":",
"for",
"k",
",",
"v",
"in",
"self",
".",
"layer",
".",
"iteritems",
"(",
")",
":",
"self",
".",
"add",
"(",
"k",
",",
"v",
"[",
"'module'",
"]",
",",
"v",
".",
"get",
"(",
"'packa... | Add formulas to layer. | [
"Add",
"formulas",
"to",
"layer",
"."
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/layers.py#L265-L270 | train | 49,327 |
BreakingBytes/simkit | simkit/core/layers.py | Calculations.add | def add(self, calc, module, package=None):
"""
Add calc to layer.
"""
super(Calculations, self).add(calc, module, package)
# only update layer info if it is missing!
if calc not in self.layer:
# copy calc source parameters to :attr:`Layer.layer`
self.layer[calc] = {'module': module, 'package': package}
# instantiate the calc object
self.objects[calc] = self.sources[calc]()
# register calc and dependencies in registry
calc_src_obj = self.objects[calc]
meta = [getattr(calc_src_obj, m) for m in self.reg.meta_names]
self.reg.register(calc_src_obj.calcs, *meta) | python | def add(self, calc, module, package=None):
"""
Add calc to layer.
"""
super(Calculations, self).add(calc, module, package)
# only update layer info if it is missing!
if calc not in self.layer:
# copy calc source parameters to :attr:`Layer.layer`
self.layer[calc] = {'module': module, 'package': package}
# instantiate the calc object
self.objects[calc] = self.sources[calc]()
# register calc and dependencies in registry
calc_src_obj = self.objects[calc]
meta = [getattr(calc_src_obj, m) for m in self.reg.meta_names]
self.reg.register(calc_src_obj.calcs, *meta) | [
"def",
"add",
"(",
"self",
",",
"calc",
",",
"module",
",",
"package",
"=",
"None",
")",
":",
"super",
"(",
"Calculations",
",",
"self",
")",
".",
"add",
"(",
"calc",
",",
"module",
",",
"package",
")",
"# only update layer info if it is missing!",
"if",
... | Add calc to layer. | [
"Add",
"calc",
"to",
"layer",
"."
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/layers.py#L286-L300 | train | 49,328 |
BreakingBytes/simkit | simkit/core/layers.py | Outputs.add | def add(self, output, module, package=None):
"""
Add output to
"""
super(Outputs, self).add(output, module, package)
# only update layer info if it is missing!
if output not in self.layer:
# copy output source parameters to :attr:`Layer.layer`
self.layer[output] = {'module': module, 'package': package}
# instantiate the output object
self.objects[output] = self.sources[output]()
# register outputs and meta-data in registry
out_src_obj = self.objects[output]
meta = [getattr(out_src_obj, m) for m in self.reg.meta_names]
self.reg.register(out_src_obj.outputs, *meta) | python | def add(self, output, module, package=None):
"""
Add output to
"""
super(Outputs, self).add(output, module, package)
# only update layer info if it is missing!
if output not in self.layer:
# copy output source parameters to :attr:`Layer.layer`
self.layer[output] = {'module': module, 'package': package}
# instantiate the output object
self.objects[output] = self.sources[output]()
# register outputs and meta-data in registry
out_src_obj = self.objects[output]
meta = [getattr(out_src_obj, m) for m in self.reg.meta_names]
self.reg.register(out_src_obj.outputs, *meta) | [
"def",
"add",
"(",
"self",
",",
"output",
",",
"module",
",",
"package",
"=",
"None",
")",
":",
"super",
"(",
"Outputs",
",",
"self",
")",
".",
"add",
"(",
"output",
",",
"module",
",",
"package",
")",
"# only update layer info if it is missing!",
"if",
... | Add output to | [
"Add",
"output",
"to"
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/layers.py#L326-L340 | train | 49,329 |
BreakingBytes/simkit | simkit/core/layers.py | Simulations.add | def add(self, sim, module, package=None):
"""
Add simulation to layer.
"""
super(Simulations, self).add(sim, module, package)
# only update layer info if it is missing!
if sim not in self.layer:
# copy simulation source parameters to :attr:`Layer.layer`
self.layer[sim] = {'module': module, 'package': package} | python | def add(self, sim, module, package=None):
"""
Add simulation to layer.
"""
super(Simulations, self).add(sim, module, package)
# only update layer info if it is missing!
if sim not in self.layer:
# copy simulation source parameters to :attr:`Layer.layer`
self.layer[sim] = {'module': module, 'package': package} | [
"def",
"add",
"(",
"self",
",",
"sim",
",",
"module",
",",
"package",
"=",
"None",
")",
":",
"super",
"(",
"Simulations",
",",
"self",
")",
".",
"add",
"(",
"sim",
",",
"module",
",",
"package",
")",
"# only update layer info if it is missing!",
"if",
"s... | Add simulation to layer. | [
"Add",
"simulation",
"to",
"layer",
"."
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/layers.py#L366-L374 | train | 49,330 |
BreakingBytes/simkit | simkit/core/layers.py | Simulations.load | def load(self, rel_path=None):
"""
Add sim_src to layer.
"""
for k, v in self.layer.iteritems():
self.add(k, v['module'], v.get('package'))
filename = v.get('filename')
path = v.get('path')
if filename:
warnings.warn(DeprecationWarning(SIMFILE_LOAD_WARNING))
# default path for data is in ../simulations
if not path:
path = rel_path
else:
path = os.path.join(rel_path, path)
filename = os.path.join(path, filename)
self.open(k, filename) | python | def load(self, rel_path=None):
"""
Add sim_src to layer.
"""
for k, v in self.layer.iteritems():
self.add(k, v['module'], v.get('package'))
filename = v.get('filename')
path = v.get('path')
if filename:
warnings.warn(DeprecationWarning(SIMFILE_LOAD_WARNING))
# default path for data is in ../simulations
if not path:
path = rel_path
else:
path = os.path.join(rel_path, path)
filename = os.path.join(path, filename)
self.open(k, filename) | [
"def",
"load",
"(",
"self",
",",
"rel_path",
"=",
"None",
")",
":",
"for",
"k",
",",
"v",
"in",
"self",
".",
"layer",
".",
"iteritems",
"(",
")",
":",
"self",
".",
"add",
"(",
"k",
",",
"v",
"[",
"'module'",
"]",
",",
"v",
".",
"get",
"(",
... | Add sim_src to layer. | [
"Add",
"sim_src",
"to",
"layer",
"."
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/layers.py#L386-L402 | train | 49,331 |
BreakingBytes/simkit | examples/PVPower/pvpower/formulas/irradiance.py | f_total_irrad | def f_total_irrad(times, surface_tilt, surface_azimuth, solar_zenith,
solar_azimuth, dni, ghi, dhi, dni_extra, am_abs,
model='haydavies'):
"""
Calculate total irradiance
:param times: timestamps
:param surface_tilt: panel tilt from horizontal [deg]
:param surface_azimuth: panel azimuth from north [deg]
:param solar_zenith: refracted solar zenith angle [deg]
:param solar_azimuth: solar azimuth [deg]
:param dni: direct normal irradiance [W/m**2]
:param ghi: global horizonal irradiance [W/m**2]
:param dhi: diffuse horizontal irradiance [W/m**2]
:param dni_extra: extraterrestrial irradiance [W/m**2]
:param am_abs: absolute airmass [dimensionless]
:param model: irradiance model name, default is ``'haydavies'``
:type model: str
:return: global, direct and diffuse plane of array irradiance [W/m**2]
"""
am_abs = am_abs.squeeze()
# make a DataFrame for time series arguments
df = pd.DataFrame(
{'solar_zenith': solar_zenith, 'solar_azimuth': solar_azimuth,
'dni': dni, 'ghi': ghi, 'dhi': dhi, 'dni_extra': dni_extra,
'am_abs': am_abs},
index=times
)
# calculate total irradiance using PVLIB
total_irrad = pvlib.irradiance.total_irrad(
surface_tilt, surface_azimuth, df['solar_zenith'], df['solar_azimuth'],
df['dni'], df['ghi'], df['dhi'], dni_extra=df['dni_extra'],
airmass=df['am_abs'], model=model
).fillna(0.0)
# convert to ndarrays
poa_global = total_irrad['poa_global'].values
poa_direct = total_irrad['poa_direct'].values
poa_diffuse = total_irrad['poa_diffuse'].values
return poa_global, poa_direct, poa_diffuse | python | def f_total_irrad(times, surface_tilt, surface_azimuth, solar_zenith,
solar_azimuth, dni, ghi, dhi, dni_extra, am_abs,
model='haydavies'):
"""
Calculate total irradiance
:param times: timestamps
:param surface_tilt: panel tilt from horizontal [deg]
:param surface_azimuth: panel azimuth from north [deg]
:param solar_zenith: refracted solar zenith angle [deg]
:param solar_azimuth: solar azimuth [deg]
:param dni: direct normal irradiance [W/m**2]
:param ghi: global horizonal irradiance [W/m**2]
:param dhi: diffuse horizontal irradiance [W/m**2]
:param dni_extra: extraterrestrial irradiance [W/m**2]
:param am_abs: absolute airmass [dimensionless]
:param model: irradiance model name, default is ``'haydavies'``
:type model: str
:return: global, direct and diffuse plane of array irradiance [W/m**2]
"""
am_abs = am_abs.squeeze()
# make a DataFrame for time series arguments
df = pd.DataFrame(
{'solar_zenith': solar_zenith, 'solar_azimuth': solar_azimuth,
'dni': dni, 'ghi': ghi, 'dhi': dhi, 'dni_extra': dni_extra,
'am_abs': am_abs},
index=times
)
# calculate total irradiance using PVLIB
total_irrad = pvlib.irradiance.total_irrad(
surface_tilt, surface_azimuth, df['solar_zenith'], df['solar_azimuth'],
df['dni'], df['ghi'], df['dhi'], dni_extra=df['dni_extra'],
airmass=df['am_abs'], model=model
).fillna(0.0)
# convert to ndarrays
poa_global = total_irrad['poa_global'].values
poa_direct = total_irrad['poa_direct'].values
poa_diffuse = total_irrad['poa_diffuse'].values
return poa_global, poa_direct, poa_diffuse | [
"def",
"f_total_irrad",
"(",
"times",
",",
"surface_tilt",
",",
"surface_azimuth",
",",
"solar_zenith",
",",
"solar_azimuth",
",",
"dni",
",",
"ghi",
",",
"dhi",
",",
"dni_extra",
",",
"am_abs",
",",
"model",
"=",
"'haydavies'",
")",
":",
"am_abs",
"=",
"a... | Calculate total irradiance
:param times: timestamps
:param surface_tilt: panel tilt from horizontal [deg]
:param surface_azimuth: panel azimuth from north [deg]
:param solar_zenith: refracted solar zenith angle [deg]
:param solar_azimuth: solar azimuth [deg]
:param dni: direct normal irradiance [W/m**2]
:param ghi: global horizonal irradiance [W/m**2]
:param dhi: diffuse horizontal irradiance [W/m**2]
:param dni_extra: extraterrestrial irradiance [W/m**2]
:param am_abs: absolute airmass [dimensionless]
:param model: irradiance model name, default is ``'haydavies'``
:type model: str
:return: global, direct and diffuse plane of array irradiance [W/m**2] | [
"Calculate",
"total",
"irradiance"
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/examples/PVPower/pvpower/formulas/irradiance.py#L63-L101 | train | 49,332 |
def creation_ordered(class_to_decorate):
    """
    Class decorator that ensures that instances will be ordered after creation order when sorted.

    :type class_to_decorate: class
    :rtype: class
    """
    # Monotonic counter shared by all instances of the decorated class.
    counter = itertools.count()

    original_init = class_to_decorate.__init__

    @functools.wraps(original_init, assigned=['__doc__'])
    def __init__(self, *args, **kwargs):
        # object.__setattr__ bypasses any __setattr__ override (e.g. frozen classes).
        object.__setattr__(self, '_index', next(counter))
        original_init(self, *args, **kwargs)

    class_to_decorate.__init__ = __init__

    # noinspection PyProtectedMember
    def __lt__(self, other):
        return self._index < other._index  # pragma: no mutate

    class_to_decorate.__lt__ = __lt__

    # total_ordering fills in <=, >, >= from __lt__ and __eq__.
    return functools.total_ordering(class_to_decorate)
"""
Class decorator that ensures that instances will be ordered after creation order when sorted.
:type class_to_decorate: class
:rtype: class
"""
next_index = functools.partial(next, itertools.count())
__init__orig = class_to_decorate.__init__
@functools.wraps(__init__orig, assigned=['__doc__'])
def __init__(self, *args, **kwargs):
object.__setattr__(self, '_index', next_index())
__init__orig(self, *args, **kwargs)
setattr(class_to_decorate, '__init__', __init__)
# noinspection PyProtectedMember
def __lt__(self, other):
return self._index < other._index # pragma: no mutate
setattr(class_to_decorate, '__lt__', __lt__)
class_to_decorate = functools.total_ordering(class_to_decorate)
return class_to_decorate | [
"def",
"creation_ordered",
"(",
"class_to_decorate",
")",
":",
"next_index",
"=",
"functools",
".",
"partial",
"(",
"next",
",",
"itertools",
".",
"count",
"(",
")",
")",
"__init__orig",
"=",
"class_to_decorate",
".",
"__init__",
"@",
"functools",
".",
"wraps"... | Class decorator that ensures that instances will be ordered after creation order when sorted.
:type class_to_decorate: class
:rtype: class | [
"Class",
"decorator",
"that",
"ensures",
"that",
"instances",
"will",
"be",
"ordered",
"after",
"creation",
"order",
"when",
"sorted",
"."
] | 13d90d4c2a10934e37a4139e63d51a859fb3e303 | https://github.com/TriOptima/tri.declarative/blob/13d90d4c2a10934e37a4139e63d51a859fb3e303/lib/tri/declarative/__init__.py#L67-L94 | train | 49,333 |
def get_members(cls, member_class=None, is_member=None, sort_key=None, _parameter=None):
    """
    Collect all class level attributes matching the given criteria.

    :param class member_class: Class(es) to collect
    :param is_member: Function to determine if an object should be collected
    :param sort_key: Function to invoke on members to obtain ordering (Default is to use ordering from `creation_ordered`)
    :type is_member: (object) -> bool
    :type sort_key: (object) -> object
    """
    if member_class is None and is_member is None:
        raise TypeError("get_members either needs a member_class parameter or an is_member check function (or both)")

    members = OrderedDict()
    # Inherited members first, so definitions on cls itself override them.
    for base in cls.__bases__:
        if _parameter is None:
            inherited_members = get_members(base, member_class=member_class, is_member=is_member, sort_key=sort_key)
        else:
            # When used by @declarative, only traverse up the class inheritance to the decorated class.
            inherited_members = get_declared(base, _parameter)

        members.update(inherited_members)

    def generate_member_bindings():
        for name in cls.__dict__:
            if name.startswith('__'):
                continue
            obj = getattr(cls, name)
            if member_class is not None and isinstance(obj, member_class):
                yield name, obj
            elif is_member is not None and is_member(obj):
                yield name, obj
            elif member_class is not None and type(obj) is tuple and len(obj) == 1 and isinstance(obj[0], member_class):
                # BUGFIX: guard on member_class so this diagnostic branch cannot call
                # isinstance(obj[0], None) and raise a confusing unrelated TypeError
                # when only is_member was supplied.
                raise TypeError("'%s' is a one-tuple containing what we are looking for. Trailing comma much? Don't... just don't." % name)  # pragma: no mutate

    bindings = generate_member_bindings()
    if sort_key is not None:
        try:
            sorted_bindings = sorted(bindings, key=lambda x: sort_key(x[1]))
        except AttributeError:
            if sort_key is default_sort_key:
                raise TypeError('Missing member ordering definition. Use @creation_ordered or specify sort_key')
            else:  # pragma: no covererage
                raise
        members.update(sorted_bindings)
    else:
        members.update(bindings)

    return members
"""
Collect all class level attributes matching the given criteria.
:param class member_class: Class(es) to collect
:param is_member: Function to determine if an object should be collected
:param sort_key: Function to invoke on members to obtain ordering (Default is to use ordering from `creation_ordered`)
:type is_member: (object) -> bool
:type sort_key: (object) -> object
"""
if member_class is None and is_member is None:
raise TypeError("get_members either needs a member_class parameter or an is_member check function (or both)")
members = OrderedDict()
for base in cls.__bases__:
if _parameter is None:
inherited_members = get_members(base, member_class=member_class, is_member=is_member, sort_key=sort_key)
else:
# When user by @declarative, only traverse up the class inheritance to the decorated class.
inherited_members = get_declared(base, _parameter)
members.update(inherited_members)
def generate_member_bindings():
for name in cls.__dict__:
if name.startswith('__'):
continue
obj = getattr(cls, name)
if member_class is not None and isinstance(obj, member_class):
yield name, obj
elif is_member is not None and is_member(obj):
yield name, obj
elif type(obj) is tuple and len(obj) == 1 and isinstance(obj[0], member_class):
raise TypeError("'%s' is a one-tuple containing what we are looking for. Trailing comma much? Don't... just don't." % name) # pragma: no mutate
bindings = generate_member_bindings()
if sort_key is not None:
try:
sorted_bindings = sorted(bindings, key=lambda x: sort_key(x[1]))
except AttributeError:
if sort_key is default_sort_key:
raise TypeError('Missing member ordering definition. Use @creation_ordered or specify sort_key')
else: # pragma: no covererage
raise
members.update(sorted_bindings)
else:
members.update(bindings)
return members | [
"def",
"get_members",
"(",
"cls",
",",
"member_class",
"=",
"None",
",",
"is_member",
"=",
"None",
",",
"sort_key",
"=",
"None",
",",
"_parameter",
"=",
"None",
")",
":",
"if",
"member_class",
"is",
"None",
"and",
"is_member",
"is",
"None",
":",
"raise",... | Collect all class level attributes matching the given criteria.
:param class member_class: Class(es) to collect
:param is_member: Function to determine if an object should be collected
:param sort_key: Function to invoke on members to obtain ordering (Default is to use ordering from `creation_ordered`)
:type is_member: (object) -> bool
:type sort_key: (object) -> object | [
"Collect",
"all",
"class",
"level",
"attributes",
"matching",
"the",
"given",
"criteria",
"."
] | 13d90d4c2a10934e37a4139e63d51a859fb3e303 | https://github.com/TriOptima/tri.declarative/blob/13d90d4c2a10934e37a4139e63d51a859fb3e303/lib/tri/declarative/__init__.py#L102-L151 | train | 49,334 |
def generate_rst_docs(directory, classes, missing_objects=None):
    """
    Generate documentation for tri.declarative APIs

    :param directory: directory to write the .rst files into
    :param classes: list of classes to generate documentation for
    :param missing_objects: tuple of objects to count as missing markers, if applicable
    """
    docs = _generate_rst_docs(classes=classes, missing_objects=missing_objects)  # pragma: no mutate
    # Write each rendered document into the target directory.
    for filename, doc in docs:  # pragma: no mutate
        with open(directory + filename, 'w') as out_file:  # pragma: no mutate
            out_file.write(doc)
"""
Generate documentation for tri.declarative APIs
:param directory: directory to write the .rst files into
:param classes: list of classes to generate documentation for
:param missing_objects: tuple of objects to count as missing markers, if applicable
"""
doc_by_filename = _generate_rst_docs(classes=classes, missing_objects=missing_objects) # pragma: no mutate
for filename, doc in doc_by_filename: # pragma: no mutate
with open(directory + filename, 'w') as f2: # pragma: no mutate
f2.write(doc) | [
"def",
"generate_rst_docs",
"(",
"directory",
",",
"classes",
",",
"missing_objects",
"=",
"None",
")",
":",
"doc_by_filename",
"=",
"_generate_rst_docs",
"(",
"classes",
"=",
"classes",
",",
"missing_objects",
"=",
"missing_objects",
")",
"# pragma: no mutate",
"fo... | Generate documentation for tri.declarative APIs
:param directory: directory to write the .rst files into
:param classes: list of classes to generate documentation for
:param missing_objects: tuple of objects to count as missing markers, if applicable | [
"Generate",
"documentation",
"for",
"tri",
".",
"declarative",
"APIs"
] | 13d90d4c2a10934e37a4139e63d51a859fb3e303 | https://github.com/TriOptima/tri.declarative/blob/13d90d4c2a10934e37a4139e63d51a859fb3e303/lib/tri/declarative/__init__.py#L844-L856 | train | 49,335 |
def reg_copy(reg, keys=None):
    """
    Make a copy of a subset of a registry.

    :param reg: source registry
    :param keys: keys of registry items to copy
    :return: copied registry subset
    """
    if keys is None:
        keys = reg.keys()
    registry_class = type(reg)
    subset = registry_class()
    meta_kwargs = {}
    # Carry over each registry meta mapping, restricted to the copied keys.
    for meta_name in registry_class.meta_names:
        source_meta = getattr(reg, meta_name, None)
        if not source_meta:
            # Missing or empty meta becomes None (metas are dicts, never 0/False).
            meta_kwargs[meta_name] = None
            continue
        # Keep only entries for the copied keys that actually have a value.
        meta_kwargs[meta_name] = {
            key: source_meta[key] for key in keys if source_meta.get(key) is not None
        }
    subset.register({key: reg[key] for key in keys}, **meta_kwargs)
    return subset
"""
Make a copy of a subset of a registry.
:param reg: source registry
:param keys: keys of registry items to copy
:return: copied registry subset
"""
if keys is None:
keys = reg.keys()
reg_cls = type(reg)
new_reg = reg_cls()
mk = {} # empty dictionary for meta keys
# loop over registry meta names
for m in reg_cls.meta_names:
mstar = getattr(reg, m, None) # current value of metakey in registry
if not mstar:
# if there is no value, the value is empty or None, set it to None
# it's never false or zero, should be dictionary of reg items
mk[m] = None
continue
mk[m] = {} # emtpy dictionary of registry meta
# loop over keys to copy and set values of meta keys for each reg item
for k in keys:
kstar = mstar.get(k)
# if key exists in registry meta and is not None, then copy it
if kstar is not None:
mk[m][k] = kstar
new_reg.register({k: reg[k] for k in keys}, **mk)
return new_reg | [
"def",
"reg_copy",
"(",
"reg",
",",
"keys",
"=",
"None",
")",
":",
"if",
"keys",
"is",
"None",
":",
"keys",
"=",
"reg",
".",
"keys",
"(",
")",
"reg_cls",
"=",
"type",
"(",
"reg",
")",
"new_reg",
"=",
"reg_cls",
"(",
")",
"mk",
"=",
"{",
"}",
... | Make a copy of a subset of a registry.
:param reg: source registry
:param keys: keys of registry items to copy
:return: copied registry subset | [
"Make",
"a",
"copy",
"of",
"a",
"subset",
"of",
"a",
"registry",
"."
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/contrib/lazy_looping_calculator.py#L14-L43 | train | 49,336 |
def listen_many(*rooms):
    """Listen for changes in all registered listeners in all specified rooms"""
    # De-duplicate: several Room objects may share one connection.
    connections = set(room.conn for room in rooms)
    for connection in connections:
        connection.validate_listeners()
    with ARBITRATOR.condition:
        while any(connection.connected for connection in connections):
            ARBITRATOR.condition.wait()
            # Drop connections whose queues report no more work.
            connections = [c for c in connections if c.run_queues()]
            if not connections:
                return
"""Listen for changes in all registered listeners in all specified rooms"""
rooms = set(r.conn for r in rooms)
for room in rooms:
room.validate_listeners()
with ARBITRATOR.condition:
while any(r.connected for r in rooms):
ARBITRATOR.condition.wait()
rooms = [r for r in rooms if r.run_queues()]
if not rooms:
return | [
"def",
"listen_many",
"(",
"*",
"rooms",
")",
":",
"rooms",
"=",
"set",
"(",
"r",
".",
"conn",
"for",
"r",
"in",
"rooms",
")",
"for",
"room",
"in",
"rooms",
":",
"room",
".",
"validate_listeners",
"(",
")",
"with",
"ARBITRATOR",
".",
"condition",
":"... | Listen for changes in all registered listeners in all specified rooms | [
"Listen",
"for",
"changes",
"in",
"all",
"registered",
"listeners",
"in",
"all",
"specified",
"rooms"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L907-L918 | train | 49,337 |
def connect(self, username, checksum, password=None, key=None):
    """Connect to websocket through asyncio http interface"""
    # Build the engine.io handshake URL; password takes precedence over key.
    query = (
        f"room={self.room.room_id}&cs={checksum}&nick={username}"
        f"&rn={random_id(6)}&t={int(time.time() * 1000)}&transport=websocket&EIO=3"
    )
    ws_url = f"{BASE_WS_URL}?{query}"
    if password:
        ws_url += f"&password={password}"
    elif key:
        ws_url += f"&key={key}"
    ARBITRATOR.create_connection(
        self.proto, ws_url, self.headers["User-Agent"], self.cookies
    )
    # Block until the arbitrator thread reports the socket as open.
    self.__conn_barrier.wait()
"""Connect to websocket through asyncio http interface"""
ws_url = (
f"{BASE_WS_URL}?room={self.room.room_id}&cs={checksum}&nick={username}"
f"&rn={random_id(6)}&t={int(time.time() * 1000)}&transport=websocket&EIO=3"
)
if password:
ws_url += f"&password={password}"
elif key:
ws_url += f"&key={key}"
ARBITRATOR.create_connection(
self.proto, ws_url, self.headers["User-Agent"], self.cookies
)
self.__conn_barrier.wait() | [
"def",
"connect",
"(",
"self",
",",
"username",
",",
"checksum",
",",
"password",
"=",
"None",
",",
"key",
"=",
"None",
")",
":",
"ws_url",
"=",
"(",
"f\"{BASE_WS_URL}?room={self.room.room_id}&cs={checksum}&nick={username}\"",
"f\"&rn={random_id(6)}&t={int(time.time() * 1... | Connect to websocket through asyncio http interface | [
"Connect",
"to",
"websocket",
"through",
"asyncio",
"http",
"interface"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L74-L89 | train | 49,338 |
def send_ack(self):
    """Send an ack message"""
    latest = self.proto.max_id
    # Nothing new to acknowledge since the previous ack.
    if self.last_ack == latest:
        return
    LOGGER.debug("ack (%d)", latest)
    self.last_ack = latest
    self.send_message(f"4{to_json([latest])}")
"""Send an ack message"""
if self.last_ack == self.proto.max_id:
return
LOGGER.debug("ack (%d)", self.proto.max_id)
self.last_ack = self.proto.max_id
self.send_message(f"4{to_json([self.proto.max_id])}") | [
"def",
"send_ack",
"(",
"self",
")",
":",
"if",
"self",
".",
"last_ack",
"==",
"self",
".",
"proto",
".",
"max_id",
":",
"return",
"LOGGER",
".",
"debug",
"(",
"\"ack (%d)\"",
",",
"self",
".",
"proto",
".",
"max_id",
")",
"self",
".",
"last_ack",
"=... | Send an ack message | [
"Send",
"an",
"ack",
"message"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L112-L119 | train | 49,339 |
def make_call(self, fun, *args):
    """Makes a regular API call"""
    call = {"fn": fun, "args": list(args)}
    # Frame layout: [ack id, [[0, ["call", payload]], sequence number]]
    frame = [self.proto.max_id, [[0, ["call", call]], self.proto.send_count]]
    self.send_message(f"4{to_json(frame)}")
    self.proto.send_count += 1
"""Makes a regular API call"""
obj = {"fn": fun, "args": list(args)}
obj = [self.proto.max_id, [[0, ["call", obj]], self.proto.send_count]]
self.send_message(f"4{to_json(obj)}")
self.proto.send_count += 1 | [
"def",
"make_call",
"(",
"self",
",",
"fun",
",",
"*",
"args",
")",
":",
"obj",
"=",
"{",
"\"fn\"",
":",
"fun",
",",
"\"args\"",
":",
"list",
"(",
"args",
")",
"}",
"obj",
"=",
"[",
"self",
".",
"proto",
".",
"max_id",
",",
"[",
"[",
"0",
","... | Makes a regular API call | [
"Makes",
"a",
"regular",
"API",
"call"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L121-L127 | train | 49,340 |
def make_call_with_cb(self, fun, *args):
    """Makes an API call with a callback to wait for"""
    callback_id, event = self.handler.register_callback()
    # The callback id is appended as the trailing call argument.
    self.make_call(fun, *args, callback_id)
    return event
"""Makes an API call with a callback to wait for"""
cid, event = self.handler.register_callback()
argscp = list(args)
argscp.append(cid)
self.make_call(fun, *argscp)
return event | [
"def",
"make_call_with_cb",
"(",
"self",
",",
"fun",
",",
"*",
"args",
")",
":",
"cid",
",",
"event",
"=",
"self",
".",
"handler",
".",
"register_callback",
"(",
")",
"argscp",
"=",
"list",
"(",
"args",
")",
"argscp",
".",
"append",
"(",
"cid",
")",
... | Makes an API call with a callback to wait for | [
"Makes",
"an",
"API",
"call",
"with",
"a",
"callback",
"to",
"wait",
"for"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L129-L136 | train | 49,341 |
def make_api_call(self, call, params):
    """Make a REST API call"""
    if not isinstance(params, dict):
        raise ValueError("params argument must be a dictionary")
    # Origin/Referer headers are required by the REST endpoint.
    headers = {"Origin": BASE_URL, "Referer": f"{BASE_URL}/r/{self.room.name}"}
    response = self.get(BASE_REST_URL + call, params=params, headers=headers)
    return response.json()
"""Make a REST API call"""
if not isinstance(params, dict):
raise ValueError("params argument must be a dictionary")
kw = dict(
params=params,
headers={"Origin": BASE_URL, "Referer": f"{BASE_URL}/r/{self.room.name}"},
)
return self.get(BASE_REST_URL + call, **kw).json() | [
"def",
"make_api_call",
"(",
"self",
",",
"call",
",",
"params",
")",
":",
"if",
"not",
"isinstance",
"(",
"params",
",",
"dict",
")",
":",
"raise",
"ValueError",
"(",
"\"params argument must be a dictionary\"",
")",
"kw",
"=",
"dict",
"(",
"params",
"=",
... | Make a REST API call | [
"Make",
"a",
"REST",
"API",
"call"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L138-L147 | train | 49,342 |
def reraise(self, ex):
    """Reraise an exception passed by the event thread"""
    # Record the exception for listener threads to re-raise, then force a
    # queue-processing pass so any thread blocked in listen() wakes up.
    self.exception = ex
    self.process_queues(forced=True)
"""Reraise an exception passed by the event thread"""
self.exception = ex
self.process_queues(forced=True) | [
"def",
"reraise",
"(",
"self",
",",
"ex",
")",
":",
"self",
".",
"exception",
"=",
"ex",
"self",
".",
"process_queues",
"(",
"forced",
"=",
"True",
")"
] | Reraise an exception passed by the event thread | [
"Reraise",
"an",
"exception",
"passed",
"by",
"the",
"event",
"thread"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L149-L153 | train | 49,343 |
def close(self):
    """Closes connection pair"""
    if self.connected:
        # Send an engine.io close frame so the server tears down cleanly.
        obj = [self.proto.max_id, [[2], self.proto.send_count]]
        ARBITRATOR.send_sync_message(self.proto, f"4{to_json(obj)}")
        self.proto.send_count += 1
    ARBITRATOR.close(self.proto)
    self.listeners.clear()
    self.proto.connected = False
    super().close()
    # Break reference cycles between room, connection and protocol.
    del self.room
    del self.proto
"""Closes connection pair"""
if self.connected:
obj = [self.proto.max_id, [[2], self.proto.send_count]]
ARBITRATOR.send_sync_message(self.proto, f"4{to_json(obj)}")
self.proto.send_count += 1
ARBITRATOR.close(self.proto)
self.listeners.clear()
self.proto.connected = False
super().close()
del self.room
del self.proto | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"connected",
":",
"obj",
"=",
"[",
"self",
".",
"proto",
".",
"max_id",
",",
"[",
"[",
"2",
"]",
",",
"self",
".",
"proto",
".",
"send_count",
"]",
"]",
"ARBITRATOR",
".",
"send_sync_message... | Closes connection pair | [
"Closes",
"connection",
"pair"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L155-L167 | train | 49,344 |
async def on_open(self):
    """DingDongmaster the connection is open"""
    self.__ensure_barrier()
    # Keep-alive loop: send an engine.io ping ("2") plus any pending ack on
    # every interval until the connection goes away.
    while self.connected:
        try:
            if self.__lastping > self.__lastpong:
                # The previous ping was never answered -> treat the link as dead.
                raise IOError("Last ping remained unanswered")
            self.send_message("2")
            self.send_ack()
            self.__lastping = time.time()
            await asyncio.sleep(self.ping_interval)
        except Exception as ex:
            LOGGER.exception("Failed to ping")
            try:
                # Hand the error to listener threads; errors here must not
                # propagate out of the event loop task.
                self.reraise(ex)
            except Exception:
                LOGGER.exception(
                    "failed to force close connection after ping error"
                )
            break
"""DingDongmaster the connection is open"""
self.__ensure_barrier()
while self.connected:
try:
if self.__lastping > self.__lastpong:
raise IOError("Last ping remained unanswered")
self.send_message("2")
self.send_ack()
self.__lastping = time.time()
await asyncio.sleep(self.ping_interval)
except Exception as ex:
LOGGER.exception("Failed to ping")
try:
self.reraise(ex)
except Exception:
LOGGER.exception(
"failed to force close connection after ping error"
)
break | [
"async",
"def",
"on_open",
"(",
"self",
")",
":",
"self",
".",
"__ensure_barrier",
"(",
")",
"while",
"self",
".",
"connected",
":",
"try",
":",
"if",
"self",
".",
"__lastping",
">",
"self",
".",
"__lastpong",
":",
"raise",
"IOError",
"(",
"\"Last ping r... | DingDongmaster the connection is open | [
"DingDongmaster",
"the",
"connection",
"is",
"open"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L174-L195 | train | 49,345 |
def on_message(self, new_data):
    """Processes incoming messages according to engine-io rules"""
    # https://github.com/socketio/engine.io-protocol
    LOGGER.debug("new frame [%r]", new_data)
    try:
        packet_type = int(new_data[0])
        payload = new_data[1:]
        payload = payload and from_json(payload)
        if packet_type == 0:
            # "open" packet: carries session parameters such as the ping interval.
            self.ping_interval = float(payload["pingInterval"]) / 1000
            LOGGER.debug("adjusted ping interval")
        elif packet_type == 1:
            LOGGER.debug("received close")
            self.reraise(IOError("Connection closed remotely"))
        elif packet_type == 3:
            # pong answering our ping
            self.__lastpong = time.time()
            LOGGER.debug("received a pong")
        elif packet_type == 4:
            # actual application message frame
            self._on_frame(payload)
        elif packet_type == 6:
            LOGGER.debug("received noop")
            self.send_message("5")
        else:
            LOGGER.debug("unhandled message: [%d] [%r]", packet_type, payload)
    except Exception as ex:
        self.reraise(ex)
"""Processes incoming messages according to engine-io rules"""
# https://github.com/socketio/engine.io-protocol
LOGGER.debug("new frame [%r]", new_data)
try:
what = int(new_data[0])
data = new_data[1:]
data = data and from_json(data)
if what == 0:
self.ping_interval = float(data["pingInterval"]) / 1000
LOGGER.debug("adjusted ping interval")
return
if what == 1:
LOGGER.debug("received close")
self.reraise(IOError("Connection closed remotely"))
return
if what == 3:
self.__lastpong = time.time()
LOGGER.debug("received a pong")
return
if what == 4:
self._on_frame(data)
return
if what == 6:
LOGGER.debug("received noop")
self.send_message("5")
return
LOGGER.debug("unhandled message: [%d] [%r]", what, data)
except Exception as ex:
self.reraise(ex) | [
"def",
"on_message",
"(",
"self",
",",
"new_data",
")",
":",
"# https://github.com/socketio/engine.io-protocol",
"LOGGER",
".",
"debug",
"(",
"\"new frame [%r]\"",
",",
"new_data",
")",
"try",
":",
"what",
"=",
"int",
"(",
"new_data",
"[",
"0",
"]",
")",
"data... | Processes incoming messages according to engine-io rules | [
"Processes",
"incoming",
"messages",
"according",
"to",
"engine",
"-",
"io",
"rules"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L232-L267 | train | 49,346 |
def add_listener(self, event_type, callback):
    """Add a listener for specific event type.
    You'll need to actually listen for changes using the listen method"""
    if not self.connected:
        # Give the event thread a moment to surface any pending error.
        time.sleep(1)
        if self.exception:
            # pylint: disable=raising-bad-type
            raise self.exception
        raise ConnectionError(f"{self.room} is not connected")
    with self.lock:
        self.listeners[get_thread_ident()].add(event_type, callback)
    # use "initial_files" event to listen for whole filelist on room join
    self.process_queues()
"""Add a listener for specific event type.
You'll need to actually listen for changes using the listen method"""
if not self.connected:
# wait for errors set by reraise method
time.sleep(1)
if self.exception:
# pylint: disable=raising-bad-type
raise self.exception
raise ConnectionError(f"{self.room} is not connected")
thread = get_thread_ident()
with self.lock:
listener = self.listeners[thread]
listener.add(event_type, callback)
# use "initial_files" event to listen for whole filelist on room join
self.process_queues() | [
"def",
"add_listener",
"(",
"self",
",",
"event_type",
",",
"callback",
")",
":",
"if",
"not",
"self",
".",
"connected",
":",
"# wait for errors set by reraise method",
"time",
".",
"sleep",
"(",
"1",
")",
"if",
"self",
".",
"exception",
":",
"# pylint: disabl... | Add a listener for specific event type.
You'll need to actually listen for changes using the listen method | [
"Add",
"a",
"listener",
"for",
"specific",
"event",
"type",
".",
"You",
"ll",
"need",
"to",
"actually",
"listen",
"for",
"changes",
"using",
"the",
"listen",
"method"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L269-L286 | train | 49,347 |
def enqueue_data(self, event_type, data):
    """Enqueue a data item for specific event type"""
    with self.lock:
        # Fan the item out to every registered listener, then flag that
        # the queues need a processing pass.
        for listener in self.listeners.values():
            listener.enqueue(event_type, data)
        self.must_process = True
"""Enqueue a data item for specific event type"""
with self.lock:
listeners = self.listeners.values()
for listener in listeners:
listener.enqueue(event_type, data)
self.must_process = True | [
"def",
"enqueue_data",
"(",
"self",
",",
"event_type",
",",
"data",
")",
":",
"with",
"self",
".",
"lock",
":",
"listeners",
"=",
"self",
".",
"listeners",
".",
"values",
"(",
")",
"for",
"listener",
"in",
"listeners",
":",
"listener",
".",
"enqueue",
... | Enqueue a data item for specific event type | [
"Enqueue",
"a",
"data",
"item",
"for",
"specific",
"event",
"type"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L288-L295 | train | 49,348 |
def process_queues(self, forced=False):
    """Process queues if any have data queued"""
    with self.lock:
        # Run only when there is pending work (or forced) AND queues are enabled.
        should_run = (forced or self.must_process) and self.queues_enabled
        if not should_run:
            return
        self.must_process = False
    ARBITRATOR.awaken()
"""Process queues if any have data queued"""
with self.lock:
if (not forced and not self.must_process) or not self.queues_enabled:
return
self.must_process = False
ARBITRATOR.awaken() | [
"def",
"process_queues",
"(",
"self",
",",
"forced",
"=",
"False",
")",
":",
"with",
"self",
".",
"lock",
":",
"if",
"(",
"not",
"forced",
"and",
"not",
"self",
".",
"must_process",
")",
"or",
"not",
"self",
".",
"queues_enabled",
":",
"return",
"self"... | Process queues if any have data queued | [
"Process",
"queues",
"if",
"any",
"have",
"data",
"queued"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L310-L317 | train | 49,349 |
def __listeners_for_thread(self):
    """All Listeners for the current thread"""
    current = get_thread_ident()
    with self.lock:
        return [listener for tid, listener in self.listeners.items() if tid == current]
"""All Listeners for the current thread"""
thread = get_thread_ident()
with self.lock:
return [l for tid, l in self.listeners.items() if tid == thread] | [
"def",
"__listeners_for_thread",
"(",
"self",
")",
":",
"thread",
"=",
"get_thread_ident",
"(",
")",
"with",
"self",
".",
"lock",
":",
"return",
"[",
"l",
"for",
"tid",
",",
"l",
"in",
"self",
".",
"listeners",
".",
"items",
"(",
")",
"if",
"tid",
"=... | All Listeners for the current thread | [
"All",
"Listeners",
"for",
"the",
"current",
"thread"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L320-L325 | train | 49,350 |
def validate_listeners(self):
    """Validates that some listeners are actually registered"""
    if self.exception:
        # pylint: disable=raising-bad-type
        raise self.exception
    # Count registered callbacks across this thread's listeners.
    total = sum(len(listener) for listener in self.__listeners_for_thread)
    if not total:
        raise ValueError("No active listeners")
"""Validates that some listeners are actually registered"""
if self.exception:
# pylint: disable=raising-bad-type
raise self.exception
listeners = self.__listeners_for_thread
if not sum(len(l) for l in listeners):
raise ValueError("No active listeners") | [
"def",
"validate_listeners",
"(",
"self",
")",
":",
"if",
"self",
".",
"exception",
":",
"# pylint: disable=raising-bad-type",
"raise",
"self",
".",
"exception",
"listeners",
"=",
"self",
".",
"__listeners_for_thread",
"if",
"not",
"sum",
"(",
"len",
"(",
"l",
... | Validates that some listeners are actually registered | [
"Validates",
"that",
"some",
"listeners",
"are",
"actually",
"registered"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L327-L336 | train | 49,351 |
def listen(self):
    """Listen for changes in all registered listeners."""
    self.validate_listeners()
    condition = ARBITRATOR.condition
    with condition:
        # Wake on every arbitrator notification; stop once the queues
        # report no remaining work or the connection drops.
        while self.connected:
            condition.wait()
            if not self.run_queues():
                break
"""Listen for changes in all registered listeners."""
self.validate_listeners()
with ARBITRATOR.condition:
while self.connected:
ARBITRATOR.condition.wait()
if not self.run_queues():
break | [
"def",
"listen",
"(",
"self",
")",
":",
"self",
".",
"validate_listeners",
"(",
")",
"with",
"ARBITRATOR",
".",
"condition",
":",
"while",
"self",
".",
"connected",
":",
"ARBITRATOR",
".",
"condition",
".",
"wait",
"(",
")",
"if",
"not",
"self",
".",
"... | Listen for changes in all registered listeners. | [
"Listen",
"for",
"changes",
"in",
"all",
"registered",
"listeners",
"."
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L338-L346 | train | 49,352 |
def run_queues(self):
    """Run all queues that have data queued"""
    if self.exception:
        # pylint: disable=raising-bad-type
        raise self.exception
    # True when at least one listener processed something.
    processed = sum(listener.process() for listener in self.__listeners_for_thread)
    return processed > 0
"""Run all queues that have data queued"""
if self.exception:
# pylint: disable=raising-bad-type
raise self.exception
listeners = self.__listeners_for_thread
return sum(l.process() for l in listeners) > 0 | [
"def",
"run_queues",
"(",
"self",
")",
":",
"if",
"self",
".",
"exception",
":",
"# pylint: disable=raising-bad-type",
"raise",
"self",
".",
"exception",
"listeners",
"=",
"self",
".",
"__listeners_for_thread",
"return",
"sum",
"(",
"l",
".",
"process",
"(",
"... | Run all queues that have data queued | [
"Run",
"all",
"queues",
"that",
"have",
"data",
"queued"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L348-L355 | train | 49,353 |
def __add_prop(self, key, admin=False):
    """Add gettable and settable room config property during runtime"""

    def read_value(room):
        return room.config[key]

    def write_value(room, val):
        # Admin-only keys may not be modified without mod privileges.
        if admin and not room.admin:
            raise RuntimeError(
                f"You can't set the {key} key without mod privileges"
            )
        room.__set_config_value(room.config.get_real_key(key), val)

    # Install the property on the class so every instance sees it.
    setattr(self.__class__, key, property(read_value, write_value))
"""Add gettable and settable room config property during runtime"""
def getter(self):
return self.config[key]
def setter(self, val):
if admin and not self.admin:
raise RuntimeError(
f"You can't set the {key} key without mod privileges"
)
self.__set_config_value(self.config.get_real_key(key), val)
setattr(self.__class__, key, property(getter, setter)) | [
"def",
"__add_prop",
"(",
"self",
",",
"key",
",",
"admin",
"=",
"False",
")",
":",
"def",
"getter",
"(",
"self",
")",
":",
"return",
"self",
".",
"config",
"[",
"key",
"]",
"def",
"setter",
"(",
"self",
",",
"val",
")",
":",
"if",
"admin",
"and"... | Add gettable and settable room config property during runtime | [
"Add",
"gettable",
"and",
"settable",
"room",
"config",
"property",
"during",
"runtime"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L417-L430 | train | 49,354 |
volafiled/python-volapi | volapi/volapi.py | Room.__set_config_value | def __set_config_value(self, key, value):
"""Sets a value for a room config"""
self.check_owner()
params = {"room": self.room_id, "config": to_json({key: value})}
resp = self.conn.make_api_call("setRoomConfig", params)
if "error" in resp:
raise RuntimeError(f"{resp['error'].get('message') or resp['error']}")
return resp | python | def __set_config_value(self, key, value):
"""Sets a value for a room config"""
self.check_owner()
params = {"room": self.room_id, "config": to_json({key: value})}
resp = self.conn.make_api_call("setRoomConfig", params)
if "error" in resp:
raise RuntimeError(f"{resp['error'].get('message') or resp['error']}")
return resp | [
"def",
"__set_config_value",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"self",
".",
"check_owner",
"(",
")",
"params",
"=",
"{",
"\"room\"",
":",
"self",
".",
"room_id",
",",
"\"config\"",
":",
"to_json",
"(",
"{",
"key",
":",
"value",
"}",
")... | Sets a value for a room config | [
"Sets",
"a",
"value",
"for",
"a",
"room",
"config"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L432-L440 | train | 49,355 |
volafiled/python-volapi | volapi/volapi.py | Room.listen | def listen(self, once=False):
"""Listen for changes in all registered listeners.
Use add_listener before calling this funcion to listen for desired
events or set `once` to True to listen for initial room information """
if once:
# we listen for time event and return false so our
# run_queues function will be also falsy and break the loop
self.add_listener("time", lambda _: False)
return self.conn.listen() | python | def listen(self, once=False):
"""Listen for changes in all registered listeners.
Use add_listener before calling this funcion to listen for desired
events or set `once` to True to listen for initial room information """
if once:
# we listen for time event and return false so our
# run_queues function will be also falsy and break the loop
self.add_listener("time", lambda _: False)
return self.conn.listen() | [
"def",
"listen",
"(",
"self",
",",
"once",
"=",
"False",
")",
":",
"if",
"once",
":",
"# we listen for time event and return false so our",
"# run_queues function will be also falsy and break the loop",
"self",
".",
"add_listener",
"(",
"\"time\"",
",",
"lambda",
"_",
"... | Listen for changes in all registered listeners.
Use add_listener before calling this funcion to listen for desired
events or set `once` to True to listen for initial room information | [
"Listen",
"for",
"changes",
"in",
"all",
"registered",
"listeners",
".",
"Use",
"add_listener",
"before",
"calling",
"this",
"funcion",
"to",
"listen",
"for",
"desired",
"events",
"or",
"set",
"once",
"to",
"True",
"to",
"listen",
"for",
"initial",
"room",
"... | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L497-L506 | train | 49,356 |
volafiled/python-volapi | volapi/volapi.py | Room.__expire_files | def __expire_files(self):
"""Because files are always unclean"""
self.__files = OrderedDict(
item for item in self.__files.items() if not item[1].expired
) | python | def __expire_files(self):
"""Because files are always unclean"""
self.__files = OrderedDict(
item for item in self.__files.items() if not item[1].expired
) | [
"def",
"__expire_files",
"(",
"self",
")",
":",
"self",
".",
"__files",
"=",
"OrderedDict",
"(",
"item",
"for",
"item",
"in",
"self",
".",
"__files",
".",
"items",
"(",
")",
"if",
"not",
"item",
"[",
"1",
"]",
".",
"expired",
")"
] | Because files are always unclean | [
"Because",
"files",
"are",
"always",
"unclean"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L529-L534 | train | 49,357 |
volafiled/python-volapi | volapi/volapi.py | Room.filedict | def filedict(self, kv):
"""Updates filedict with single file entry or deletes given
key if the value is False. Shouldn't be used by the user."""
k, v = kv
if v is not None:
self.__files.update({k: v})
else:
with suppress(KeyError):
del self.__files[k] | python | def filedict(self, kv):
"""Updates filedict with single file entry or deletes given
key if the value is False. Shouldn't be used by the user."""
k, v = kv
if v is not None:
self.__files.update({k: v})
else:
with suppress(KeyError):
del self.__files[k] | [
"def",
"filedict",
"(",
"self",
",",
"kv",
")",
":",
"k",
",",
"v",
"=",
"kv",
"if",
"v",
"is",
"not",
"None",
":",
"self",
".",
"__files",
".",
"update",
"(",
"{",
"k",
":",
"v",
"}",
")",
"else",
":",
"with",
"suppress",
"(",
"KeyError",
")... | Updates filedict with single file entry or deletes given
key if the value is False. Shouldn't be used by the user. | [
"Updates",
"filedict",
"with",
"single",
"file",
"entry",
"or",
"deletes",
"given",
"key",
"if",
"the",
"value",
"is",
"False",
".",
"Shouldn",
"t",
"be",
"used",
"by",
"the",
"user",
"."
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L555-L564 | train | 49,358 |
volafiled/python-volapi | volapi/volapi.py | Room.get_user_stats | def get_user_stats(self, name):
"""Return data about the given user. Returns None if user
does not exist."""
req = self.conn.get(BASE_URL + "/user/" + name)
if req.status_code != 200 or not name:
return None
return self.conn.make_api_call("getUserInfo", {"name": name}) | python | def get_user_stats(self, name):
"""Return data about the given user. Returns None if user
does not exist."""
req = self.conn.get(BASE_URL + "/user/" + name)
if req.status_code != 200 or not name:
return None
return self.conn.make_api_call("getUserInfo", {"name": name}) | [
"def",
"get_user_stats",
"(",
"self",
",",
"name",
")",
":",
"req",
"=",
"self",
".",
"conn",
".",
"get",
"(",
"BASE_URL",
"+",
"\"/user/\"",
"+",
"name",
")",
"if",
"req",
".",
"status_code",
"!=",
"200",
"or",
"not",
"name",
":",
"return",
"None",
... | Return data about the given user. Returns None if user
does not exist. | [
"Return",
"data",
"about",
"the",
"given",
"user",
".",
"Returns",
"None",
"if",
"user",
"does",
"not",
"exist",
"."
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L566-L574 | train | 49,359 |
volafiled/python-volapi | volapi/volapi.py | Room.upload_file | def upload_file(
self,
filename,
upload_as=None,
blocksize=None,
callback=None,
information_callback=None,
allow_timeout=False,
):
"""Uploads a file with given filename to this room.
You may specify upload_as to change the name it is uploaded as.
You can also specify a blocksize and a callback if you wish.
Returns the file's id on success and None on failure."""
with delayed_close(
filename if hasattr(filename, "read") else open(filename, "rb")
) as file:
filename = upload_as or os.path.split(filename)[1]
try:
file.seek(0, 2)
if file.tell() > self.config.max_file:
raise ValueError(
f"File must be at most {self.config.max_file >> 30} GB"
)
finally:
try:
file.seek(0)
except Exception:
pass
files = Data(
{"file": {"name": filename, "value": file}},
blocksize=blocksize,
callback=callback,
)
headers = {"Origin": BASE_URL}
headers.update(files.headers)
while True:
key, server, file_id = self._generate_upload_key(
allow_timeout=allow_timeout
)
info = dict(
key=key,
server=server,
file_id=file_id,
room=self.room_id,
filename=filename,
len=files.len,
resumecount=0,
)
if information_callback:
if information_callback(info) is False:
continue
break
params = {"room": self.room_id, "key": key, "filename": filename}
if self.key:
params["roomKey"] = self.key
if self.password:
params["password"] = self.password
while True:
try:
post = self.conn.post(
f"https://{server}/upload",
params=params,
data=files,
headers=headers,
)
post.raise_for_status()
break
except requests.exceptions.ConnectionError as ex:
if "aborted" not in repr(ex): # ye, that's nasty but "compatible"
raise
try:
resume = self.conn.get(
f"https://{server}/rest/uploadStatus",
params={"key": key, "c": 1},
).text
resume = from_json(resume)
resume = resume["receivedBytes"]
if resume <= 0:
raise ConnectionError("Cannot resume")
file.seek(resume)
files = Data(
{"file": {"name": filename, "value": file}},
blocksize=blocksize,
callback=callback,
logical_offset=resume,
)
headers.update(files.headers)
params["startAt"] = resume
info["resumecount"] += 1
if information_callback:
information_callback(info)
except requests.exceptions.ConnectionError as iex:
# ye, that's nasty but "compatible"
if "aborted" not in repr(iex):
raise
continue # another day, another try
return file_id | python | def upload_file(
self,
filename,
upload_as=None,
blocksize=None,
callback=None,
information_callback=None,
allow_timeout=False,
):
"""Uploads a file with given filename to this room.
You may specify upload_as to change the name it is uploaded as.
You can also specify a blocksize and a callback if you wish.
Returns the file's id on success and None on failure."""
with delayed_close(
filename if hasattr(filename, "read") else open(filename, "rb")
) as file:
filename = upload_as or os.path.split(filename)[1]
try:
file.seek(0, 2)
if file.tell() > self.config.max_file:
raise ValueError(
f"File must be at most {self.config.max_file >> 30} GB"
)
finally:
try:
file.seek(0)
except Exception:
pass
files = Data(
{"file": {"name": filename, "value": file}},
blocksize=blocksize,
callback=callback,
)
headers = {"Origin": BASE_URL}
headers.update(files.headers)
while True:
key, server, file_id = self._generate_upload_key(
allow_timeout=allow_timeout
)
info = dict(
key=key,
server=server,
file_id=file_id,
room=self.room_id,
filename=filename,
len=files.len,
resumecount=0,
)
if information_callback:
if information_callback(info) is False:
continue
break
params = {"room": self.room_id, "key": key, "filename": filename}
if self.key:
params["roomKey"] = self.key
if self.password:
params["password"] = self.password
while True:
try:
post = self.conn.post(
f"https://{server}/upload",
params=params,
data=files,
headers=headers,
)
post.raise_for_status()
break
except requests.exceptions.ConnectionError as ex:
if "aborted" not in repr(ex): # ye, that's nasty but "compatible"
raise
try:
resume = self.conn.get(
f"https://{server}/rest/uploadStatus",
params={"key": key, "c": 1},
).text
resume = from_json(resume)
resume = resume["receivedBytes"]
if resume <= 0:
raise ConnectionError("Cannot resume")
file.seek(resume)
files = Data(
{"file": {"name": filename, "value": file}},
blocksize=blocksize,
callback=callback,
logical_offset=resume,
)
headers.update(files.headers)
params["startAt"] = resume
info["resumecount"] += 1
if information_callback:
information_callback(info)
except requests.exceptions.ConnectionError as iex:
# ye, that's nasty but "compatible"
if "aborted" not in repr(iex):
raise
continue # another day, another try
return file_id | [
"def",
"upload_file",
"(",
"self",
",",
"filename",
",",
"upload_as",
"=",
"None",
",",
"blocksize",
"=",
"None",
",",
"callback",
"=",
"None",
",",
"information_callback",
"=",
"None",
",",
"allow_timeout",
"=",
"False",
",",
")",
":",
"with",
"delayed_cl... | Uploads a file with given filename to this room.
You may specify upload_as to change the name it is uploaded as.
You can also specify a blocksize and a callback if you wish.
Returns the file's id on success and None on failure. | [
"Uploads",
"a",
"file",
"with",
"given",
"filename",
"to",
"this",
"room",
".",
"You",
"may",
"specify",
"upload_as",
"to",
"change",
"the",
"name",
"it",
"is",
"uploaded",
"as",
".",
"You",
"can",
"also",
"specify",
"a",
"blocksize",
"and",
"a",
"callba... | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L597-L701 | train | 49,360 |
volafiled/python-volapi | volapi/volapi.py | Room.close | def close(self):
"""Close connection to this room"""
if hasattr(self, "conn"):
self.conn.close()
del self.conn
if hasattr(self, "user"):
del self.user | python | def close(self):
"""Close connection to this room"""
if hasattr(self, "conn"):
self.conn.close()
del self.conn
if hasattr(self, "user"):
del self.user | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"\"conn\"",
")",
":",
"self",
".",
"conn",
".",
"close",
"(",
")",
"del",
"self",
".",
"conn",
"if",
"hasattr",
"(",
"self",
",",
"\"user\"",
")",
":",
"del",
"self",
".",
... | Close connection to this room | [
"Close",
"connection",
"to",
"this",
"room"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L703-L710 | train | 49,361 |
volafiled/python-volapi | volapi/volapi.py | Room.user_info | def user_info(self, kv):
"""Sets user_info dict entry through a tuple."""
key, value = kv
self.__user_info[key] = value | python | def user_info(self, kv):
"""Sets user_info dict entry through a tuple."""
key, value = kv
self.__user_info[key] = value | [
"def",
"user_info",
"(",
"self",
",",
"kv",
")",
":",
"key",
",",
"value",
"=",
"kv",
"self",
".",
"__user_info",
"[",
"key",
"]",
"=",
"value"
] | Sets user_info dict entry through a tuple. | [
"Sets",
"user_info",
"dict",
"entry",
"through",
"a",
"tuple",
"."
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L724-L728 | train | 49,362 |
volafiled/python-volapi | volapi/volapi.py | Room.fileinfo | def fileinfo(self, fid):
"""Ask lain about what he knows about given file. If the given file
exists in the file dict, it will get updated."""
if not isinstance(fid, str):
raise TypeError("Your file ID must be a string")
try:
info = self.conn.make_call_with_cb("getFileinfo", fid).get(timeout=5)
if not info:
warnings.warn(
f"Your query for file with ID: '{fid}' failed.", RuntimeWarning
)
elif fid in self.__files and not self.__files[fid].updated:
self.__files[fid].fileupdate(info)
except queue.Empty as ex:
raise ValueError(
"lain didn't produce a callback!\n"
"Are you sure your query wasn't malformed?"
) from ex
return info | python | def fileinfo(self, fid):
"""Ask lain about what he knows about given file. If the given file
exists in the file dict, it will get updated."""
if not isinstance(fid, str):
raise TypeError("Your file ID must be a string")
try:
info = self.conn.make_call_with_cb("getFileinfo", fid).get(timeout=5)
if not info:
warnings.warn(
f"Your query for file with ID: '{fid}' failed.", RuntimeWarning
)
elif fid in self.__files and not self.__files[fid].updated:
self.__files[fid].fileupdate(info)
except queue.Empty as ex:
raise ValueError(
"lain didn't produce a callback!\n"
"Are you sure your query wasn't malformed?"
) from ex
return info | [
"def",
"fileinfo",
"(",
"self",
",",
"fid",
")",
":",
"if",
"not",
"isinstance",
"(",
"fid",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"Your file ID must be a string\"",
")",
"try",
":",
"info",
"=",
"self",
".",
"conn",
".",
"make_call_with_cb",
... | Ask lain about what he knows about given file. If the given file
exists in the file dict, it will get updated. | [
"Ask",
"lain",
"about",
"what",
"he",
"knows",
"about",
"given",
"file",
".",
"If",
"the",
"given",
"file",
"exists",
"in",
"the",
"file",
"dict",
"it",
"will",
"get",
"updated",
"."
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L749-L768 | train | 49,363 |
volafiled/python-volapi | volapi/volapi.py | Room._generate_upload_key | def _generate_upload_key(self, allow_timeout=False):
"""Generates a new upload key"""
# Wait for server to set username if not set already.
while not self.user.nick:
with ARBITRATOR.condition:
ARBITRATOR.condition.wait()
while True:
params = {
"name": self.user.nick,
"room": self.room_id,
"c": self.__upload_count,
}
if self.key:
params["roomKey"] = self.key
if self.password:
params["password"] = self.password
info = self.conn.make_api_call("getUploadKey", params)
self.__upload_count += 1
try:
return info["key"], info["server"], info["file_id"]
except Exception:
to = int(info.get("error", {}).get("info", {}).get("timeout", 0))
if to <= 0 or not allow_timeout:
raise IOError(f"Failed to retrieve key {info}")
time.sleep(to / 10000) | python | def _generate_upload_key(self, allow_timeout=False):
"""Generates a new upload key"""
# Wait for server to set username if not set already.
while not self.user.nick:
with ARBITRATOR.condition:
ARBITRATOR.condition.wait()
while True:
params = {
"name": self.user.nick,
"room": self.room_id,
"c": self.__upload_count,
}
if self.key:
params["roomKey"] = self.key
if self.password:
params["password"] = self.password
info = self.conn.make_api_call("getUploadKey", params)
self.__upload_count += 1
try:
return info["key"], info["server"], info["file_id"]
except Exception:
to = int(info.get("error", {}).get("info", {}).get("timeout", 0))
if to <= 0 or not allow_timeout:
raise IOError(f"Failed to retrieve key {info}")
time.sleep(to / 10000) | [
"def",
"_generate_upload_key",
"(",
"self",
",",
"allow_timeout",
"=",
"False",
")",
":",
"# Wait for server to set username if not set already.",
"while",
"not",
"self",
".",
"user",
".",
"nick",
":",
"with",
"ARBITRATOR",
".",
"condition",
":",
"ARBITRATOR",
".",
... | Generates a new upload key | [
"Generates",
"a",
"new",
"upload",
"key"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L770-L795 | train | 49,364 |
volafiled/python-volapi | volapi/volapi.py | Room.delete_files | def delete_files(self, ids):
"""Remove one or more files"""
self.check_owner()
if not isinstance(ids, list):
raise TypeError("You must specify list of files to delete!")
self.conn.make_call("deleteFiles", ids) | python | def delete_files(self, ids):
"""Remove one or more files"""
self.check_owner()
if not isinstance(ids, list):
raise TypeError("You must specify list of files to delete!")
self.conn.make_call("deleteFiles", ids) | [
"def",
"delete_files",
"(",
"self",
",",
"ids",
")",
":",
"self",
".",
"check_owner",
"(",
")",
"if",
"not",
"isinstance",
"(",
"ids",
",",
"list",
")",
":",
"raise",
"TypeError",
"(",
"\"You must specify list of files to delete!\"",
")",
"self",
".",
"conn"... | Remove one or more files | [
"Remove",
"one",
"or",
"more",
"files"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L797-L803 | train | 49,365 |
volafiled/python-volapi | volapi/volapi.py | Room.transfer_owner | def transfer_owner(self, new_owner):
"""You had good run at it, it's time for someone else to get dirty"""
if not self.owner and not self.admin:
raise RuntimeError("You need more street creed to do this")
new_owner = new_owner.strip().lower()
if not new_owner:
raise ValueError("Empty strings cannot be new owners")
self.__set_config_value("owner", new_owner) | python | def transfer_owner(self, new_owner):
"""You had good run at it, it's time for someone else to get dirty"""
if not self.owner and not self.admin:
raise RuntimeError("You need more street creed to do this")
new_owner = new_owner.strip().lower()
if not new_owner:
raise ValueError("Empty strings cannot be new owners")
self.__set_config_value("owner", new_owner) | [
"def",
"transfer_owner",
"(",
"self",
",",
"new_owner",
")",
":",
"if",
"not",
"self",
".",
"owner",
"and",
"not",
"self",
".",
"admin",
":",
"raise",
"RuntimeError",
"(",
"\"You need more street creed to do this\"",
")",
"new_owner",
"=",
"new_owner",
".",
"s... | You had good run at it, it's time for someone else to get dirty | [
"You",
"had",
"good",
"run",
"at",
"it",
"it",
"s",
"time",
"for",
"someone",
"else",
"to",
"get",
"dirty"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L805-L815 | train | 49,366 |
volafiled/python-volapi | volapi/volapi.py | Room.add_janitor | def add_janitor(self, janitor):
"""Add janitor to the room"""
if not self.owner and not self.admin:
raise RuntimeError("Not enough street creed to do this")
janitor = janitor.strip().lower()
if not janitor:
raise ValueError("Empty strings cannot be janitors")
if janitor in self.config.janitors:
return
self.config.janitors.append(janitor)
self.__set_config_value("janitors", self.config.janitors) | python | def add_janitor(self, janitor):
"""Add janitor to the room"""
if not self.owner and not self.admin:
raise RuntimeError("Not enough street creed to do this")
janitor = janitor.strip().lower()
if not janitor:
raise ValueError("Empty strings cannot be janitors")
if janitor in self.config.janitors:
return
self.config.janitors.append(janitor)
self.__set_config_value("janitors", self.config.janitors) | [
"def",
"add_janitor",
"(",
"self",
",",
"janitor",
")",
":",
"if",
"not",
"self",
".",
"owner",
"and",
"not",
"self",
".",
"admin",
":",
"raise",
"RuntimeError",
"(",
"\"Not enough street creed to do this\"",
")",
"janitor",
"=",
"janitor",
".",
"strip",
"("... | Add janitor to the room | [
"Add",
"janitor",
"to",
"the",
"room"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L817-L831 | train | 49,367 |
volafiled/python-volapi | volapi/volapi.py | Room.remove_janitor | def remove_janitor(self, janitor):
"""Remove janitor from the room"""
if not self.owner and not self.admin:
raise RuntimeError("Not enough street creed to do this")
janitor = janitor.strip().lower()
if not janitor:
raise ValueError("Empty strings cannot be janitors")
if janitor not in self.config.janitors:
return
self.config.janitors.remove(janitor)
self.__set_config_value("janitors", self.config.janitors) | python | def remove_janitor(self, janitor):
"""Remove janitor from the room"""
if not self.owner and not self.admin:
raise RuntimeError("Not enough street creed to do this")
janitor = janitor.strip().lower()
if not janitor:
raise ValueError("Empty strings cannot be janitors")
if janitor not in self.config.janitors:
return
self.config.janitors.remove(janitor)
self.__set_config_value("janitors", self.config.janitors) | [
"def",
"remove_janitor",
"(",
"self",
",",
"janitor",
")",
":",
"if",
"not",
"self",
".",
"owner",
"and",
"not",
"self",
".",
"admin",
":",
"raise",
"RuntimeError",
"(",
"\"Not enough street creed to do this\"",
")",
"janitor",
"=",
"janitor",
".",
"strip",
... | Remove janitor from the room | [
"Remove",
"janitor",
"from",
"the",
"room"
] | 5f0bc03dbde703264ac6ed494e2050761f688a3e | https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L833-L847 | train | 49,368 |
BeyondTheClouds/enoslib | enoslib/task.py | _make_env | def _make_env(resultdir=None):
"""Loads the env from `resultdir` if not `None` or makes a new one.
An Enos environment handles all specific variables of an
experiment. This function either generates a new environment or
loads a previous one. If the value of `resultdir` is `None`, then
this function makes a new environment and return it. If the value
is a directory path that contains an Enos environment, then this function
loads and returns it.
In case of a directory path, this function also rereads the
configuration file (the reservation.yaml) and reloads it. This
lets the user update his configuration between each phase.
Args:
resultdir (str): directory path to load the env from.
"""
env = {
"config": {}, # The config
"resultdir": "", # Path to the result directory
"config_file": "", # The initial config file
"nodes": {}, # Roles with nodes
"phase": "", # Last phase that have been run
"user": "", # User id for this job
"cwd": os.getcwd() # Current Working Directory
}
if resultdir:
env_path = os.path.join(resultdir, "env")
if os.path.isfile(env_path):
with open(env_path, "r") as f:
env.update(yaml.load(f))
logger.debug("Loaded environment %s", env_path)
if "config_file" in env and env["config_file"] is not None:
# Resets the configuration of the environment
if os.path.isfile(env["config_file"]):
with open(env["config_file"], "r") as f:
env["config"].update(yaml.load(f))
logger.debug("Reloaded config %s", env["config"])
return env | python | def _make_env(resultdir=None):
"""Loads the env from `resultdir` if not `None` or makes a new one.
An Enos environment handles all specific variables of an
experiment. This function either generates a new environment or
loads a previous one. If the value of `resultdir` is `None`, then
this function makes a new environment and return it. If the value
is a directory path that contains an Enos environment, then this function
loads and returns it.
In case of a directory path, this function also rereads the
configuration file (the reservation.yaml) and reloads it. This
lets the user update his configuration between each phase.
Args:
resultdir (str): directory path to load the env from.
"""
env = {
"config": {}, # The config
"resultdir": "", # Path to the result directory
"config_file": "", # The initial config file
"nodes": {}, # Roles with nodes
"phase": "", # Last phase that have been run
"user": "", # User id for this job
"cwd": os.getcwd() # Current Working Directory
}
if resultdir:
env_path = os.path.join(resultdir, "env")
if os.path.isfile(env_path):
with open(env_path, "r") as f:
env.update(yaml.load(f))
logger.debug("Loaded environment %s", env_path)
if "config_file" in env and env["config_file"] is not None:
# Resets the configuration of the environment
if os.path.isfile(env["config_file"]):
with open(env["config_file"], "r") as f:
env["config"].update(yaml.load(f))
logger.debug("Reloaded config %s", env["config"])
return env | [
"def",
"_make_env",
"(",
"resultdir",
"=",
"None",
")",
":",
"env",
"=",
"{",
"\"config\"",
":",
"{",
"}",
",",
"# The config",
"\"resultdir\"",
":",
"\"\"",
",",
"# Path to the result directory",
"\"config_file\"",
":",
"\"\"",
",",
"# The initial config file",
... | Loads the env from `resultdir` if not `None` or makes a new one.
An Enos environment handles all specific variables of an
experiment. This function either generates a new environment or
loads a previous one. If the value of `resultdir` is `None`, then
this function makes a new environment and return it. If the value
is a directory path that contains an Enos environment, then this function
loads and returns it.
In case of a directory path, this function also rereads the
configuration file (the reservation.yaml) and reloads it. This
lets the user update his configuration between each phase.
Args:
resultdir (str): directory path to load the env from. | [
"Loads",
"the",
"env",
"from",
"resultdir",
"if",
"not",
"None",
"or",
"makes",
"a",
"new",
"one",
"."
] | fb00be58e56a7848cfe482187d659744919fe2f7 | https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/task.py#L69-L110 | train | 49,369 |
BeyondTheClouds/enoslib | enoslib/task.py | _save_env | def _save_env(env):
"""Saves one environment.
Args:
env (dict): the env dict to save.
"""
env_path = os.path.join(env["resultdir"], "env")
if os.path.isdir(env["resultdir"]):
with open(env_path, "w") as f:
yaml.dump(env, f) | python | def _save_env(env):
"""Saves one environment.
Args:
env (dict): the env dict to save.
"""
env_path = os.path.join(env["resultdir"], "env")
if os.path.isdir(env["resultdir"]):
with open(env_path, "w") as f:
yaml.dump(env, f) | [
"def",
"_save_env",
"(",
"env",
")",
":",
"env_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"env",
"[",
"\"resultdir\"",
"]",
",",
"\"env\"",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"env",
"[",
"\"resultdir\"",
"]",
")",
":",
"with",
... | Saves one environment.
Args:
env (dict): the env dict to save. | [
"Saves",
"one",
"environment",
"."
] | fb00be58e56a7848cfe482187d659744919fe2f7 | https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/task.py#L113-L123 | train | 49,370 |
BeyondTheClouds/enoslib | enoslib/task.py | _set_resultdir | def _set_resultdir(name=None):
"""Set or get the directory to store experiment results.
Looks at the `name` and create the directory if it doesn"t exist
or returns it in other cases. If the name is `None`, then the
function generates an unique name for the results directory.
Finally, it links the directory to `SYMLINK_NAME`.
Args:
name (str): file path to an existing directory. It could be
weather an absolute or a relative to the current working
directory.
Returns:
the file path of the results directory.
"""
# Compute file path of results directory
resultdir_name = name or "enos_" + datetime.today().isoformat()
resultdir_path = os.path.abspath(resultdir_name)
# Raise error if a related file exists
if os.path.isfile(resultdir_path):
raise EnosFilePathError(resultdir_path,
"Result directory cannot be created due "
"to existing file %s" % resultdir_path)
# Create the result directory if it does not exist
if not os.path.isdir(resultdir_path):
os.mkdir(resultdir_path)
logger.info("Generate results directory %s" % resultdir_path)
# Symlink the result directory with the "cwd/current" directory
link_path = SYMLINK_NAME
if os.path.lexists(link_path):
os.remove(link_path)
try:
os.symlink(resultdir_path, link_path)
logger.info("Symlink %s to %s" % (resultdir_path, link_path))
except OSError:
# An harmless error can occur due to a race condition when
# multiple regions are simultaneously deployed
logger.warning("Symlink %s to %s failed" %
(resultdir_path, link_path))
return resultdir_path | python | def _set_resultdir(name=None):
"""Set or get the directory to store experiment results.
Looks at the `name` and create the directory if it doesn"t exist
or returns it in other cases. If the name is `None`, then the
function generates an unique name for the results directory.
Finally, it links the directory to `SYMLINK_NAME`.
Args:
name (str): file path to an existing directory. It could be
weather an absolute or a relative to the current working
directory.
Returns:
the file path of the results directory.
"""
# Compute file path of results directory
resultdir_name = name or "enos_" + datetime.today().isoformat()
resultdir_path = os.path.abspath(resultdir_name)
# Raise error if a related file exists
if os.path.isfile(resultdir_path):
raise EnosFilePathError(resultdir_path,
"Result directory cannot be created due "
"to existing file %s" % resultdir_path)
# Create the result directory if it does not exist
if not os.path.isdir(resultdir_path):
os.mkdir(resultdir_path)
logger.info("Generate results directory %s" % resultdir_path)
# Symlink the result directory with the "cwd/current" directory
link_path = SYMLINK_NAME
if os.path.lexists(link_path):
os.remove(link_path)
try:
os.symlink(resultdir_path, link_path)
logger.info("Symlink %s to %s" % (resultdir_path, link_path))
except OSError:
# An harmless error can occur due to a race condition when
# multiple regions are simultaneously deployed
logger.warning("Symlink %s to %s failed" %
(resultdir_path, link_path))
return resultdir_path | [
"def",
"_set_resultdir",
"(",
"name",
"=",
"None",
")",
":",
"# Compute file path of results directory",
"resultdir_name",
"=",
"name",
"or",
"\"enos_\"",
"+",
"datetime",
".",
"today",
"(",
")",
".",
"isoformat",
"(",
")",
"resultdir_path",
"=",
"os",
".",
"p... | Set or get the directory to store experiment results.
Looks at the `name` and create the directory if it doesn"t exist
or returns it in other cases. If the name is `None`, then the
function generates an unique name for the results directory.
Finally, it links the directory to `SYMLINK_NAME`.
Args:
name (str): file path to an existing directory. It could be
weather an absolute or a relative to the current working
directory.
Returns:
the file path of the results directory. | [
"Set",
"or",
"get",
"the",
"directory",
"to",
"store",
"experiment",
"results",
"."
] | fb00be58e56a7848cfe482187d659744919fe2f7 | https://github.com/BeyondTheClouds/enoslib/blob/fb00be58e56a7848cfe482187d659744919fe2f7/enoslib/task.py#L144-L189 | train | 49,371 |
BreakingBytes/simkit | simkit/core/data_sources.py | DataSource._is_cached | def _is_cached(self, ext='.json'):
"""
Determine if ``filename`` is cached using extension ``ex`` a string.
:param ext: extension used to cache ``filename``, default is '.json'
:type ext: str
:return: True if ``filename`` is cached using extensions ``ex``
:rtype: bool
"""
# extension must start with a dot
if not ext.startswith('.'):
# prepend extension with a dot
ext = '.%s' % ext
# cache file is filename with extension
cache_file = '%s%s' % (self.filename, ext)
# if filename already ends with extension or there's a file with the
# extension, then assume the data is cached
return self.filename.endswith(ext) or os.path.exists(cache_file) | python | def _is_cached(self, ext='.json'):
"""
Determine if ``filename`` is cached using extension ``ex`` a string.
:param ext: extension used to cache ``filename``, default is '.json'
:type ext: str
:return: True if ``filename`` is cached using extensions ``ex``
:rtype: bool
"""
# extension must start with a dot
if not ext.startswith('.'):
# prepend extension with a dot
ext = '.%s' % ext
# cache file is filename with extension
cache_file = '%s%s' % (self.filename, ext)
# if filename already ends with extension or there's a file with the
# extension, then assume the data is cached
return self.filename.endswith(ext) or os.path.exists(cache_file) | [
"def",
"_is_cached",
"(",
"self",
",",
"ext",
"=",
"'.json'",
")",
":",
"# extension must start with a dot",
"if",
"not",
"ext",
".",
"startswith",
"(",
"'.'",
")",
":",
"# prepend extension with a dot",
"ext",
"=",
"'.%s'",
"%",
"ext",
"# cache file is filename w... | Determine if ``filename`` is cached using extension ``ex`` a string.
:param ext: extension used to cache ``filename``, default is '.json'
:type ext: str
:return: True if ``filename`` is cached using extensions ``ex``
:rtype: bool | [
"Determine",
"if",
"filename",
"is",
"cached",
"using",
"extension",
"ex",
"a",
"string",
"."
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_sources.py#L237-L254 | train | 49,372 |
guaix-ucm/pyemir | emirdrp/tools/list_slitlets_from_string.py | list_slitlets_from_string | def list_slitlets_from_string(s, islitlet_min, islitlet_max):
"""Return list of slitlets from string specification.
Parameters
----------
s : string
String defining the slitlets. The slitlets must be specify
as a set of n1[,n2[,step]] tuples. If only n1 is provided,
the slitlet number n1 is considered. When n1 and n2 are given
but step is missing, step=1 is assumed. Finally, when all
n1, n2 and step are given, slitlets considered are those
returned by range(n1, n2 + 1, step) function.
islitlet_min : int
Minimum slitlet number allowed.
islitlet_max : int
Maximum slitlet number allowed.
Returns
-------
list_slitlets : Python list
List of slitlets.
"""
# protection
if not isinstance(s, str):
print('type(s): ', type(s))
print('ERROR: function expected a string parameter')
# initialize empty output
set_slitlets = set()
# remove leading blank spaces
s = re.sub('^ *', '', s)
# remove trailing blank spaces
s = re.sub(' *$', '', s)
# remove blank space before ','
s = re.sub(' *,', ',', s)
# remove blank spaces after ','
s = re.sub(', *', ',', s)
# remove many blank spaces by a single blank space
s = re.sub(' +', ' ', s)
stuples = s.split()
for item in stuples:
subitems = item.split(',')
nsubitems = len(subitems)
if nsubitems == 1:
n1 = int(subitems[0])
n2 = n1
step = 1
elif nsubitems == 2:
n1 = int(subitems[0])
n2 = int(subitems[1])
step = 1
elif nsubitems == 3:
n1 = int(subitems[0])
n2 = int(subitems[1])
step = int(subitems[2])
else:
raise ValueError('Unexpected slitlet range:', s)
for i in range(n1, n2 + 1, step):
if islitlet_min <= i <= islitlet_max:
set_slitlets.add(i)
else:
print('islitlet_min: ', islitlet_min)
print('islitlet_max: ', islitlet_max)
print('i...........: ', i)
raise ValueError("Slitlet number out of range!")
list_slitlets = list(set_slitlets)
list_slitlets.sort()
# return result
return list_slitlets | python | def list_slitlets_from_string(s, islitlet_min, islitlet_max):
"""Return list of slitlets from string specification.
Parameters
----------
s : string
String defining the slitlets. The slitlets must be specify
as a set of n1[,n2[,step]] tuples. If only n1 is provided,
the slitlet number n1 is considered. When n1 and n2 are given
but step is missing, step=1 is assumed. Finally, when all
n1, n2 and step are given, slitlets considered are those
returned by range(n1, n2 + 1, step) function.
islitlet_min : int
Minimum slitlet number allowed.
islitlet_max : int
Maximum slitlet number allowed.
Returns
-------
list_slitlets : Python list
List of slitlets.
"""
# protection
if not isinstance(s, str):
print('type(s): ', type(s))
print('ERROR: function expected a string parameter')
# initialize empty output
set_slitlets = set()
# remove leading blank spaces
s = re.sub('^ *', '', s)
# remove trailing blank spaces
s = re.sub(' *$', '', s)
# remove blank space before ','
s = re.sub(' *,', ',', s)
# remove blank spaces after ','
s = re.sub(', *', ',', s)
# remove many blank spaces by a single blank space
s = re.sub(' +', ' ', s)
stuples = s.split()
for item in stuples:
subitems = item.split(',')
nsubitems = len(subitems)
if nsubitems == 1:
n1 = int(subitems[0])
n2 = n1
step = 1
elif nsubitems == 2:
n1 = int(subitems[0])
n2 = int(subitems[1])
step = 1
elif nsubitems == 3:
n1 = int(subitems[0])
n2 = int(subitems[1])
step = int(subitems[2])
else:
raise ValueError('Unexpected slitlet range:', s)
for i in range(n1, n2 + 1, step):
if islitlet_min <= i <= islitlet_max:
set_slitlets.add(i)
else:
print('islitlet_min: ', islitlet_min)
print('islitlet_max: ', islitlet_max)
print('i...........: ', i)
raise ValueError("Slitlet number out of range!")
list_slitlets = list(set_slitlets)
list_slitlets.sort()
# return result
return list_slitlets | [
"def",
"list_slitlets_from_string",
"(",
"s",
",",
"islitlet_min",
",",
"islitlet_max",
")",
":",
"# protection",
"if",
"not",
"isinstance",
"(",
"s",
",",
"str",
")",
":",
"print",
"(",
"'type(s): '",
",",
"type",
"(",
"s",
")",
")",
"print",
"(",
"'ERR... | Return list of slitlets from string specification.
Parameters
----------
s : string
String defining the slitlets. The slitlets must be specify
as a set of n1[,n2[,step]] tuples. If only n1 is provided,
the slitlet number n1 is considered. When n1 and n2 are given
but step is missing, step=1 is assumed. Finally, when all
n1, n2 and step are given, slitlets considered are those
returned by range(n1, n2 + 1, step) function.
islitlet_min : int
Minimum slitlet number allowed.
islitlet_max : int
Maximum slitlet number allowed.
Returns
-------
list_slitlets : Python list
List of slitlets. | [
"Return",
"list",
"of",
"slitlets",
"from",
"string",
"specification",
"."
] | fef6bbabcb13f80123cafd1800a0f508a3c21702 | https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/tools/list_slitlets_from_string.py#L26-L99 | train | 49,373 |
Jaymon/prom | prom/query.py | Query.ref | def ref(self, orm_classpath, cls_pk=None):
"""
takes a classpath to allow query-ing from another Orm class
the reason why it takes string paths is to avoid infinite recursion import
problems because an orm class from module A might have a ref from module B
and sometimes it is handy to have module B be able to get the objects from
module A that correspond to the object in module B, but you can't import
module A into module B because module B already imports module A.
orm_classpath -- string -- a full python class path (eg, foo.bar.Che)
cls_pk -- mixed -- automatically set the where field of orm_classpath
that references self.orm_class to the value in cls_pk if present
return -- Query()
"""
# split orm from module path
orm_module, orm_class = get_objects(orm_classpath)
# if orm_classpath.startswith("."):
# # we handle relative classpaths by using the orm_class and its parents
# # to find the relative import
# if self.orm_class:
# try:
# orm_module, orm_class = get_objects(
# orm_classpath,
# self.orm_class.__module__
# )
# except ImportError:
# parents = inspect.getmro(self.orm_class)
# if parents:
# for pc in parents[1:-1]:
# try:
# orm_module, orm_class = get_objects(
# orm_classpath,
# pc.__module__
# )
# except ImportError:
# pass
#
# if not orm_module or not orm_class:
# raise ImportError(
# "Unable to resolve relative ref using {}".format(
# self.orm_class.__module__
# )
# )
#
# else:
# raise ImportError("trying relative ref without orm_class")
#
# else:
# orm_module, orm_class = get_objects(orm_classpath)
# if isinstance(orm_classpath, basestring):
# orm_module, orm_class = get_objects(orm_classpath)
# else:
# orm_module, orm_class = get_objects(orm_classpath[0], orm_classpath[1])
q = orm_class.query
if cls_pk:
found = False
for fn, f in orm_class.schema.fields.items():
cls_ref_s = f.schema
if cls_ref_s and self.schema == cls_ref_s:
q.is_field(fn, cls_pk)
found = True
break
if not found:
raise ValueError("Did not find a foreign key field for [{}] in [{}]".format(
self.orm_class.table_name,
orm_class.table_name,
))
return q | python | def ref(self, orm_classpath, cls_pk=None):
"""
takes a classpath to allow query-ing from another Orm class
the reason why it takes string paths is to avoid infinite recursion import
problems because an orm class from module A might have a ref from module B
and sometimes it is handy to have module B be able to get the objects from
module A that correspond to the object in module B, but you can't import
module A into module B because module B already imports module A.
orm_classpath -- string -- a full python class path (eg, foo.bar.Che)
cls_pk -- mixed -- automatically set the where field of orm_classpath
that references self.orm_class to the value in cls_pk if present
return -- Query()
"""
# split orm from module path
orm_module, orm_class = get_objects(orm_classpath)
# if orm_classpath.startswith("."):
# # we handle relative classpaths by using the orm_class and its parents
# # to find the relative import
# if self.orm_class:
# try:
# orm_module, orm_class = get_objects(
# orm_classpath,
# self.orm_class.__module__
# )
# except ImportError:
# parents = inspect.getmro(self.orm_class)
# if parents:
# for pc in parents[1:-1]:
# try:
# orm_module, orm_class = get_objects(
# orm_classpath,
# pc.__module__
# )
# except ImportError:
# pass
#
# if not orm_module or not orm_class:
# raise ImportError(
# "Unable to resolve relative ref using {}".format(
# self.orm_class.__module__
# )
# )
#
# else:
# raise ImportError("trying relative ref without orm_class")
#
# else:
# orm_module, orm_class = get_objects(orm_classpath)
# if isinstance(orm_classpath, basestring):
# orm_module, orm_class = get_objects(orm_classpath)
# else:
# orm_module, orm_class = get_objects(orm_classpath[0], orm_classpath[1])
q = orm_class.query
if cls_pk:
found = False
for fn, f in orm_class.schema.fields.items():
cls_ref_s = f.schema
if cls_ref_s and self.schema == cls_ref_s:
q.is_field(fn, cls_pk)
found = True
break
if not found:
raise ValueError("Did not find a foreign key field for [{}] in [{}]".format(
self.orm_class.table_name,
orm_class.table_name,
))
return q | [
"def",
"ref",
"(",
"self",
",",
"orm_classpath",
",",
"cls_pk",
"=",
"None",
")",
":",
"# split orm from module path",
"orm_module",
",",
"orm_class",
"=",
"get_objects",
"(",
"orm_classpath",
")",
"# if orm_classpath.startswith(\".\"):",
"# # we handl... | takes a classpath to allow query-ing from another Orm class
the reason why it takes string paths is to avoid infinite recursion import
problems because an orm class from module A might have a ref from module B
and sometimes it is handy to have module B be able to get the objects from
module A that correspond to the object in module B, but you can't import
module A into module B because module B already imports module A.
orm_classpath -- string -- a full python class path (eg, foo.bar.Che)
cls_pk -- mixed -- automatically set the where field of orm_classpath
that references self.orm_class to the value in cls_pk if present
return -- Query() | [
"takes",
"a",
"classpath",
"to",
"allow",
"query",
"-",
"ing",
"from",
"another",
"Orm",
"class"
] | b7ad2c259eca198da03e1e4bc7d95014c168c361 | https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L638-L711 | train | 49,374 |
Jaymon/prom | prom/query.py | Query.select_fields | def select_fields(self, *fields):
"""set multiple fields to be selected"""
if fields:
if not isinstance(fields[0], basestring):
fields = list(fields[0]) + list(fields)[1:]
for field_name in fields:
field_name = self._normalize_field_name(field_name)
self.select_field(field_name)
return self | python | def select_fields(self, *fields):
"""set multiple fields to be selected"""
if fields:
if not isinstance(fields[0], basestring):
fields = list(fields[0]) + list(fields)[1:]
for field_name in fields:
field_name = self._normalize_field_name(field_name)
self.select_field(field_name)
return self | [
"def",
"select_fields",
"(",
"self",
",",
"*",
"fields",
")",
":",
"if",
"fields",
":",
"if",
"not",
"isinstance",
"(",
"fields",
"[",
"0",
"]",
",",
"basestring",
")",
":",
"fields",
"=",
"list",
"(",
"fields",
"[",
"0",
"]",
")",
"+",
"list",
"... | set multiple fields to be selected | [
"set",
"multiple",
"fields",
"to",
"be",
"selected"
] | b7ad2c259eca198da03e1e4bc7d95014c168c361 | https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L737-L746 | train | 49,375 |
Jaymon/prom | prom/query.py | Query.set_field | def set_field(self, field_name, field_val=None):
"""
set a field into .fields attribute
n insert/update queries, these are the fields that will be inserted/updated into the db
"""
field_name = self._normalize_field_name(field_name)
self.fields_set.append(field_name, [field_name, field_val])
return self | python | def set_field(self, field_name, field_val=None):
"""
set a field into .fields attribute
n insert/update queries, these are the fields that will be inserted/updated into the db
"""
field_name = self._normalize_field_name(field_name)
self.fields_set.append(field_name, [field_name, field_val])
return self | [
"def",
"set_field",
"(",
"self",
",",
"field_name",
",",
"field_val",
"=",
"None",
")",
":",
"field_name",
"=",
"self",
".",
"_normalize_field_name",
"(",
"field_name",
")",
"self",
".",
"fields_set",
".",
"append",
"(",
"field_name",
",",
"[",
"field_name",... | set a field into .fields attribute
n insert/update queries, these are the fields that will be inserted/updated into the db | [
"set",
"a",
"field",
"into",
".",
"fields",
"attribute"
] | b7ad2c259eca198da03e1e4bc7d95014c168c361 | https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L748-L756 | train | 49,376 |
Jaymon/prom | prom/query.py | Query.set_fields | def set_fields(self, fields=None, *fields_args, **fields_kwargs):
"""
completely replaces the current .fields with fields and fields_kwargs combined
"""
if fields_args:
fields = [fields]
fields.extend(fields_args)
for field_name in fields:
self.set_field(field_name)
elif fields_kwargs:
fields = make_dict(fields, fields_kwargs)
for field_name, field_val in fields.items():
self.set_field(field_name, field_val)
else:
if isinstance(fields, Mapping):
for field_name, field_val in fields.items():
self.set_field(field_name, field_val)
else:
for field_name in fields:
self.set_field(field_name)
return self | python | def set_fields(self, fields=None, *fields_args, **fields_kwargs):
"""
completely replaces the current .fields with fields and fields_kwargs combined
"""
if fields_args:
fields = [fields]
fields.extend(fields_args)
for field_name in fields:
self.set_field(field_name)
elif fields_kwargs:
fields = make_dict(fields, fields_kwargs)
for field_name, field_val in fields.items():
self.set_field(field_name, field_val)
else:
if isinstance(fields, Mapping):
for field_name, field_val in fields.items():
self.set_field(field_name, field_val)
else:
for field_name in fields:
self.set_field(field_name)
return self | [
"def",
"set_fields",
"(",
"self",
",",
"fields",
"=",
"None",
",",
"*",
"fields_args",
",",
"*",
"*",
"fields_kwargs",
")",
":",
"if",
"fields_args",
":",
"fields",
"=",
"[",
"fields",
"]",
"fields",
".",
"extend",
"(",
"fields_args",
")",
"for",
"fiel... | completely replaces the current .fields with fields and fields_kwargs combined | [
"completely",
"replaces",
"the",
"current",
".",
"fields",
"with",
"fields",
"and",
"fields_kwargs",
"combined"
] | b7ad2c259eca198da03e1e4bc7d95014c168c361 | https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L762-L786 | train | 49,377 |
Jaymon/prom | prom/query.py | Query.nlike_field | def nlike_field(self, field_name, *field_val, **field_kwargs):
"""Perform a field_name NOT LIKE field_val query
:param field_name: string, the field we are filtering on
:param field_val: string, the like query: %val, %val%, val%
:returns: self, for fluid interface
"""
if not field_val:
raise ValueError("Cannot NOT LIKE nothing")
field_name = self._normalize_field_name(field_name)
fv = field_val[0]
self.fields_where.append(field_name, ["nlike", field_name, fv, field_kwargs])
return self | python | def nlike_field(self, field_name, *field_val, **field_kwargs):
"""Perform a field_name NOT LIKE field_val query
:param field_name: string, the field we are filtering on
:param field_val: string, the like query: %val, %val%, val%
:returns: self, for fluid interface
"""
if not field_val:
raise ValueError("Cannot NOT LIKE nothing")
field_name = self._normalize_field_name(field_name)
fv = field_val[0]
self.fields_where.append(field_name, ["nlike", field_name, fv, field_kwargs])
return self | [
"def",
"nlike_field",
"(",
"self",
",",
"field_name",
",",
"*",
"field_val",
",",
"*",
"*",
"field_kwargs",
")",
":",
"if",
"not",
"field_val",
":",
"raise",
"ValueError",
"(",
"\"Cannot NOT LIKE nothing\"",
")",
"field_name",
"=",
"self",
".",
"_normalize_fie... | Perform a field_name NOT LIKE field_val query
:param field_name: string, the field we are filtering on
:param field_val: string, the like query: %val, %val%, val%
:returns: self, for fluid interface | [
"Perform",
"a",
"field_name",
"NOT",
"LIKE",
"field_val",
"query"
] | b7ad2c259eca198da03e1e4bc7d95014c168c361 | https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L890-L902 | train | 49,378 |
Jaymon/prom | prom/query.py | Query.sort_field | def sort_field(self, field_name, direction, field_vals=None):
"""
sort this query by field_name in directrion
field_name -- string -- the field to sort on
direction -- integer -- negative for DESC, positive for ASC
field_vals -- list -- the order the rows should be returned in
"""
field_name = self._normalize_field_name(field_name)
if direction > 0:
direction = 1
elif direction < 0:
direction = -1
else:
raise ValueError("direction {} is undefined".format(direction))
self.fields_sort.append(field_name, [direction, field_name, list(field_vals) if field_vals else field_vals])
return self | python | def sort_field(self, field_name, direction, field_vals=None):
"""
sort this query by field_name in directrion
field_name -- string -- the field to sort on
direction -- integer -- negative for DESC, positive for ASC
field_vals -- list -- the order the rows should be returned in
"""
field_name = self._normalize_field_name(field_name)
if direction > 0:
direction = 1
elif direction < 0:
direction = -1
else:
raise ValueError("direction {} is undefined".format(direction))
self.fields_sort.append(field_name, [direction, field_name, list(field_vals) if field_vals else field_vals])
return self | [
"def",
"sort_field",
"(",
"self",
",",
"field_name",
",",
"direction",
",",
"field_vals",
"=",
"None",
")",
":",
"field_name",
"=",
"self",
".",
"_normalize_field_name",
"(",
"field_name",
")",
"if",
"direction",
">",
"0",
":",
"direction",
"=",
"1",
"elif... | sort this query by field_name in directrion
field_name -- string -- the field to sort on
direction -- integer -- negative for DESC, positive for ASC
field_vals -- list -- the order the rows should be returned in | [
"sort",
"this",
"query",
"by",
"field_name",
"in",
"directrion"
] | b7ad2c259eca198da03e1e4bc7d95014c168c361 | https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L904-L921 | train | 49,379 |
Jaymon/prom | prom/query.py | Query.get | def get(self, limit=None, page=None):
"""
get results from the db
return -- Iterator()
"""
has_more = False
self.bounds.paginate = True
limit_paginate, offset = self.bounds.get(limit, page)
self.default_val = []
results = self._query('get')
if limit_paginate:
self.bounds.paginate = False
if len(results) == limit_paginate:
has_more = True
results.pop(-1)
it = ResultsIterator(results, orm_class=self.orm_class, has_more=has_more, query=self)
return self.iterator_class(it) | python | def get(self, limit=None, page=None):
"""
get results from the db
return -- Iterator()
"""
has_more = False
self.bounds.paginate = True
limit_paginate, offset = self.bounds.get(limit, page)
self.default_val = []
results = self._query('get')
if limit_paginate:
self.bounds.paginate = False
if len(results) == limit_paginate:
has_more = True
results.pop(-1)
it = ResultsIterator(results, orm_class=self.orm_class, has_more=has_more, query=self)
return self.iterator_class(it) | [
"def",
"get",
"(",
"self",
",",
"limit",
"=",
"None",
",",
"page",
"=",
"None",
")",
":",
"has_more",
"=",
"False",
"self",
".",
"bounds",
".",
"paginate",
"=",
"True",
"limit_paginate",
",",
"offset",
"=",
"self",
".",
"bounds",
".",
"get",
"(",
"... | get results from the db
return -- Iterator() | [
"get",
"results",
"from",
"the",
"db"
] | b7ad2c259eca198da03e1e4bc7d95014c168c361 | https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L1013-L1032 | train | 49,380 |
Jaymon/prom | prom/query.py | Query.get_one | def get_one(self):
"""get one row from the db"""
self.default_val = None
o = self.default_val
d = self._query('get_one')
if d:
o = self.orm_class(d, hydrate=True)
return o | python | def get_one(self):
"""get one row from the db"""
self.default_val = None
o = self.default_val
d = self._query('get_one')
if d:
o = self.orm_class(d, hydrate=True)
return o | [
"def",
"get_one",
"(",
"self",
")",
":",
"self",
".",
"default_val",
"=",
"None",
"o",
"=",
"self",
".",
"default_val",
"d",
"=",
"self",
".",
"_query",
"(",
"'get_one'",
")",
"if",
"d",
":",
"o",
"=",
"self",
".",
"orm_class",
"(",
"d",
",",
"hy... | get one row from the db | [
"get",
"one",
"row",
"from",
"the",
"db"
] | b7ad2c259eca198da03e1e4bc7d95014c168c361 | https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L1048-L1055 | train | 49,381 |
Jaymon/prom | prom/query.py | Query.value | def value(self):
"""convenience method to just get one value or tuple of values for the query"""
field_vals = None
field_names = self.fields_select.names()
fcount = len(field_names)
if fcount:
d = self._query('get_one')
if d:
field_vals = [d.get(fn, None) for fn in field_names]
if fcount == 1:
field_vals = field_vals[0]
else:
raise ValueError("no select fields were set, so cannot return value")
return field_vals | python | def value(self):
"""convenience method to just get one value or tuple of values for the query"""
field_vals = None
field_names = self.fields_select.names()
fcount = len(field_names)
if fcount:
d = self._query('get_one')
if d:
field_vals = [d.get(fn, None) for fn in field_names]
if fcount == 1:
field_vals = field_vals[0]
else:
raise ValueError("no select fields were set, so cannot return value")
return field_vals | [
"def",
"value",
"(",
"self",
")",
":",
"field_vals",
"=",
"None",
"field_names",
"=",
"self",
".",
"fields_select",
".",
"names",
"(",
")",
"fcount",
"=",
"len",
"(",
"field_names",
")",
"if",
"fcount",
":",
"d",
"=",
"self",
".",
"_query",
"(",
"'ge... | convenience method to just get one value or tuple of values for the query | [
"convenience",
"method",
"to",
"just",
"get",
"one",
"value",
"or",
"tuple",
"of",
"values",
"for",
"the",
"query"
] | b7ad2c259eca198da03e1e4bc7d95014c168c361 | https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L1065-L1080 | train | 49,382 |
Jaymon/prom | prom/query.py | Query.count | def count(self):
"""return the count of the criteria"""
# count queries shouldn't care about sorting
fields_sort = self.fields_sort
self.fields_sort = self.fields_sort_class()
self.default_val = 0
ret = self._query('count')
# restore previous values now that count is done
self.fields_sort = fields_sort
return ret | python | def count(self):
"""return the count of the criteria"""
# count queries shouldn't care about sorting
fields_sort = self.fields_sort
self.fields_sort = self.fields_sort_class()
self.default_val = 0
ret = self._query('count')
# restore previous values now that count is done
self.fields_sort = fields_sort
return ret | [
"def",
"count",
"(",
"self",
")",
":",
"# count queries shouldn't care about sorting",
"fields_sort",
"=",
"self",
".",
"fields_sort",
"self",
".",
"fields_sort",
"=",
"self",
".",
"fields_sort_class",
"(",
")",
"self",
".",
"default_val",
"=",
"0",
"ret",
"=",
... | return the count of the criteria | [
"return",
"the",
"count",
"of",
"the",
"criteria"
] | b7ad2c259eca198da03e1e4bc7d95014c168c361 | https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L1110-L1123 | train | 49,383 |
Jaymon/prom | prom/query.py | Query.insert | def insert(self):
"""persist the .fields"""
self.default_val = 0
#fields = self.fields
#fields = self.orm_class.depart(self.fields, is_update=False)
#self.set_fields(fields)
return self.interface.insert(
self.schema,
self.fields
)
return self.interface.insert(self.schema, self.fields) | python | def insert(self):
"""persist the .fields"""
self.default_val = 0
#fields = self.fields
#fields = self.orm_class.depart(self.fields, is_update=False)
#self.set_fields(fields)
return self.interface.insert(
self.schema,
self.fields
)
return self.interface.insert(self.schema, self.fields) | [
"def",
"insert",
"(",
"self",
")",
":",
"self",
".",
"default_val",
"=",
"0",
"#fields = self.fields",
"#fields = self.orm_class.depart(self.fields, is_update=False)",
"#self.set_fields(fields)",
"return",
"self",
".",
"interface",
".",
"insert",
"(",
"self",
".",
"sche... | persist the .fields | [
"persist",
"the",
".",
"fields"
] | b7ad2c259eca198da03e1e4bc7d95014c168c361 | https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L1130-L1141 | train | 49,384 |
Jaymon/prom | prom/query.py | Query.update | def update(self):
"""persist the .fields using .fields_where"""
self.default_val = 0
#fields = self.fields
#fields = self.orm_class.depart(self.fields, is_update=True)
#self.set_fields(fields)
return self.interface.update(
self.schema,
self.fields,
self
) | python | def update(self):
"""persist the .fields using .fields_where"""
self.default_val = 0
#fields = self.fields
#fields = self.orm_class.depart(self.fields, is_update=True)
#self.set_fields(fields)
return self.interface.update(
self.schema,
self.fields,
self
) | [
"def",
"update",
"(",
"self",
")",
":",
"self",
".",
"default_val",
"=",
"0",
"#fields = self.fields",
"#fields = self.orm_class.depart(self.fields, is_update=True)",
"#self.set_fields(fields)",
"return",
"self",
".",
"interface",
".",
"update",
"(",
"self",
".",
"schem... | persist the .fields using .fields_where | [
"persist",
"the",
".",
"fields",
"using",
".",
"fields_where"
] | b7ad2c259eca198da03e1e4bc7d95014c168c361 | https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L1143-L1153 | train | 49,385 |
Jaymon/prom | prom/query.py | BaseCacheQuery.cache_key | def cache_key(self, method_name):
"""decides if this query is cacheable, returns a key if it is, otherwise empty"""
key = ""
method = getattr(self, "cache_key_{}".format(method_name), None)
if method:
key = method()
return key | python | def cache_key(self, method_name):
"""decides if this query is cacheable, returns a key if it is, otherwise empty"""
key = ""
method = getattr(self, "cache_key_{}".format(method_name), None)
if method:
key = method()
return key | [
"def",
"cache_key",
"(",
"self",
",",
"method_name",
")",
":",
"key",
"=",
"\"\"",
"method",
"=",
"getattr",
"(",
"self",
",",
"\"cache_key_{}\"",
".",
"format",
"(",
"method_name",
")",
",",
"None",
")",
"if",
"method",
":",
"key",
"=",
"method",
"(",... | decides if this query is cacheable, returns a key if it is, otherwise empty | [
"decides",
"if",
"this",
"query",
"is",
"cacheable",
"returns",
"a",
"key",
"if",
"it",
"is",
"otherwise",
"empty"
] | b7ad2c259eca198da03e1e4bc7d95014c168c361 | https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L1356-L1363 | train | 49,386 |
Jaymon/prom | prom/query.py | CacheNamespace.ttl | def ttl(self):
"""how long you should cache results for cacheable queries"""
ret = 3600
cn = self.get_process()
if "ttl" in cn:
ret = cn["ttl"]
return ret | python | def ttl(self):
"""how long you should cache results for cacheable queries"""
ret = 3600
cn = self.get_process()
if "ttl" in cn:
ret = cn["ttl"]
return ret | [
"def",
"ttl",
"(",
"self",
")",
":",
"ret",
"=",
"3600",
"cn",
"=",
"self",
".",
"get_process",
"(",
")",
"if",
"\"ttl\"",
"in",
"cn",
":",
"ret",
"=",
"cn",
"[",
"\"ttl\"",
"]",
"return",
"ret"
] | how long you should cache results for cacheable queries | [
"how",
"long",
"you",
"should",
"cache",
"results",
"for",
"cacheable",
"queries"
] | b7ad2c259eca198da03e1e4bc7d95014c168c361 | https://github.com/Jaymon/prom/blob/b7ad2c259eca198da03e1e4bc7d95014c168c361/prom/query.py#L1440-L1446 | train | 49,387 |
BreakingBytes/simkit | simkit/core/data_readers.py | _read_header | def _read_header(f, header_param):
"""
Read and parse data from 1st line of a file.
:param f: :func:`file` or :class:`~StringIO.StringIO` object from which to
read 1st line.
:type f: file
:param header_param: Parameters used to parse the data from the header.
Contains "delimiter" and "fields".
:type header_param: dict
:returns: Dictionary of data read from header.
:rtype: dict
:raises: :exc:`~simkit.core.exceptions.UnnamedDataError`
The **header_param** argument contains keys to read the 1st line of **f**.
If "delimiter" is ``None`` or missing, the default delimiter is a comma,
otherwise "delimiter" can be any single character, integer or sequence of
``int``.
* single character -- a delimiter
* single integer -- uniform fixed width
* sequence of ``int`` -- fixed widths, the number of fields should \
correspond to the length of the sequence.
The "fields" key is a list of (parameter-name, parameter-type[, parameter-
units]) lists.
"""
# default delimiter is a comma, can't be None
header_delim = str(header_param.get('delimiter', ','))
# don't allow unnamed fields
if 'fields' not in header_param:
raise UnnamedDataError(f.name)
header_fields = {field[0]: field[1:] for field in header_param['fields']}
# header_names can't be generator b/c DictReader needs list, and can't be
# dictionary b/c must be same order as 'fields' to match data readby csv
header_names = [field[0] for field in header_param['fields']]
# read header
header_str = StringIO(f.readline()) # read the 1st line
# use csv because it will preserve quoted fields with commas
# make a csv.DictReader from header string, use header names for
# fieldnames and set delimiter to header delimiter
header_reader = csv.DictReader(header_str, header_names,
delimiter=header_delim,
skipinitialspace=True)
data = header_reader.next() # parse the header dictionary
# iterate over items in data
for k, v in data.iteritems():
header_type = header_fields[k][0] # spec'd type
# whitelist header types
if isinstance(header_type, basestring):
if header_type.lower().startswith('int'):
header_type = int # coerce to integer
elif header_type.lower().startswith('long'):
header_type = long # coerce to long integer
elif header_type.lower().startswith('float'):
header_type = float # to floating decimal point
elif header_type.lower().startswith('str'):
header_type = str # coerce to string
elif header_type.lower().startswith('bool'):
header_type = bool # coerce to boolean
else:
raise TypeError('"%s" is not a supported type.' % header_type)
# WARNING! Use of `eval` considered harmful. `header_type` is read
# from JSON file, not secure input, could be used to exploit system
data[k] = header_type(v) # cast v to type
# check for units in 3rd element
if len(header_fields[k]) > 1:
units = UREG(str(header_fields[k][1])) # spec'd units
data[k] = data[k] * units # apply units
return data | python | def _read_header(f, header_param):
"""
Read and parse data from 1st line of a file.
:param f: :func:`file` or :class:`~StringIO.StringIO` object from which to
read 1st line.
:type f: file
:param header_param: Parameters used to parse the data from the header.
Contains "delimiter" and "fields".
:type header_param: dict
:returns: Dictionary of data read from header.
:rtype: dict
:raises: :exc:`~simkit.core.exceptions.UnnamedDataError`
The **header_param** argument contains keys to read the 1st line of **f**.
If "delimiter" is ``None`` or missing, the default delimiter is a comma,
otherwise "delimiter" can be any single character, integer or sequence of
``int``.
* single character -- a delimiter
* single integer -- uniform fixed width
* sequence of ``int`` -- fixed widths, the number of fields should \
correspond to the length of the sequence.
The "fields" key is a list of (parameter-name, parameter-type[, parameter-
units]) lists.
"""
# default delimiter is a comma, can't be None
header_delim = str(header_param.get('delimiter', ','))
# don't allow unnamed fields
if 'fields' not in header_param:
raise UnnamedDataError(f.name)
header_fields = {field[0]: field[1:] for field in header_param['fields']}
# header_names can't be generator b/c DictReader needs list, and can't be
# dictionary b/c must be same order as 'fields' to match data readby csv
header_names = [field[0] for field in header_param['fields']]
# read header
header_str = StringIO(f.readline()) # read the 1st line
# use csv because it will preserve quoted fields with commas
# make a csv.DictReader from header string, use header names for
# fieldnames and set delimiter to header delimiter
header_reader = csv.DictReader(header_str, header_names,
delimiter=header_delim,
skipinitialspace=True)
data = header_reader.next() # parse the header dictionary
# iterate over items in data
for k, v in data.iteritems():
header_type = header_fields[k][0] # spec'd type
# whitelist header types
if isinstance(header_type, basestring):
if header_type.lower().startswith('int'):
header_type = int # coerce to integer
elif header_type.lower().startswith('long'):
header_type = long # coerce to long integer
elif header_type.lower().startswith('float'):
header_type = float # to floating decimal point
elif header_type.lower().startswith('str'):
header_type = str # coerce to string
elif header_type.lower().startswith('bool'):
header_type = bool # coerce to boolean
else:
raise TypeError('"%s" is not a supported type.' % header_type)
# WARNING! Use of `eval` considered harmful. `header_type` is read
# from JSON file, not secure input, could be used to exploit system
data[k] = header_type(v) # cast v to type
# check for units in 3rd element
if len(header_fields[k]) > 1:
units = UREG(str(header_fields[k][1])) # spec'd units
data[k] = data[k] * units # apply units
return data | [
"def",
"_read_header",
"(",
"f",
",",
"header_param",
")",
":",
"# default delimiter is a comma, can't be None",
"header_delim",
"=",
"str",
"(",
"header_param",
".",
"get",
"(",
"'delimiter'",
",",
"','",
")",
")",
"# don't allow unnamed fields",
"if",
"'fields'",
... | Read and parse data from 1st line of a file.
:param f: :func:`file` or :class:`~StringIO.StringIO` object from which to
read 1st line.
:type f: file
:param header_param: Parameters used to parse the data from the header.
Contains "delimiter" and "fields".
:type header_param: dict
:returns: Dictionary of data read from header.
:rtype: dict
:raises: :exc:`~simkit.core.exceptions.UnnamedDataError`
The **header_param** argument contains keys to read the 1st line of **f**.
If "delimiter" is ``None`` or missing, the default delimiter is a comma,
otherwise "delimiter" can be any single character, integer or sequence of
``int``.
* single character -- a delimiter
* single integer -- uniform fixed width
* sequence of ``int`` -- fixed widths, the number of fields should \
correspond to the length of the sequence.
The "fields" key is a list of (parameter-name, parameter-type[, parameter-
units]) lists. | [
"Read",
"and",
"parse",
"data",
"from",
"1st",
"line",
"of",
"a",
"file",
"."
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_readers.py#L536-L605 | train | 49,388 |
BreakingBytes/simkit | simkit/core/data_readers.py | _apply_units | def _apply_units(data_data, data_units, fname):
"""
Apply units to data.
:param data_data: NumPy structured array with data from fname.
:type data_data: :class:`numpy.ndarray`
:param data_units: Units of fields in data_data.
:type data_units: dict
:param fname: Name of file from which data_data was read.
:type fname: str
:returns: Dictionary of data with units applied.
:rtype: dict
:raises: :exc:`~simkit.core.exceptions.UnnamedDataError`
"""
data_names = data_data.dtype.names
# raise error if NumPy data doesn't have names
if not data_names:
raise UnnamedDataError(fname)
data = dict.fromkeys(data_names) # dictionary of data read by NumPy
# iterate over data read by NumPy
for data_name in data_names:
if data_name in data_units:
# if units specified in parameters, then convert to string
units = str(data_units[data_name])
data[data_name] = data_data[data_name] * UREG(units)
elif np.issubdtype(data_data[data_name].dtype, str):
# if no units specified and is string
data[data_name] = data_data[data_name].tolist()
else:
data[data_name] = data_data[data_name]
return data | python | def _apply_units(data_data, data_units, fname):
"""
Apply units to data.
:param data_data: NumPy structured array with data from fname.
:type data_data: :class:`numpy.ndarray`
:param data_units: Units of fields in data_data.
:type data_units: dict
:param fname: Name of file from which data_data was read.
:type fname: str
:returns: Dictionary of data with units applied.
:rtype: dict
:raises: :exc:`~simkit.core.exceptions.UnnamedDataError`
"""
data_names = data_data.dtype.names
# raise error if NumPy data doesn't have names
if not data_names:
raise UnnamedDataError(fname)
data = dict.fromkeys(data_names) # dictionary of data read by NumPy
# iterate over data read by NumPy
for data_name in data_names:
if data_name in data_units:
# if units specified in parameters, then convert to string
units = str(data_units[data_name])
data[data_name] = data_data[data_name] * UREG(units)
elif np.issubdtype(data_data[data_name].dtype, str):
# if no units specified and is string
data[data_name] = data_data[data_name].tolist()
else:
data[data_name] = data_data[data_name]
return data | [
"def",
"_apply_units",
"(",
"data_data",
",",
"data_units",
",",
"fname",
")",
":",
"data_names",
"=",
"data_data",
".",
"dtype",
".",
"names",
"# raise error if NumPy data doesn't have names",
"if",
"not",
"data_names",
":",
"raise",
"UnnamedDataError",
"(",
"fname... | Apply units to data.
:param data_data: NumPy structured array with data from fname.
:type data_data: :class:`numpy.ndarray`
:param data_units: Units of fields in data_data.
:type data_units: dict
:param fname: Name of file from which data_data was read.
:type fname: str
:returns: Dictionary of data with units applied.
:rtype: dict
:raises: :exc:`~simkit.core.exceptions.UnnamedDataError` | [
"Apply",
"units",
"to",
"data",
"."
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_readers.py#L608-L638 | train | 49,389 |
BreakingBytes/simkit | simkit/core/data_readers.py | _utf8_list_to_ascii_tuple | def _utf8_list_to_ascii_tuple(utf8_list):
"""
Convert unicode strings in a list of lists to ascii in a list of tuples.
:param utf8_list: A nested list of unicode strings.
:type utf8_list: list
"""
for n, utf8 in enumerate(utf8_list):
utf8_list[n][0] = str(utf8[0])
utf8_list[n][1] = str(utf8[1])
utf8_list[n] = tuple(utf8) | python | def _utf8_list_to_ascii_tuple(utf8_list):
"""
Convert unicode strings in a list of lists to ascii in a list of tuples.
:param utf8_list: A nested list of unicode strings.
:type utf8_list: list
"""
for n, utf8 in enumerate(utf8_list):
utf8_list[n][0] = str(utf8[0])
utf8_list[n][1] = str(utf8[1])
utf8_list[n] = tuple(utf8) | [
"def",
"_utf8_list_to_ascii_tuple",
"(",
"utf8_list",
")",
":",
"for",
"n",
",",
"utf8",
"in",
"enumerate",
"(",
"utf8_list",
")",
":",
"utf8_list",
"[",
"n",
"]",
"[",
"0",
"]",
"=",
"str",
"(",
"utf8",
"[",
"0",
"]",
")",
"utf8_list",
"[",
"n",
"... | Convert unicode strings in a list of lists to ascii in a list of tuples.
:param utf8_list: A nested list of unicode strings.
:type utf8_list: list | [
"Convert",
"unicode",
"strings",
"in",
"a",
"list",
"of",
"lists",
"to",
"ascii",
"in",
"a",
"list",
"of",
"tuples",
"."
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_readers.py#L641-L651 | train | 49,390 |
BreakingBytes/simkit | simkit/core/data_readers.py | JSONReader.load_data | def load_data(self, filename, *args, **kwargs):
"""
Load JSON data.
:param filename: name of JSON file with data
:type filename: str
:return: data
:rtype: dict
"""
# append .json extension if needed
if not filename.endswith('.json'):
filename += '.json' # append "json" to filename
# open file and load JSON data
with open(filename, 'r') as fid:
json_data = json.load(fid)
# if JSONReader is the original reader then apply units and return
if (not self.orig_data_reader or
isinstance(self, self.orig_data_reader)):
return self.apply_units_to_cache(json_data['data'])
# last modification since JSON file was saved
utc_mod_time = json_data.get('utc_mod_time')
# instance of original data reader with original parameters
orig_data_reader_obj = self.orig_data_reader(self.parameters, self.meta)
# check if file has been modified since saved as JSON file
if utc_mod_time:
# convert to ordered tuple
utc_mod_time = time.struct_time(utc_mod_time)
orig_filename = filename[:-5] # original filename
# use original file if it's been modified since JSON file saved
if utc_mod_time < time.gmtime(os.path.getmtime(orig_filename)):
os.remove(filename) # delete JSON file
return orig_data_reader_obj.load_data(orig_filename)
# use JSON file if original file hasn't been modified
return orig_data_reader_obj.apply_units_to_cache(json_data['data']) | python | def load_data(self, filename, *args, **kwargs):
"""
Load JSON data.
:param filename: name of JSON file with data
:type filename: str
:return: data
:rtype: dict
"""
# append .json extension if needed
if not filename.endswith('.json'):
filename += '.json' # append "json" to filename
# open file and load JSON data
with open(filename, 'r') as fid:
json_data = json.load(fid)
# if JSONReader is the original reader then apply units and return
if (not self.orig_data_reader or
isinstance(self, self.orig_data_reader)):
return self.apply_units_to_cache(json_data['data'])
# last modification since JSON file was saved
utc_mod_time = json_data.get('utc_mod_time')
# instance of original data reader with original parameters
orig_data_reader_obj = self.orig_data_reader(self.parameters, self.meta)
# check if file has been modified since saved as JSON file
if utc_mod_time:
# convert to ordered tuple
utc_mod_time = time.struct_time(utc_mod_time)
orig_filename = filename[:-5] # original filename
# use original file if it's been modified since JSON file saved
if utc_mod_time < time.gmtime(os.path.getmtime(orig_filename)):
os.remove(filename) # delete JSON file
return orig_data_reader_obj.load_data(orig_filename)
# use JSON file if original file hasn't been modified
return orig_data_reader_obj.apply_units_to_cache(json_data['data']) | [
"def",
"load_data",
"(",
"self",
",",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# append .json extension if needed",
"if",
"not",
"filename",
".",
"endswith",
"(",
"'.json'",
")",
":",
"filename",
"+=",
"'.json'",
"# append \"json\" to ... | Load JSON data.
:param filename: name of JSON file with data
:type filename: str
:return: data
:rtype: dict | [
"Load",
"JSON",
"data",
"."
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_readers.py#L130-L163 | train | 49,391 |
BreakingBytes/simkit | simkit/core/data_readers.py | XLRDReader.load_data | def load_data(self, filename, *args, **kwargs):
"""
Load parameters from Excel spreadsheet.
:param filename: Name of Excel workbook with data.
:type filename: str
:returns: Data read from Excel workbook.
:rtype: dict
"""
# workbook read from file
workbook = open_workbook(filename, verbosity=True)
data = {} # an empty dictionary to store data
# iterate through sheets in parameters
# iterate through the parameters on each sheet
for param, pval in self.parameters.iteritems():
sheet = pval['extras']['sheet']
# get each worksheet from the workbook
worksheet = workbook.sheet_by_name(sheet)
# split the parameter's range elements
prng0, prng1 = pval['extras']['range']
# missing "units", json ``null`` and Python ``None`` all OK!
# convert to str from unicode, None to '' (dimensionless)
punits = str(pval.get('units') or '')
# replace None with empty list
if prng0 is None:
prng0 = []
if prng1 is None:
prng1 = []
# FIXME: Use duck-typing here instead of type-checking!
# if both elements in range are `int` then parameter is a cell
if isinstance(prng0, int) and isinstance(prng1, int):
datum = worksheet.cell_value(prng0, prng1)
# if the either element is a `list` then parameter is a slice
elif isinstance(prng0, list) and isinstance(prng1, int):
datum = worksheet.col_values(prng1, *prng0)
elif isinstance(prng0, int) and isinstance(prng1, list):
datum = worksheet.row_values(prng0, *prng1)
# if both elements are `list` then parameter is 2-D
else:
datum = []
for col in xrange(prng0[1], prng1[1]):
datum.append(worksheet.col_values(col, prng0[0],
prng1[0]))
# duck typing that datum is real
try:
npdatum = np.array(datum, dtype=np.float)
except ValueError as err:
# check for iterable:
# if `datum` can't be coerced to float, then it must be
# *string* & strings *are* iterables, so don't check!
# check for strings:
# data must be real or *all* strings!
# empty string, None or JSON null also OK
# all([]) == True but any([]) == False
if not datum:
data[param] = None # convert empty to None
elif all(isinstance(_, basestring) for _ in datum):
data[param] = datum # all str is OK (EG all 'TMY')
elif all(not _ for _ in datum):
data[param] = None # convert list of empty to None
else:
raise err # raise ValueError if not all real or str
else:
data[param] = npdatum * UREG(punits)
# FYI: only put one statement into try-except test otherwise
# might catch different error than expected. use ``else`` as
# option to execute only if exception *not* raised.
return data | python | def load_data(self, filename, *args, **kwargs):
"""
Load parameters from Excel spreadsheet.
:param filename: Name of Excel workbook with data.
:type filename: str
:returns: Data read from Excel workbook.
:rtype: dict
"""
# workbook read from file
workbook = open_workbook(filename, verbosity=True)
data = {} # an empty dictionary to store data
# iterate through sheets in parameters
# iterate through the parameters on each sheet
for param, pval in self.parameters.iteritems():
sheet = pval['extras']['sheet']
# get each worksheet from the workbook
worksheet = workbook.sheet_by_name(sheet)
# split the parameter's range elements
prng0, prng1 = pval['extras']['range']
# missing "units", json ``null`` and Python ``None`` all OK!
# convert to str from unicode, None to '' (dimensionless)
punits = str(pval.get('units') or '')
# replace None with empty list
if prng0 is None:
prng0 = []
if prng1 is None:
prng1 = []
# FIXME: Use duck-typing here instead of type-checking!
# if both elements in range are `int` then parameter is a cell
if isinstance(prng0, int) and isinstance(prng1, int):
datum = worksheet.cell_value(prng0, prng1)
# if the either element is a `list` then parameter is a slice
elif isinstance(prng0, list) and isinstance(prng1, int):
datum = worksheet.col_values(prng1, *prng0)
elif isinstance(prng0, int) and isinstance(prng1, list):
datum = worksheet.row_values(prng0, *prng1)
# if both elements are `list` then parameter is 2-D
else:
datum = []
for col in xrange(prng0[1], prng1[1]):
datum.append(worksheet.col_values(col, prng0[0],
prng1[0]))
# duck typing that datum is real
try:
npdatum = np.array(datum, dtype=np.float)
except ValueError as err:
# check for iterable:
# if `datum` can't be coerced to float, then it must be
# *string* & strings *are* iterables, so don't check!
# check for strings:
# data must be real or *all* strings!
# empty string, None or JSON null also OK
# all([]) == True but any([]) == False
if not datum:
data[param] = None # convert empty to None
elif all(isinstance(_, basestring) for _ in datum):
data[param] = datum # all str is OK (EG all 'TMY')
elif all(not _ for _ in datum):
data[param] = None # convert list of empty to None
else:
raise err # raise ValueError if not all real or str
else:
data[param] = npdatum * UREG(punits)
# FYI: only put one statement into try-except test otherwise
# might catch different error than expected. use ``else`` as
# option to execute only if exception *not* raised.
return data | [
"def",
"load_data",
"(",
"self",
",",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# workbook read from file",
"workbook",
"=",
"open_workbook",
"(",
"filename",
",",
"verbosity",
"=",
"True",
")",
"data",
"=",
"{",
"}",
"# an empty di... | Load parameters from Excel spreadsheet.
:param filename: Name of Excel workbook with data.
:type filename: str
:returns: Data read from Excel workbook.
:rtype: dict | [
"Load",
"parameters",
"from",
"Excel",
"spreadsheet",
"."
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_readers.py#L243-L310 | train | 49,392 |
BreakingBytes/simkit | simkit/core/data_readers.py | ParameterizedXLS.load_data | def load_data(self, filename, *args, **kwargs):
"""
Load parameterized data from different sheets.
"""
# load parameterized data
data = super(ParameterizedXLS, self).load_data(filename)
# add parameter to data
parameter_name = self.parameterization['parameter']['name']
parameter_values = self.parameterization['parameter']['values']
parameter_units = str(self.parameterization['parameter']['units'])
data[parameter_name] = parameter_values * UREG(parameter_units)
# number of sheets
num_sheets = len(self.parameterization['parameter']['sheets'])
# parse and concatenate parameterized data
for key in self.parameterization['data']:
units = str(self.parameterization['data'][key].get('units')) or ''
datalist = []
for n in xrange(num_sheets):
k = key + '_' + str(n)
datalist.append(data[k].reshape((1, -1)))
data.pop(k) # remove unused data keys
data[key] = np.concatenate(datalist, axis=0) * UREG(units)
return data | python | def load_data(self, filename, *args, **kwargs):
"""
Load parameterized data from different sheets.
"""
# load parameterized data
data = super(ParameterizedXLS, self).load_data(filename)
# add parameter to data
parameter_name = self.parameterization['parameter']['name']
parameter_values = self.parameterization['parameter']['values']
parameter_units = str(self.parameterization['parameter']['units'])
data[parameter_name] = parameter_values * UREG(parameter_units)
# number of sheets
num_sheets = len(self.parameterization['parameter']['sheets'])
# parse and concatenate parameterized data
for key in self.parameterization['data']:
units = str(self.parameterization['data'][key].get('units')) or ''
datalist = []
for n in xrange(num_sheets):
k = key + '_' + str(n)
datalist.append(data[k].reshape((1, -1)))
data.pop(k) # remove unused data keys
data[key] = np.concatenate(datalist, axis=0) * UREG(units)
return data | [
"def",
"load_data",
"(",
"self",
",",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# load parameterized data",
"data",
"=",
"super",
"(",
"ParameterizedXLS",
",",
"self",
")",
".",
"load_data",
"(",
"filename",
")",
"# add parameter to d... | Load parameterized data from different sheets. | [
"Load",
"parameterized",
"data",
"from",
"different",
"sheets",
"."
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_readers.py#L674-L696 | train | 49,393 |
BreakingBytes/simkit | simkit/core/data_readers.py | MixedTextXLS.load_data | def load_data(self, filename, *args, **kwargs):
"""
Load text data from different sheets.
"""
# load text data
data = super(MixedTextXLS, self).load_data(filename)
# iterate through sheets in parameters
for sheet_params in self.parameters.itervalues():
# iterate through the parameters on each sheet
for param, pval in sheet_params.iteritems():
pattern = pval.get('pattern', EFG_PATTERN) # get pattern
re_meth = pval.get('method', 'search') # get re method
# whitelist re methods, getattr could be considered harmful
if re_meth in RE_METH:
re_meth = getattr(re, pval.get('method', 'search'))
else:
msg = 'Only', '"%s", ' * len(RE_METH) % tuple(RE_METH)
msg += 'regex methods are allowed.'
raise AttributeError(msg)
# if not isinstance(data[param], basestring):
# re_meth = lambda p, dp: [re_meth(p, d) for d in dp]
match = re_meth(pattern, data[param]) # get matches
if match:
try:
match = match.groups()
except AttributeError:
match = [m.groups() for m in match]
npdata = np.array(match, dtype=float).squeeze()
data[param] = npdata * UREG(str(pval.get('units') or ''))
else:
raise MixedTextNoMatchError(re_meth, pattern, data[param])
return data | python | def load_data(self, filename, *args, **kwargs):
"""
Load text data from different sheets.
"""
# load text data
data = super(MixedTextXLS, self).load_data(filename)
# iterate through sheets in parameters
for sheet_params in self.parameters.itervalues():
# iterate through the parameters on each sheet
for param, pval in sheet_params.iteritems():
pattern = pval.get('pattern', EFG_PATTERN) # get pattern
re_meth = pval.get('method', 'search') # get re method
# whitelist re methods, getattr could be considered harmful
if re_meth in RE_METH:
re_meth = getattr(re, pval.get('method', 'search'))
else:
msg = 'Only', '"%s", ' * len(RE_METH) % tuple(RE_METH)
msg += 'regex methods are allowed.'
raise AttributeError(msg)
# if not isinstance(data[param], basestring):
# re_meth = lambda p, dp: [re_meth(p, d) for d in dp]
match = re_meth(pattern, data[param]) # get matches
if match:
try:
match = match.groups()
except AttributeError:
match = [m.groups() for m in match]
npdata = np.array(match, dtype=float).squeeze()
data[param] = npdata * UREG(str(pval.get('units') or ''))
else:
raise MixedTextNoMatchError(re_meth, pattern, data[param])
return data | [
"def",
"load_data",
"(",
"self",
",",
"filename",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# load text data",
"data",
"=",
"super",
"(",
"MixedTextXLS",
",",
"self",
")",
".",
"load_data",
"(",
"filename",
")",
"# iterate through sheets in param... | Load text data from different sheets. | [
"Load",
"text",
"data",
"from",
"different",
"sheets",
"."
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/data_readers.py#L774-L805 | train | 49,394 |
Yelp/uwsgi_metrics | uwsgi_metrics/meter.py | Meter.mark | def mark(self, n=1):
"""Mark the occurrence of a given number of events."""
self.tick_if_necessary()
self.count += n
self.m1_rate.update(n)
self.m5_rate.update(n)
self.m15_rate.update(n) | python | def mark(self, n=1):
"""Mark the occurrence of a given number of events."""
self.tick_if_necessary()
self.count += n
self.m1_rate.update(n)
self.m5_rate.update(n)
self.m15_rate.update(n) | [
"def",
"mark",
"(",
"self",
",",
"n",
"=",
"1",
")",
":",
"self",
".",
"tick_if_necessary",
"(",
")",
"self",
".",
"count",
"+=",
"n",
"self",
".",
"m1_rate",
".",
"update",
"(",
"n",
")",
"self",
".",
"m5_rate",
".",
"update",
"(",
"n",
")",
"... | Mark the occurrence of a given number of events. | [
"Mark",
"the",
"occurrence",
"of",
"a",
"given",
"number",
"of",
"events",
"."
] | 534966fd461ff711aecd1e3d4caaafdc23ac33f0 | https://github.com/Yelp/uwsgi_metrics/blob/534966fd461ff711aecd1e3d4caaafdc23ac33f0/uwsgi_metrics/meter.py#L32-L38 | train | 49,395 |
BreakingBytes/simkit | simkit/core/models.py | Model._update | def _update(self, layer=None):
"""
Update layers in model.
"""
meta = getattr(self, ModelBase._meta_attr)
if not layer:
layers = self.layers
else:
# convert non-sequence to tuple
layers = _listify(layer)
for layer in layers:
# relative path to layer files from model file
path = os.path.abspath(os.path.join(meta.modelpath, layer))
getattr(self, layer).load(path) | python | def _update(self, layer=None):
"""
Update layers in model.
"""
meta = getattr(self, ModelBase._meta_attr)
if not layer:
layers = self.layers
else:
# convert non-sequence to tuple
layers = _listify(layer)
for layer in layers:
# relative path to layer files from model file
path = os.path.abspath(os.path.join(meta.modelpath, layer))
getattr(self, layer).load(path) | [
"def",
"_update",
"(",
"self",
",",
"layer",
"=",
"None",
")",
":",
"meta",
"=",
"getattr",
"(",
"self",
",",
"ModelBase",
".",
"_meta_attr",
")",
"if",
"not",
"layer",
":",
"layers",
"=",
"self",
".",
"layers",
"else",
":",
"# convert non-sequence to tu... | Update layers in model. | [
"Update",
"layers",
"in",
"model",
"."
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/models.py#L151-L164 | train | 49,396 |
BreakingBytes/simkit | simkit/core/models.py | Model._initialize | def _initialize(self):
"""
Initialize model and layers.
"""
meta = getattr(self, ModelBase._meta_attr)
# read modelfile, convert JSON and load/update model
if self.param_file is not None:
self._load()
LOGGER.debug('model:\n%r', self.model)
# initialize layers
# FIXME: move import inside loop for custom layers in different modules
mod = importlib.import_module(meta.layers_mod, meta.layers_pkg)
src_model = {}
for layer, value in self.model.iteritems():
# from layers module get the layer's class definition
layer_cls = getattr(mod, meta.layer_cls_names[layer]) # class def
self.layers[layer] = layer_cls # add layer class def to model
# check if model layers are classes
src_value = {} # layer value generated from source classes
for src in value['sources']:
# check if source has keyword arguments
try:
src, kwargs = src
except (TypeError, ValueError):
kwargs = {} # no key work arguments
# skip if not a source class
if isinstance(src, basestring):
continue
# generate layer value from source class
src_value[src.__name__] = {'module': src.__module__,
'package': None}
# update layer keyword arguments
src_value[src.__name__].update(kwargs)
# use layer values generated from source class
if src_value:
value = src_model[layer] = src_value
else:
srcmod, srcpkg = value.get('module'), value.get('package')
try:
value = dict(value['sources'])
except ValueError:
value = dict.fromkeys(value['sources'], {})
for src in value.viewkeys():
if srcmod is not None:
value[src]['module'] = srcmod
if srcpkg is not None:
value[src]['package'] = srcpkg
# set layer attribute with model data
setattr(self, layer, layer_cls(value))
# update model with layer values generated from source classes
if src_model:
self.model.update(src_model)
self._update()
self._state = 'initialized' | python | def _initialize(self):
"""
Initialize model and layers.
"""
meta = getattr(self, ModelBase._meta_attr)
# read modelfile, convert JSON and load/update model
if self.param_file is not None:
self._load()
LOGGER.debug('model:\n%r', self.model)
# initialize layers
# FIXME: move import inside loop for custom layers in different modules
mod = importlib.import_module(meta.layers_mod, meta.layers_pkg)
src_model = {}
for layer, value in self.model.iteritems():
# from layers module get the layer's class definition
layer_cls = getattr(mod, meta.layer_cls_names[layer]) # class def
self.layers[layer] = layer_cls # add layer class def to model
# check if model layers are classes
src_value = {} # layer value generated from source classes
for src in value['sources']:
# check if source has keyword arguments
try:
src, kwargs = src
except (TypeError, ValueError):
kwargs = {} # no key work arguments
# skip if not a source class
if isinstance(src, basestring):
continue
# generate layer value from source class
src_value[src.__name__] = {'module': src.__module__,
'package': None}
# update layer keyword arguments
src_value[src.__name__].update(kwargs)
# use layer values generated from source class
if src_value:
value = src_model[layer] = src_value
else:
srcmod, srcpkg = value.get('module'), value.get('package')
try:
value = dict(value['sources'])
except ValueError:
value = dict.fromkeys(value['sources'], {})
for src in value.viewkeys():
if srcmod is not None:
value[src]['module'] = srcmod
if srcpkg is not None:
value[src]['package'] = srcpkg
# set layer attribute with model data
setattr(self, layer, layer_cls(value))
# update model with layer values generated from source classes
if src_model:
self.model.update(src_model)
self._update()
self._state = 'initialized' | [
"def",
"_initialize",
"(",
"self",
")",
":",
"meta",
"=",
"getattr",
"(",
"self",
",",
"ModelBase",
".",
"_meta_attr",
")",
"# read modelfile, convert JSON and load/update model",
"if",
"self",
".",
"param_file",
"is",
"not",
"None",
":",
"self",
".",
"_load",
... | Initialize model and layers. | [
"Initialize",
"model",
"and",
"layers",
"."
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/models.py#L166-L219 | train | 49,397 |
BreakingBytes/simkit | simkit/core/models.py | Model.load | def load(self, modelfile, layer=None):
"""
Load or update a model or layers in a model.
:param modelfile: The name of the json file to load.
:type modelfile: str
:param layer: Optionally load only specified layer.
:type layer: str
"""
# read modelfile, convert JSON and load/update model
self.param_file = modelfile
self._load(layer)
self._update(layer) | python | def load(self, modelfile, layer=None):
"""
Load or update a model or layers in a model.
:param modelfile: The name of the json file to load.
:type modelfile: str
:param layer: Optionally load only specified layer.
:type layer: str
"""
# read modelfile, convert JSON and load/update model
self.param_file = modelfile
self._load(layer)
self._update(layer) | [
"def",
"load",
"(",
"self",
",",
"modelfile",
",",
"layer",
"=",
"None",
")",
":",
"# read modelfile, convert JSON and load/update model",
"self",
".",
"param_file",
"=",
"modelfile",
"self",
".",
"_load",
"(",
"layer",
")",
"self",
".",
"_update",
"(",
"layer... | Load or update a model or layers in a model.
:param modelfile: The name of the json file to load.
:type modelfile: str
:param layer: Optionally load only specified layer.
:type layer: str | [
"Load",
"or",
"update",
"a",
"model",
"or",
"layers",
"in",
"a",
"model",
"."
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/models.py#L221-L233 | train | 49,398 |
BreakingBytes/simkit | simkit/core/models.py | Model.edit | def edit(self, layer, item, delete=False):
"""
Edit model.
:param layer: Layer of model to edit
:type layer: str
:param item: Items to edit.
:type item: dict
:param delete: Flag to return
:class:`~simkit.core.layers.Layer` to delete item.
:type delete: bool
"""
# get layer attribute with model data
if hasattr(self, layer):
layer_obj = getattr(self, layer)
else:
raise AttributeError('missing layer: %s', layer)
if delete:
return layer_obj
# iterate over items and edit layer
for k, v in item.iteritems():
if k in layer_obj.layer:
layer_obj.edit(k, v) # edit layer
else:
raise AttributeError('missing layer item: %s', k)
# update model data
if k in self.model[layer]:
self.model[layer][k].update(v)
else:
raise AttributeError('missing model layer item: %s', k) | python | def edit(self, layer, item, delete=False):
"""
Edit model.
:param layer: Layer of model to edit
:type layer: str
:param item: Items to edit.
:type item: dict
:param delete: Flag to return
:class:`~simkit.core.layers.Layer` to delete item.
:type delete: bool
"""
# get layer attribute with model data
if hasattr(self, layer):
layer_obj = getattr(self, layer)
else:
raise AttributeError('missing layer: %s', layer)
if delete:
return layer_obj
# iterate over items and edit layer
for k, v in item.iteritems():
if k in layer_obj.layer:
layer_obj.edit(k, v) # edit layer
else:
raise AttributeError('missing layer item: %s', k)
# update model data
if k in self.model[layer]:
self.model[layer][k].update(v)
else:
raise AttributeError('missing model layer item: %s', k) | [
"def",
"edit",
"(",
"self",
",",
"layer",
",",
"item",
",",
"delete",
"=",
"False",
")",
":",
"# get layer attribute with model data",
"if",
"hasattr",
"(",
"self",
",",
"layer",
")",
":",
"layer_obj",
"=",
"getattr",
"(",
"self",
",",
"layer",
")",
"els... | Edit model.
:param layer: Layer of model to edit
:type layer: str
:param item: Items to edit.
:type item: dict
:param delete: Flag to return
:class:`~simkit.core.layers.Layer` to delete item.
:type delete: bool | [
"Edit",
"model",
"."
] | 205163d879d3880b6c9ef609f1b723a58773026b | https://github.com/BreakingBytes/simkit/blob/205163d879d3880b6c9ef609f1b723a58773026b/simkit/core/models.py#L235-L264 | train | 49,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.