body_hash stringlengths 64 64 | body stringlengths 23 109k | docstring stringlengths 1 57k | path stringlengths 4 198 | name stringlengths 1 115 | repository_name stringlengths 7 111 | repository_stars float64 0 191k | lang stringclasses 1 value | body_without_docstring stringlengths 14 108k | unified stringlengths 45 133k |
|---|---|---|---|---|---|---|---|---|---|
189055a351d5ea271c0309332dfc210415a71d201adf28e9952a255ef4d18c3e | def testPopUpContent(self):
'Tests that pop up window renders correct content'
driver = self.browser
driver.implicitly_wait(10)
driver.get('http://localhost:5000/user/login')
main_window_handle = driver.window_handles[0]
driver.find_element_by_id('facebook-btn').click()
signin_window_handle = driver.window_handles[1]
driver.switch_to.window(signin_window_handle)
content_wrapper = driver.find_element_by_id('content')
content_wrapper_text = content_wrapper.text
expected_content_text = 'Log in to use your Facebook account with Ckanext-socialite.'
self.assertIn(expected_content_text, content_wrapper_text) | Tests that pop up window renders correct content | ckanext/socialite/tests/test_e2e/test_facebookauth.py | testPopUpContent | ccancellieri/ckanext-socialite | 1 | python | def testPopUpContent(self):
driver = self.browser
driver.implicitly_wait(10)
driver.get('http://localhost:5000/user/login')
main_window_handle = driver.window_handles[0]
driver.find_element_by_id('facebook-btn').click()
signin_window_handle = driver.window_handles[1]
driver.switch_to.window(signin_window_handle)
content_wrapper = driver.find_element_by_id('content')
content_wrapper_text = content_wrapper.text
expected_content_text = 'Log in to use your Facebook account with Ckanext-socialite.'
self.assertIn(expected_content_text, content_wrapper_text) | def testPopUpContent(self):
driver = self.browser
driver.implicitly_wait(10)
driver.get('http://localhost:5000/user/login')
main_window_handle = driver.window_handles[0]
driver.find_element_by_id('facebook-btn').click()
signin_window_handle = driver.window_handles[1]
driver.switch_to.window(signin_window_handle)
content_wrapper = driver.find_element_by_id('content')
content_wrapper_text = content_wrapper.text
expected_content_text = 'Log in to use your Facebook account with Ckanext-socialite.'
self.assertIn(expected_content_text, content_wrapper_text)<|docstring|>Tests that pop up window renders correct content<|endoftext|> |
51344c1584ff2684015683c58d9b99eca945e8030159fb1d5dbf6881f048ca10 | def testSuccessfulLogin(self):
'Tests that user is redirected to DataSets page on successful login'
driver = self.browser
driver.implicitly_wait(50)
driver.get('http://localhost:5000/user/login')
button_link = driver.find_element_by_xpath(u'//a[@id="facebook-btn"]')
try:
button_link.click()
main_window_handle = driver.window_handles[0]
signin_window_handle = driver.window_handles[1]
driver.switch_to.window(signin_window_handle)
email_input = driver.find_element_by_id('email').send_keys('example@example.com')
password_input = driver.find_element_by_id('pass').send_keys('ckanext&socialite')
submit_button = WebDriverWait(driver, 100).until(EC.element_to_be_clickable((By.NAME, 'login')))
submit_button.submit()
driver.switch_to.window(main_window_handle)
WebDriverWait(driver, 20).until(EC.title_contains('Datasets - CKAN'))
self.assertIn(driver.title, 'Datasets - CKAN')
finally:
driver.quit() | Tests that user is redirected to DataSets page on successful login | ckanext/socialite/tests/test_e2e/test_facebookauth.py | testSuccessfulLogin | ccancellieri/ckanext-socialite | 1 | python | def testSuccessfulLogin(self):
driver = self.browser
driver.implicitly_wait(50)
driver.get('http://localhost:5000/user/login')
button_link = driver.find_element_by_xpath(u'//a[@id="facebook-btn"]')
try:
button_link.click()
main_window_handle = driver.window_handles[0]
signin_window_handle = driver.window_handles[1]
driver.switch_to.window(signin_window_handle)
email_input = driver.find_element_by_id('email').send_keys('example@example.com')
password_input = driver.find_element_by_id('pass').send_keys('ckanext&socialite')
submit_button = WebDriverWait(driver, 100).until(EC.element_to_be_clickable((By.NAME, 'login')))
submit_button.submit()
driver.switch_to.window(main_window_handle)
WebDriverWait(driver, 20).until(EC.title_contains('Datasets - CKAN'))
self.assertIn(driver.title, 'Datasets - CKAN')
finally:
driver.quit() | def testSuccessfulLogin(self):
driver = self.browser
driver.implicitly_wait(50)
driver.get('http://localhost:5000/user/login')
button_link = driver.find_element_by_xpath(u'//a[@id="facebook-btn"]')
try:
button_link.click()
main_window_handle = driver.window_handles[0]
signin_window_handle = driver.window_handles[1]
driver.switch_to.window(signin_window_handle)
email_input = driver.find_element_by_id('email').send_keys('example@example.com')
password_input = driver.find_element_by_id('pass').send_keys('ckanext&socialite')
submit_button = WebDriverWait(driver, 100).until(EC.element_to_be_clickable((By.NAME, 'login')))
submit_button.submit()
driver.switch_to.window(main_window_handle)
WebDriverWait(driver, 20).until(EC.title_contains('Datasets - CKAN'))
self.assertIn(driver.title, 'Datasets - CKAN')
finally:
driver.quit()<|docstring|>Tests that user is redirected to DataSets page on successful login<|endoftext|> |
e944c4c1b4019223f43ea15dc529c02bee648d3c36998549534f633b8b2dbb80 | def spline_trajectory(t: ArrayLike, control_points: ArrayLike, *, t_control: Optional[ArrayLike]=None, degree: int=3, t_min: float=0, t_max: float=1, derivative: int=0) -> np.ndarray:
"Evaluate the trajectory given by control_points at t using B-spline\n interpolation.\n\n ``spline_trajectory`` constructs a ``degree``-times differentiable\n trajectory using the given control points and then evaluates the resulting\n trajectory at ``t``. It does so using B-splines. By default, control points\n are spaced out evenly in the interval ``[t_min, t_max]`` where ``t=t_min``\n results in ``control_points[0]`` and ``t=t_max`` results in\n ``control_poins[-1]``. Alternatively, the spacing of control points can be\n set manually by specifying ``t_control``, which implicitly specifies\n ``t_min`` and ``t_max``.\n\n Parameters\n ----------\n t : np.ndarray\n An array containing positions at which to evaluate the trajectory.\n Elements of ``t`` must be within ``[t_min, t_max]``.\n control_points : np.ndarray\n A batch of control points used to construct the trajectory. The first\n dimension of the array is interpreted as batch dimension and the\n remaining dimensions are used to interpolate between. By default,\n control points are equally spaced within ``[t_min, t_max]`` unless\n ``t_control`` is given explicitly.\n t_control : np.ndarray, None\n A sequence of strictly increasing floats determining the position of the\n control points along the trajectory. None by default, which results in\n an equidistant spacing of points.\n degree : int\n The degree of the spline; uneven numbers are preferred. The resulting\n spline is k times continously differentiable.\n t_min : float\n Minimum value of the trajectories parametrization. Must be smaller than\n ``t_max``. If ``t_control`` is set, this value is ignored in favor of\n ``t_min=t_control[0]``\n t_max : float\n Maximum value of the trajectories parametrization. Must be larger than\n ``t_min``. If ``t_control`` is set, this value is ignored in favor of\n ``t_max=t_control[-1]``.\n derivative : int\n The derivative of the interpolated trajectory to compute. 
For example,\n ``derivative=2`` differentiates the trajectory twice with respect to\n ``t`` and then evaluates the derivative at the given ``t``.\n\n Returns\n -------\n position : np.ndarray\n The value of the trajectory at ``t``.\n\n Notes\n -----\n The dimension of the space embedding the trajectory must be less than 12,\n i.e. ``<= 11``, due to limitations in scipy. If more dimensions are needed,\n please open an issue; a workaround is to split the trajectory into chunks\n of less than 11 dimensions each.\n\n Repeated evaluation of single points on the trajectory, i.e. repeatedly\n calling this function with scalar ``t``, is possible, but will repeatedly\n reconstruct the trajectory, which can lead to unnecessary slowdown. For\n better performance, it is preferred to use an array-like ``t``.\n\n Examples\n --------\n\n .. plot::\n :include-source:\n\n >>> import numpy as np\n >>> import matplotlib.pyplot as plt\n >>> from skbot.trajectory import spline_trajectory\n >>> t1 = np.linspace(0, 2*np.pi, 10)\n >>> control_points = np.stack((np.cos(t1), np.sin(t1)), axis=1)\n >>> t2 = np.linspace(0, 2*np.pi, 100)\n >>> trajectory = spline_trajectory(t2, control_points, t_min=0, t_max=2*np.pi)\n >>> fig, ax = plt.subplots()\n >>> ax.plot(trajectory[:,0], trajectory[:,1], control_points[:,0], control_points[:,1], 'o')\n >>> fig.legend(('Trajectory', 'Control Points'))\n >>> plt.show()\n\n "
t = np.asarray(t)
control_points = np.asarray(control_points)
if (control_points.ndim == 1):
control_points = control_points[(:, None)]
if (t_control is None):
t_control = np.linspace(t_min, t_max, len(control_points), dtype=np.float_)
else:
t_control = np.asarray(t_control)
t_min = t_control[0]
t_max = t_control[(- 1)]
(tck, u) = splprep(control_points.T, u=t_control, s=0, ub=t_min, ue=t_max, k=degree)
return np.stack(splev(t, tck, der=derivative, ext=2), axis=(- 1)) | Evaluate the trajectory given by control_points at t using B-spline
interpolation.
``spline_trajectory`` constructs a ``degree``-times differentiable
trajectory using the given control points and then evaluates the resulting
trajectory at ``t``. It does so using B-splines. By default, control points
are spaced out evenly in the interval ``[t_min, t_max]`` where ``t=t_min``
results in ``control_points[0]`` and ``t=t_max`` results in
``control_poins[-1]``. Alternatively, the spacing of control points can be
set manually by specifying ``t_control``, which implicitly specifies
``t_min`` and ``t_max``.
Parameters
----------
t : np.ndarray
An array containing positions at which to evaluate the trajectory.
Elements of ``t`` must be within ``[t_min, t_max]``.
control_points : np.ndarray
A batch of control points used to construct the trajectory. The first
dimension of the array is interpreted as batch dimension and the
remaining dimensions are used to interpolate between. By default,
control points are equally spaced within ``[t_min, t_max]`` unless
``t_control`` is given explicitly.
t_control : np.ndarray, None
A sequence of strictly increasing floats determining the position of the
control points along the trajectory. None by default, which results in
an equidistant spacing of points.
degree : int
The degree of the spline; uneven numbers are preferred. The resulting
spline is k times continously differentiable.
t_min : float
Minimum value of the trajectories parametrization. Must be smaller than
``t_max``. If ``t_control`` is set, this value is ignored in favor of
``t_min=t_control[0]``
t_max : float
Maximum value of the trajectories parametrization. Must be larger than
``t_min``. If ``t_control`` is set, this value is ignored in favor of
``t_max=t_control[-1]``.
derivative : int
The derivative of the interpolated trajectory to compute. For example,
``derivative=2`` differentiates the trajectory twice with respect to
``t`` and then evaluates the derivative at the given ``t``.
Returns
-------
position : np.ndarray
The value of the trajectory at ``t``.
Notes
-----
The dimension of the space embedding the trajectory must be less than 12,
i.e. ``<= 11``, due to limitations in scipy. If more dimensions are needed,
please open an issue; a workaround is to split the trajectory into chunks
of less than 11 dimensions each.
Repeated evaluation of single points on the trajectory, i.e. repeatedly
calling this function with scalar ``t``, is possible, but will repeatedly
reconstruct the trajectory, which can lead to unnecessary slowdown. For
better performance, it is preferred to use an array-like ``t``.
Examples
--------
.. plot::
:include-source:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from skbot.trajectory import spline_trajectory
>>> t1 = np.linspace(0, 2*np.pi, 10)
>>> control_points = np.stack((np.cos(t1), np.sin(t1)), axis=1)
>>> t2 = np.linspace(0, 2*np.pi, 100)
>>> trajectory = spline_trajectory(t2, control_points, t_min=0, t_max=2*np.pi)
>>> fig, ax = plt.subplots()
>>> ax.plot(trajectory[:,0], trajectory[:,1], control_points[:,0], control_points[:,1], 'o')
>>> fig.legend(('Trajectory', 'Control Points'))
>>> plt.show() | skbot/trajectory/spline.py | spline_trajectory | FirefoxMetzger/ropy | 6 | python | def spline_trajectory(t: ArrayLike, control_points: ArrayLike, *, t_control: Optional[ArrayLike]=None, degree: int=3, t_min: float=0, t_max: float=1, derivative: int=0) -> np.ndarray:
"Evaluate the trajectory given by control_points at t using B-spline\n interpolation.\n\n ``spline_trajectory`` constructs a ``degree``-times differentiable\n trajectory using the given control points and then evaluates the resulting\n trajectory at ``t``. It does so using B-splines. By default, control points\n are spaced out evenly in the interval ``[t_min, t_max]`` where ``t=t_min``\n results in ``control_points[0]`` and ``t=t_max`` results in\n ``control_poins[-1]``. Alternatively, the spacing of control points can be\n set manually by specifying ``t_control``, which implicitly specifies\n ``t_min`` and ``t_max``.\n\n Parameters\n ----------\n t : np.ndarray\n An array containing positions at which to evaluate the trajectory.\n Elements of ``t`` must be within ``[t_min, t_max]``.\n control_points : np.ndarray\n A batch of control points used to construct the trajectory. The first\n dimension of the array is interpreted as batch dimension and the\n remaining dimensions are used to interpolate between. By default,\n control points are equally spaced within ``[t_min, t_max]`` unless\n ``t_control`` is given explicitly.\n t_control : np.ndarray, None\n A sequence of strictly increasing floats determining the position of the\n control points along the trajectory. None by default, which results in\n an equidistant spacing of points.\n degree : int\n The degree of the spline; uneven numbers are preferred. The resulting\n spline is k times continously differentiable.\n t_min : float\n Minimum value of the trajectories parametrization. Must be smaller than\n ``t_max``. If ``t_control`` is set, this value is ignored in favor of\n ``t_min=t_control[0]``\n t_max : float\n Maximum value of the trajectories parametrization. Must be larger than\n ``t_min``. If ``t_control`` is set, this value is ignored in favor of\n ``t_max=t_control[-1]``.\n derivative : int\n The derivative of the interpolated trajectory to compute. 
For example,\n ``derivative=2`` differentiates the trajectory twice with respect to\n ``t`` and then evaluates the derivative at the given ``t``.\n\n Returns\n -------\n position : np.ndarray\n The value of the trajectory at ``t``.\n\n Notes\n -----\n The dimension of the space embedding the trajectory must be less than 12,\n i.e. ``<= 11``, due to limitations in scipy. If more dimensions are needed,\n please open an issue; a workaround is to split the trajectory into chunks\n of less than 11 dimensions each.\n\n Repeated evaluation of single points on the trajectory, i.e. repeatedly\n calling this function with scalar ``t``, is possible, but will repeatedly\n reconstruct the trajectory, which can lead to unnecessary slowdown. For\n better performance, it is preferred to use an array-like ``t``.\n\n Examples\n --------\n\n .. plot::\n :include-source:\n\n >>> import numpy as np\n >>> import matplotlib.pyplot as plt\n >>> from skbot.trajectory import spline_trajectory\n >>> t1 = np.linspace(0, 2*np.pi, 10)\n >>> control_points = np.stack((np.cos(t1), np.sin(t1)), axis=1)\n >>> t2 = np.linspace(0, 2*np.pi, 100)\n >>> trajectory = spline_trajectory(t2, control_points, t_min=0, t_max=2*np.pi)\n >>> fig, ax = plt.subplots()\n >>> ax.plot(trajectory[:,0], trajectory[:,1], control_points[:,0], control_points[:,1], 'o')\n >>> fig.legend(('Trajectory', 'Control Points'))\n >>> plt.show()\n\n "
t = np.asarray(t)
control_points = np.asarray(control_points)
if (control_points.ndim == 1):
control_points = control_points[(:, None)]
if (t_control is None):
t_control = np.linspace(t_min, t_max, len(control_points), dtype=np.float_)
else:
t_control = np.asarray(t_control)
t_min = t_control[0]
t_max = t_control[(- 1)]
(tck, u) = splprep(control_points.T, u=t_control, s=0, ub=t_min, ue=t_max, k=degree)
return np.stack(splev(t, tck, der=derivative, ext=2), axis=(- 1)) | def spline_trajectory(t: ArrayLike, control_points: ArrayLike, *, t_control: Optional[ArrayLike]=None, degree: int=3, t_min: float=0, t_max: float=1, derivative: int=0) -> np.ndarray:
"Evaluate the trajectory given by control_points at t using B-spline\n interpolation.\n\n ``spline_trajectory`` constructs a ``degree``-times differentiable\n trajectory using the given control points and then evaluates the resulting\n trajectory at ``t``. It does so using B-splines. By default, control points\n are spaced out evenly in the interval ``[t_min, t_max]`` where ``t=t_min``\n results in ``control_points[0]`` and ``t=t_max`` results in\n ``control_poins[-1]``. Alternatively, the spacing of control points can be\n set manually by specifying ``t_control``, which implicitly specifies\n ``t_min`` and ``t_max``.\n\n Parameters\n ----------\n t : np.ndarray\n An array containing positions at which to evaluate the trajectory.\n Elements of ``t`` must be within ``[t_min, t_max]``.\n control_points : np.ndarray\n A batch of control points used to construct the trajectory. The first\n dimension of the array is interpreted as batch dimension and the\n remaining dimensions are used to interpolate between. By default,\n control points are equally spaced within ``[t_min, t_max]`` unless\n ``t_control`` is given explicitly.\n t_control : np.ndarray, None\n A sequence of strictly increasing floats determining the position of the\n control points along the trajectory. None by default, which results in\n an equidistant spacing of points.\n degree : int\n The degree of the spline; uneven numbers are preferred. The resulting\n spline is k times continously differentiable.\n t_min : float\n Minimum value of the trajectories parametrization. Must be smaller than\n ``t_max``. If ``t_control`` is set, this value is ignored in favor of\n ``t_min=t_control[0]``\n t_max : float\n Maximum value of the trajectories parametrization. Must be larger than\n ``t_min``. If ``t_control`` is set, this value is ignored in favor of\n ``t_max=t_control[-1]``.\n derivative : int\n The derivative of the interpolated trajectory to compute. 
For example,\n ``derivative=2`` differentiates the trajectory twice with respect to\n ``t`` and then evaluates the derivative at the given ``t``.\n\n Returns\n -------\n position : np.ndarray\n The value of the trajectory at ``t``.\n\n Notes\n -----\n The dimension of the space embedding the trajectory must be less than 12,\n i.e. ``<= 11``, due to limitations in scipy. If more dimensions are needed,\n please open an issue; a workaround is to split the trajectory into chunks\n of less than 11 dimensions each.\n\n Repeated evaluation of single points on the trajectory, i.e. repeatedly\n calling this function with scalar ``t``, is possible, but will repeatedly\n reconstruct the trajectory, which can lead to unnecessary slowdown. For\n better performance, it is preferred to use an array-like ``t``.\n\n Examples\n --------\n\n .. plot::\n :include-source:\n\n >>> import numpy as np\n >>> import matplotlib.pyplot as plt\n >>> from skbot.trajectory import spline_trajectory\n >>> t1 = np.linspace(0, 2*np.pi, 10)\n >>> control_points = np.stack((np.cos(t1), np.sin(t1)), axis=1)\n >>> t2 = np.linspace(0, 2*np.pi, 100)\n >>> trajectory = spline_trajectory(t2, control_points, t_min=0, t_max=2*np.pi)\n >>> fig, ax = plt.subplots()\n >>> ax.plot(trajectory[:,0], trajectory[:,1], control_points[:,0], control_points[:,1], 'o')\n >>> fig.legend(('Trajectory', 'Control Points'))\n >>> plt.show()\n\n "
t = np.asarray(t)
control_points = np.asarray(control_points)
if (control_points.ndim == 1):
control_points = control_points[(:, None)]
if (t_control is None):
t_control = np.linspace(t_min, t_max, len(control_points), dtype=np.float_)
else:
t_control = np.asarray(t_control)
t_min = t_control[0]
t_max = t_control[(- 1)]
(tck, u) = splprep(control_points.T, u=t_control, s=0, ub=t_min, ue=t_max, k=degree)
return np.stack(splev(t, tck, der=derivative, ext=2), axis=(- 1))<|docstring|>Evaluate the trajectory given by control_points at t using B-spline
interpolation.
``spline_trajectory`` constructs a ``degree``-times differentiable
trajectory using the given control points and then evaluates the resulting
trajectory at ``t``. It does so using B-splines. By default, control points
are spaced out evenly in the interval ``[t_min, t_max]`` where ``t=t_min``
results in ``control_points[0]`` and ``t=t_max`` results in
``control_poins[-1]``. Alternatively, the spacing of control points can be
set manually by specifying ``t_control``, which implicitly specifies
``t_min`` and ``t_max``.
Parameters
----------
t : np.ndarray
An array containing positions at which to evaluate the trajectory.
Elements of ``t`` must be within ``[t_min, t_max]``.
control_points : np.ndarray
A batch of control points used to construct the trajectory. The first
dimension of the array is interpreted as batch dimension and the
remaining dimensions are used to interpolate between. By default,
control points are equally spaced within ``[t_min, t_max]`` unless
``t_control`` is given explicitly.
t_control : np.ndarray, None
A sequence of strictly increasing floats determining the position of the
control points along the trajectory. None by default, which results in
an equidistant spacing of points.
degree : int
The degree of the spline; uneven numbers are preferred. The resulting
spline is k times continously differentiable.
t_min : float
Minimum value of the trajectories parametrization. Must be smaller than
``t_max``. If ``t_control`` is set, this value is ignored in favor of
``t_min=t_control[0]``
t_max : float
Maximum value of the trajectories parametrization. Must be larger than
``t_min``. If ``t_control`` is set, this value is ignored in favor of
``t_max=t_control[-1]``.
derivative : int
The derivative of the interpolated trajectory to compute. For example,
``derivative=2`` differentiates the trajectory twice with respect to
``t`` and then evaluates the derivative at the given ``t``.
Returns
-------
position : np.ndarray
The value of the trajectory at ``t``.
Notes
-----
The dimension of the space embedding the trajectory must be less than 12,
i.e. ``<= 11``, due to limitations in scipy. If more dimensions are needed,
please open an issue; a workaround is to split the trajectory into chunks
of less than 11 dimensions each.
Repeated evaluation of single points on the trajectory, i.e. repeatedly
calling this function with scalar ``t``, is possible, but will repeatedly
reconstruct the trajectory, which can lead to unnecessary slowdown. For
better performance, it is preferred to use an array-like ``t``.
Examples
--------
.. plot::
:include-source:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from skbot.trajectory import spline_trajectory
>>> t1 = np.linspace(0, 2*np.pi, 10)
>>> control_points = np.stack((np.cos(t1), np.sin(t1)), axis=1)
>>> t2 = np.linspace(0, 2*np.pi, 100)
>>> trajectory = spline_trajectory(t2, control_points, t_min=0, t_max=2*np.pi)
>>> fig, ax = plt.subplots()
>>> ax.plot(trajectory[:,0], trajectory[:,1], control_points[:,0], control_points[:,1], 'o')
>>> fig.legend(('Trajectory', 'Control Points'))
>>> plt.show()<|endoftext|> |
fd8bf21991cb30ff61c0d3eb61fd1765b1ea3ea62c53b87a3f0b8ebd8513b906 | def pencil_sketch(path_to_img, width, height, bg_gray='./pencilsketch_bg.jpg'):
'Pencil sketch effect\n\n Applies a pencil sketch effect to an image.\n The processed image is overlayed over a background image for visual effect.\n '
img_rgb = cv2.imread(path_to_img)
img_rgb = cv2.resize(img_rgb, (width, height))
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
img_blur = cv2.GaussianBlur(img_gray, (21, 21), 0, 0)
img_blend = cv2.divide(img_gray, img_blur, scale=256)
canvas = cv2.imread(bg_gray, cv2.CV_8UC1)
if (canvas is not None):
canvas = cv2.resize(canvas, (width, height))
img_blend = cv2.multiply(img_blend, canvas, scale=(1.0 / 256))
return cv2.cvtColor(img_blend, cv2.COLOR_GRAY2RGB) | Pencil sketch effect
Applies a pencil sketch effect to an image.
The processed image is overlayed over a background image for visual effect. | data/preprocess/sketch.py | pencil_sketch | VIVelev/sketchy-code | 0 | python | def pencil_sketch(path_to_img, width, height, bg_gray='./pencilsketch_bg.jpg'):
'Pencil sketch effect\n\n Applies a pencil sketch effect to an image.\n The processed image is overlayed over a background image for visual effect.\n '
img_rgb = cv2.imread(path_to_img)
img_rgb = cv2.resize(img_rgb, (width, height))
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
img_blur = cv2.GaussianBlur(img_gray, (21, 21), 0, 0)
img_blend = cv2.divide(img_gray, img_blur, scale=256)
canvas = cv2.imread(bg_gray, cv2.CV_8UC1)
if (canvas is not None):
canvas = cv2.resize(canvas, (width, height))
img_blend = cv2.multiply(img_blend, canvas, scale=(1.0 / 256))
return cv2.cvtColor(img_blend, cv2.COLOR_GRAY2RGB) | def pencil_sketch(path_to_img, width, height, bg_gray='./pencilsketch_bg.jpg'):
'Pencil sketch effect\n\n Applies a pencil sketch effect to an image.\n The processed image is overlayed over a background image for visual effect.\n '
img_rgb = cv2.imread(path_to_img)
img_rgb = cv2.resize(img_rgb, (width, height))
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2GRAY)
img_blur = cv2.GaussianBlur(img_gray, (21, 21), 0, 0)
img_blend = cv2.divide(img_gray, img_blur, scale=256)
canvas = cv2.imread(bg_gray, cv2.CV_8UC1)
if (canvas is not None):
canvas = cv2.resize(canvas, (width, height))
img_blend = cv2.multiply(img_blend, canvas, scale=(1.0 / 256))
return cv2.cvtColor(img_blend, cv2.COLOR_GRAY2RGB)<|docstring|>Pencil sketch effect
Applies a pencil sketch effect to an image.
The processed image is overlayed over a background image for visual effect.<|endoftext|> |
c8af3ecc8f0831d420edf0053f07ab2102fb03e6ded48caea6b7984738ffac79 | def _budget_date(date):
'Get a budget month date from the actual date.'
if isinstance(date, datetime):
if (date.day > 7):
return datetime(date.year, date.month, 1)
if (date.month > 1):
return datetime(date.year, (date.month - 1), 1)
return datetime((date.year - 1), 12, 1)
raise TypeError('Invalid date {}'.format(date)) | Get a budget month date from the actual date. | dataplaybook/tasks/fnb.py | _budget_date | kellerza/data-playbook | 3 | python | def _budget_date(date):
if isinstance(date, datetime):
if (date.day > 7):
return datetime(date.year, date.month, 1)
if (date.month > 1):
return datetime(date.year, (date.month - 1), 1)
return datetime((date.year - 1), 12, 1)
raise TypeError('Invalid date {}'.format(date)) | def _budget_date(date):
if isinstance(date, datetime):
if (date.day > 7):
return datetime(date.year, date.month, 1)
if (date.month > 1):
return datetime(date.year, (date.month - 1), 1)
return datetime((date.year - 1), 12, 1)
raise TypeError('Invalid date {}'.format(date))<|docstring|>Get a budget month date from the actual date.<|endoftext|> |
ccc031dac7468748933408f51dbe147e1416617fd016fd7496006bfc04675ae9 | def _str_to_date(text, year_month=None):
'Convert statement text date to date.'
if (text is None):
raise ValueError("Could not parse date '{}'".format(text))
match = RE_DATE.match(text)
if (match is None):
match = RE_DATE2.match(text)
if (match is None):
raise ValueError("Could not parse date '{}'".format(text))
return datetime(int(match.group(1)), int(match.group(2)), int(match.group(3)))
(year, month, day) = (match.group(3), match.group(2).lower(), int(match.group(1)))
month = MONTH_MAP.get(month, month)
if isinstance(month, str):
raise ValueError('Invalid text month: {}'.format(month))
if ((year is None) and (year_month is not None)):
year = year_month.year
if ((month == 1) and (year_month.month == 12)):
year += 1
if (year is not None):
year = int(year)
day = min(day, monthrange(year, month)[1])
return datetime(year, month, day) | Convert statement text date to date. | dataplaybook/tasks/fnb.py | _str_to_date | kellerza/data-playbook | 3 | python | def _str_to_date(text, year_month=None):
if (text is None):
raise ValueError("Could not parse date '{}'".format(text))
match = RE_DATE.match(text)
if (match is None):
match = RE_DATE2.match(text)
if (match is None):
raise ValueError("Could not parse date '{}'".format(text))
return datetime(int(match.group(1)), int(match.group(2)), int(match.group(3)))
(year, month, day) = (match.group(3), match.group(2).lower(), int(match.group(1)))
month = MONTH_MAP.get(month, month)
if isinstance(month, str):
raise ValueError('Invalid text month: {}'.format(month))
if ((year is None) and (year_month is not None)):
year = year_month.year
if ((month == 1) and (year_month.month == 12)):
year += 1
if (year is not None):
year = int(year)
day = min(day, monthrange(year, month)[1])
return datetime(year, month, day) | def _str_to_date(text, year_month=None):
if (text is None):
raise ValueError("Could not parse date '{}'".format(text))
match = RE_DATE.match(text)
if (match is None):
match = RE_DATE2.match(text)
if (match is None):
raise ValueError("Could not parse date '{}'".format(text))
return datetime(int(match.group(1)), int(match.group(2)), int(match.group(3)))
(year, month, day) = (match.group(3), match.group(2).lower(), int(match.group(1)))
month = MONTH_MAP.get(month, month)
if isinstance(month, str):
raise ValueError('Invalid text month: {}'.format(month))
if ((year is None) and (year_month is not None)):
year = year_month.year
if ((month == 1) and (year_month.month == 12)):
year += 1
if (year is not None):
year = int(year)
day = min(day, monthrange(year, month)[1])
return datetime(year, month, day)<|docstring|>Convert statement text date to date.<|endoftext|> |
362befa9b9e790d3a884e80bf6e21decdc79b2a18fe25a56f6f63aed3b5db7d4 | @task
def read_cheque_csv(filename: str) -> Table:
'Read an FNB cheque csv file.'
fields = ['type', 'nr', 'date', 'desc1', 'desc2', 'desc3', 'amount', 'saldo', 'koste']
data = {}
with open(filename, newline='', encoding='utf8') as csvfile:
csvreader = csv.DictReader(csvfile, fields)
for row in csvreader:
try:
rowtype = int(row[fields[0]])
except ValueError as err:
raise InvalidFile('read_cheque not a cheque file [{}]'.format(os.path.basename(filename))) from err
if (rowtype == 2):
data['account'] = row[fields[1]]
if (rowtype == 3):
try:
data['id'] = int(row[fields[1]])
except (ValueError, TypeError):
continue
data['date'] = _str_to_date(row[fields[2]])
if (rowtype != 5):
continue
try:
number = int(row[fields[1]])
except (ValueError, TypeError):
continue
tdate = _str_to_date(row['date'], data['date'])
if (tdate is None):
continue
res0 = {'amount': float(row['amount']), 'date': tdate, 'id': '{:0>16s}.{:04d}.{:04d}'.format(data['account'], data['id'], number), 'description': row['desc2'], 'extras': row['desc1']}
if (RE_CARD.match(row['desc3']) is None):
res0['description'] += (' ' + row['desc3'])
else:
res0['extras'] += (' ' + row['desc3'])
if (' kontant' in res0['extras'].lower()):
(res0['extras'], res0['description']) = (res0['description'], res0['extras'])
if (res0['description'] == ''):
res0['description'] = res0['extras']
res0['extras'] = ''
(yield res0) | Read an FNB cheque csv file. | dataplaybook/tasks/fnb.py | read_cheque_csv | kellerza/data-playbook | 3 | python | @task
def read_cheque_csv(filename: str) -> Table:
fields = ['type', 'nr', 'date', 'desc1', 'desc2', 'desc3', 'amount', 'saldo', 'koste']
data = {}
with open(filename, newline=, encoding='utf8') as csvfile:
csvreader = csv.DictReader(csvfile, fields)
for row in csvreader:
try:
rowtype = int(row[fields[0]])
except ValueError as err:
raise InvalidFile('read_cheque not a cheque file [{}]'.format(os.path.basename(filename))) from err
if (rowtype == 2):
data['account'] = row[fields[1]]
if (rowtype == 3):
try:
data['id'] = int(row[fields[1]])
except (ValueError, TypeError):
continue
data['date'] = _str_to_date(row[fields[2]])
if (rowtype != 5):
continue
try:
number = int(row[fields[1]])
except (ValueError, TypeError):
continue
tdate = _str_to_date(row['date'], data['date'])
if (tdate is None):
continue
res0 = {'amount': float(row['amount']), 'date': tdate, 'id': '{:0>16s}.{:04d}.{:04d}'.format(data['account'], data['id'], number), 'description': row['desc2'], 'extras': row['desc1']}
if (RE_CARD.match(row['desc3']) is None):
res0['description'] += (' ' + row['desc3'])
else:
res0['extras'] += (' ' + row['desc3'])
if (' kontant' in res0['extras'].lower()):
(res0['extras'], res0['description']) = (res0['description'], res0['extras'])
if (res0['description'] == ):
res0['description'] = res0['extras']
res0['extras'] =
(yield res0) | @task
def read_cheque_csv(filename: str) -> Table:
fields = ['type', 'nr', 'date', 'desc1', 'desc2', 'desc3', 'amount', 'saldo', 'koste']
data = {}
with open(filename, newline=, encoding='utf8') as csvfile:
csvreader = csv.DictReader(csvfile, fields)
for row in csvreader:
try:
rowtype = int(row[fields[0]])
except ValueError as err:
raise InvalidFile('read_cheque not a cheque file [{}]'.format(os.path.basename(filename))) from err
if (rowtype == 2):
data['account'] = row[fields[1]]
if (rowtype == 3):
try:
data['id'] = int(row[fields[1]])
except (ValueError, TypeError):
continue
data['date'] = _str_to_date(row[fields[2]])
if (rowtype != 5):
continue
try:
number = int(row[fields[1]])
except (ValueError, TypeError):
continue
tdate = _str_to_date(row['date'], data['date'])
if (tdate is None):
continue
res0 = {'amount': float(row['amount']), 'date': tdate, 'id': '{:0>16s}.{:04d}.{:04d}'.format(data['account'], data['id'], number), 'description': row['desc2'], 'extras': row['desc1']}
if (RE_CARD.match(row['desc3']) is None):
res0['description'] += (' ' + row['desc3'])
else:
res0['extras'] += (' ' + row['desc3'])
if (' kontant' in res0['extras'].lower()):
(res0['extras'], res0['description']) = (res0['description'], res0['extras'])
if (res0['description'] == ):
res0['description'] = res0['extras']
res0['extras'] =
(yield res0)<|docstring|>Read an FNB cheque csv file.<|endoftext|> |
def _get_id(acc, month):
    """Return a sequential transaction ID for the (account, month) pair."""
    # A missing account maps to the pseudo-account '0'.
    key = ('0' if acc is None else acc, month)
    # Bump the per-(account, month) counter kept in the module-level TX_IDS.
    TX_IDS[key] = TX_IDS.get(key, 0) + 1
    return '{:0>16s}.{:04d}.{:04d}'.format(str(key[0]), month, TX_IDS[key])
if (acc is None):
acc = '0'
TX_IDS[(acc, month)] = (TX_IDS.get((acc, month), 0) + 1)
return '{:0>16s}.{:04d}.{:04d}'.format(str(acc), month, TX_IDS[(acc, month)]) | def _get_id(acc, month):
if (acc is None):
acc = '0'
TX_IDS[(acc, month)] = (TX_IDS.get((acc, month), 0) + 1)
return '{:0>16s}.{:04d}.{:04d}'.format(str(acc), month, TX_IDS[(acc, month)])<|docstring|>Return an ID.<|endoftext|> |
def _clean(row):
    """Normalise the ``description`` field of a transaction row."""
    desc = row['description']
    if isinstance(desc, str):
        # Collapse all runs of whitespace to single spaces.
        row['description'] = ' '.join(desc.split())
    elif isinstance(desc, (int, float)):
        # Prefix numeric descriptions so spreadsheets treat them as text.
        row['description'] = "''" + str(desc)
    else:
        _LOGGER.info('type %s', str(type(desc)))
    if not row['description'] and 'extras' in row:
        # Empty description: promote extras and normalise it too.
        row['description'] = row.pop('extras')
        return _clean(row)
    return row
if isinstance(row['description'], str):
row['description'] = ' '.join(row['description'].split())
elif isinstance(row['description'], (int, float)):
row['description'] = ( + str(row['description']))
else:
_LOGGER.info('type %s', str(type(row['description'])))
if ((not row['description']) and ('extras' in row)):
row['description'] = row.pop('extras')
return _clean(row)
return row | def _clean(row):
if isinstance(row['description'], str):
row['description'] = ' '.join(row['description'].split())
elif isinstance(row['description'], (int, float)):
row['description'] = ( + str(row['description']))
else:
_LOGGER.info('type %s', str(type(row['description'])))
if ((not row['description']) and ('extras' in row)):
row['description'] = row.pop('extras')
return _clean(row)
return row<|docstring|>Strip space in row description.<|endoftext|> |
941581da80151d9a991b10c6a0f10955e10954b165c89820916a6c55d3450a55 | @task
def fnb_process(tables: Dict[str, Table]) -> Table:
    """Post-process FNB transaction tables: add the budget month and an ID.

    Rows carrying both ``from`` and ``to`` keys are manual transfers and
    yield two mirrored rows (one per direction).  Rows that already have an
    ``id`` pass through unchanged; all remaining rows are treated as card
    transactions and get a generated ID and a negated amount.

    :param tables: Mapping of table name to table (list of row dicts).
    :yields: cleaned row dicts.
    :raises ValueError: if a card transaction amount cannot be parsed.
    """
    for (_, t_table) in tables.items():
        for row in t_table:
            if not any(row.values()):
                continue  # skip completely empty rows
            try:
                row['month'] = _budget_date(row['date'])
            except TypeError:
                _LOGGER.warning('Skip row %s', row)
                continue
            if ('from' in row) and ('to' in row):
                # Manual transfer: emit the row and its mirror image.
                row['id'] = _get_id('custom', row['month'].month)
                f_t = (('# ' + str(row.pop('to', ''))), ('# ' + str(row.pop('from', ''))))
                (row['extras'], row['description']) = f_t
                yield _clean(row)
                row = row.copy()
                (row['description'], row['extras']) = f_t
                row['amount'] = -(row['amount'] or 0)
                yield _clean(row)
            elif 'id' in row:
                yield _clean(row)
            else:
                # Card transaction: generate an ID and negate the amount.
                row['id'] = _get_id(row.pop('card', ''), row['month'].month)
                row['extras'] = row.pop('place', '')
                try:
                    row['amount'] = -float(str(row['amount']).replace(',', ''))
                except ValueError as exc:
                    raise ValueError('Error in {}: {}'.format(row['id'], exc)) from exc
                yield _clean(row)
def fnb_process(tables: Dict[(str, Table)]) -> Table:
for (_, t_table) in tables.items():
for row in t_table:
if (not any(row.values())):
continue
try:
row['month'] = _budget_date(row['date'])
except TypeError:
_LOGGER.warning('Skip row %s', row)
continue
if (('from' in row) and ('to' in row)):
row['id'] = _get_id('custom', row['month'].month)
f_t = (('# ' + str(row.pop('to', ))), ('# ' + str(row.pop('from', ))))
(row['extras'], row['description']) = f_t
(yield _clean(row))
row = row.copy()
(row['description'], row['extras']) = f_t
row['amount'] = (- (row['amount'] or 0))
(yield _clean(row))
elif ('id' in row):
(yield _clean(row))
else:
row['id'] = _get_id(row.pop('card', ), row['month'].month)
row['extras'] = row.pop('place', )
try:
row['amount'] = (- float(str(row['amount']).replace(',', )))
except ValueError as exc:
raise ValueError('Error in {}: {}'.format(row['id'], exc)) from exc
(yield _clean(row)) | @task
def fnb_process(tables: Dict[(str, Table)]) -> Table:
for (_, t_table) in tables.items():
for row in t_table:
if (not any(row.values())):
continue
try:
row['month'] = _budget_date(row['date'])
except TypeError:
_LOGGER.warning('Skip row %s', row)
continue
if (('from' in row) and ('to' in row)):
row['id'] = _get_id('custom', row['month'].month)
f_t = (('# ' + str(row.pop('to', ))), ('# ' + str(row.pop('from', ))))
(row['extras'], row['description']) = f_t
(yield _clean(row))
row = row.copy()
(row['description'], row['extras']) = f_t
row['amount'] = (- (row['amount'] or 0))
(yield _clean(row))
elif ('id' in row):
(yield _clean(row))
else:
row['id'] = _get_id(row.pop('card', ), row['month'].month)
row['extras'] = row.pop('place', )
try:
row['amount'] = (- float(str(row['amount']).replace(',', )))
except ValueError as exc:
raise ValueError('Error in {}: {}'.format(row['id'], exc)) from exc
(yield _clean(row))<|docstring|>Add the budget month and ID.<|endoftext|> |
def _count_it(gen, retval):
    """Pass items through from *gen*, tracking per-call and running counts in *retval*."""
    retval['count'] = 0
    for item in gen:
        # 'count' reflects the items yielded so far for this call.
        retval['count'] += 1
        yield item
    # Fold this call's count into the running total across calls.
    retval['total'] = retval.get('total', 0) + retval['count']
retval['count'] = 0
for val in gen:
retval['count'] += 1
(yield val)
retval['total'] = (retval.get('total', 0) + retval['count']) | def _count_it(gen, retval):
retval['count'] = 0
for val in gen:
retval['count'] += 1
(yield val)
retval['total'] = (retval.get('total', 0) + retval['count'])<|docstring|>Count items yielded.<|endoftext|> |
a735da0aa822e025e666bd87c36a74a6468f531ad1f4e9b8f409c49815b68b19 | @task
def fnb_read_folder(folder: str, pattern: Optional[str]='*.csv') -> Table:
    """Read all FNB statement files in *folder* matching *pattern*.

    Yields the transactions of every file that parses as a cheque CSV and
    logs files that could not be loaded; one bad file does not stop the run.

    :param folder: Directory containing the exported statements.
    :param pattern: Glob pattern used to select files (default ``*.csv``).
    :yields: transaction row dicts from :func:`read_cheque_csv`.
    """
    path = Path(folder)
    files = sorted(path.glob(pattern))
    _LOGGER.info('Open %s files', len(files))
    retval = {}
    for filename in files:
        try:
            try:
                yield from _count_it(read_cheque_csv(filename=str(filename)), retval)
                _LOGGER.info('Loaded %s lines from %s', retval['count'], filename)
                continue
            except InvalidFile:
                pass
            _LOGGER.warning('Could not load %s', filename)
        except Exception:  # keep going over the remaining files
            # BUG FIX: traceback.print_exc() returns None (it prints to
            # stderr), so the log message used to say "None".  format_exc()
            # puts the traceback text into the log record itself.
            _LOGGER.error('Could not read %s: %s', filename, traceback.format_exc())
    _LOGGER.info('Success with %s lines', retval.get('total', 0))
def fnb_read_folder(folder: str, pattern: Optional[str]='*.csv') -> Table:
path = Path(folder)
files = sorted(path.glob(pattern))
_LOGGER.info('Open %s files', len(files))
retval = {}
for filename in files:
try:
try:
(yield from _count_it(read_cheque_csv(filename=str(filename)), retval))
_LOGGER.info('Loaded %s lines from %s', retval['count'], filename)
continue
except InvalidFile:
pass
_LOGGER.warning('Could not load %s', filename)
except Exception:
_LOGGER.error('Could not read %s: %s', filename, traceback.print_exc())
_LOGGER.info('Success with %s lines', retval.get('total', 0)) | @task
def fnb_read_folder(folder: str, pattern: Optional[str]='*.csv') -> Table:
path = Path(folder)
files = sorted(path.glob(pattern))
_LOGGER.info('Open %s files', len(files))
retval = {}
for filename in files:
try:
try:
(yield from _count_it(read_cheque_csv(filename=str(filename)), retval))
_LOGGER.info('Loaded %s lines from %s', retval['count'], filename)
continue
except InvalidFile:
pass
_LOGGER.warning('Could not load %s', filename)
except Exception:
_LOGGER.error('Could not read %s: %s', filename, traceback.print_exc())
_LOGGER.info('Success with %s lines', retval.get('total', 0))<|docstring|>Read all files in folder.<|endoftext|> |
def n_annotators(self):
    """Method returning the number of annotators.

    Returns
    -------
    n_annotators: int
        Number of BaseAnnot.
    """
    return self.n_annotators_
Returns
-------
n_annotators: int
Number of BaseAnnot. | annotlib/standard.py | n_annotators | mherde/annotlib | 3 | python | def n_annotators(self):
'Method returning the number of annotators.\n\n Returns\n -------\n n_annotators: int\n Number of BaseAnnot.\n '
return self.n_annotators_ | def n_annotators(self):
'Method returning the number of annotators.\n\n Returns\n -------\n n_annotators: int\n Number of BaseAnnot.\n '
return self.n_annotators_<|docstring|>Method returning the number of annotators.
Returns
-------
n_annotators: int
Number of BaseAnnot.<|endoftext|> |
def n_queries(self):
    """Method returning the number of queries posed to each annotator.

    Returns
    -------
    n_queries_: numpy.ndarray, shape (n_annotators)
        An entry n_queries_[a] indicates how many queries annotator a has processed.
    """
    return self.n_queries_
Returns
-------
n_queries_: numpy.ndarray, shape (n_annotators)
An entry n_queries_[a] indicates how many queries annotator a has processed. | annotlib/standard.py | n_queries | mherde/annotlib | 3 | python | def n_queries(self):
'Method returning the number of queries posed to an annotator.\n\n Returns\n -------\n n_queries_: numpy.ndarray, shape (n_annotators)\n An entry n_queries_[a] indicates how many queries annotator a has processed.\n '
return self.n_queries_ | def n_queries(self):
'Method returning the number of queries posed to an annotator.\n\n Returns\n -------\n n_queries_: numpy.ndarray, shape (n_annotators)\n An entry n_queries_[a] indicates how many queries annotator a has processed.\n '
return self.n_queries_<|docstring|>Method returning the number of queries posed to an annotator.
Returns
-------
n_queries_: numpy.ndarray, shape (n_annotators)
An entry n_queries_[a] indicates how many queries annotator a has processed.<|endoftext|> |
def queried_samples(self):
    """Method returning the samples each annotator was queried on.

    Returns
    -------
    X_queried: list of numpy.ndarray
        Entry X_queried[a] contains the rows of ``X_`` for which annotator a
        was queried to provide class labels.
    """
    # FIX: restore the extraction-mangled subscript `[(:, a)]` -> `[:, a]`.
    # Boolean-mask the stored samples with annotator a's column of flags.
    return [self.X_[self.queried_flags_[:, a]] for a in range(self.n_annotators())]
Returns
-------
X_queried: list, shape (n_annotators, n_samples, n_features)
An entry X_queried_[a] represents the samples for which the annotator a was queried to provide class labels. | annotlib/standard.py | queried_samples | mherde/annotlib | 3 | python | def queried_samples(self):
'Method returning the samples for which the annotators were queried to provide class labels.\n\n Returns\n -------\n X_queried: list, shape (n_annotators, n_samples, n_features)\n An entry X_queried_[a] represents the samples for which the annotator a was queried to provide class labels.\n '
return [self.X_[self.queried_flags_[(:, a)]] for a in range(self.n_annotators())] | def queried_samples(self):
'Method returning the samples for which the annotators were queried to provide class labels.\n\n Returns\n -------\n X_queried: list, shape (n_annotators, n_samples, n_features)\n An entry X_queried_[a] represents the samples for which the annotator a was queried to provide class labels.\n '
return [self.X_[self.queried_flags_[(:, a)]] for a in range(self.n_annotators())]<|docstring|>Method returning the samples for which the annotators were queried to provide class labels.
Returns
-------
X_queried: list, shape (n_annotators, n_samples, n_features)
An entry X_queried_[a] represents the samples for which the annotator a was queried to provide class labels.<|endoftext|> |
def class_labels(self, X, annotator_ids=None, query_value=1, **kwargs):
    """Method returning the class labels of the given samples.

    If the query value is greater than zero, the query statistics
    (``n_queries_`` and ``queried_flags_``) are updated as well.

    Parameters
    ----------
    X: array-like, shape (n_samples, n_features)
        Samples whose class labels are queried.
    annotator_ids: array-like, shape (n_queried_annotators)
        The indices of the annotators whose class labels are queried.
    query_value: int
        Increment applied to the query statistics of the queried annotators.

    Returns
    -------
    Y: numpy.ndarray, shape (n_samples, n_annotators)
        Class labels provided by the queried annotators; entries of
        non-queried annotators are ``numpy.nan``.
    """
    annotator_ids = check_indices(annotator_ids, self.n_annotators() - 1, 'annotator_ids')
    X = check_array(X)
    # Map the given samples back onto self.X_; -1 marks unknown samples.
    sample_ids = indices(self.X_, X, missing=-1)
    sample_ids_flag = sample_ids >= 0
    Y = np.full((np.size(X, 0), self.n_annotators()), np.nan)
    # FIX: restore the extraction-mangled subscripts `[(:, None)]` -> `[:, None]`.
    Y[sample_ids_flag, annotator_ids[:, None]] = self.Y_[sample_ids[sample_ids_flag], annotator_ids[:, None]]
    if query_value > 0:
        self.queried_flags_[sample_ids, annotator_ids[:, None]] = True
        self.n_queries_[annotator_ids] += query_value
    return Y
If the query value is greater than zero, it updates the n_queries and queried sample statistics
Parameters
----------
X: array-like, shape (n_samples, n_features)
Samples whose class labels are queried.
annotator_ids: array-like, shape (n_queried_annotators)
The indices of the annotators whose class labels are queried.
query_value: int
The query value represents the increment of the query statistics of the queried annotators.
Returns
-------
Y: numpy.ndarray, shape (n_samples, n_annotators)
Class labels of the given samples which were provided by the queried annotators.
The non queried annotators return np.nan values. | annotlib/standard.py | class_labels | mherde/annotlib | 3 | python | def class_labels(self, X, annotator_ids=None, query_value=1, **kwargs):
'Method returning the class labels of the given samples.\n If the query value is greater than zero, it updates the n_queries and queried sample statistics\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n Samples whose class labels are queried.\n annotator_ids: array-like, shape (n_queried_annotators)\n The indices of the annotators whose class labels are queried.\n query_value: int\n The query value represents the increment of the query statistics of the queried annotators.\n\n Returns\n -------\n Y: numpy.ndarray, shape (n_samples, n_annotators)\n Class labels of the given samples which were provided by the queried annotators.\n The non queried annotators return np.nan values.\n '
annotator_ids = check_indices(annotator_ids, (self.n_annotators() - 1), 'annotator_ids')
X = check_array(X)
sample_ids = indices(self.X_, X, missing=(- 1))
sample_ids_flag = (sample_ids >= 0)
Y = np.full((np.size(X, 0), self.n_annotators()), np.nan)
Y[(sample_ids_flag, annotator_ids[(:, None)])] = self.Y_[(sample_ids[sample_ids_flag], annotator_ids[(:, None)])]
if (query_value > 0):
self.queried_flags_[(sample_ids, annotator_ids[(:, None)])] = True
self.n_queries_[annotator_ids] += query_value
return Y | def class_labels(self, X, annotator_ids=None, query_value=1, **kwargs):
'Method returning the class labels of the given samples.\n If the query value is greater than zero, it updates the n_queries and queried sample statistics\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n Samples whose class labels are queried.\n annotator_ids: array-like, shape (n_queried_annotators)\n The indices of the annotators whose class labels are queried.\n query_value: int\n The query value represents the increment of the query statistics of the queried annotators.\n\n Returns\n -------\n Y: numpy.ndarray, shape (n_samples, n_annotators)\n Class labels of the given samples which were provided by the queried annotators.\n The non queried annotators return np.nan values.\n '
annotator_ids = check_indices(annotator_ids, (self.n_annotators() - 1), 'annotator_ids')
X = check_array(X)
sample_ids = indices(self.X_, X, missing=(- 1))
sample_ids_flag = (sample_ids >= 0)
Y = np.full((np.size(X, 0), self.n_annotators()), np.nan)
Y[(sample_ids_flag, annotator_ids[(:, None)])] = self.Y_[(sample_ids[sample_ids_flag], annotator_ids[(:, None)])]
if (query_value > 0):
self.queried_flags_[(sample_ids, annotator_ids[(:, None)])] = True
self.n_queries_[annotator_ids] += query_value
return Y<|docstring|>Method returning the class labels of the given samples.
If the query value is greater than zero, it updates the n_queries and queried sample statistics
Parameters
----------
X: array-like, shape (n_samples, n_features)
Samples whose class labels are queried.
annotator_ids: array-like, shape (n_queried_annotators)
The indices of the annotators whose class labels are queried.
query_value: int
The query value represents the increment of the query statistics of the queried annotators.
Returns
-------
Y: numpy.ndarray, shape (n_samples, n_annotators)
Class labels of the given samples which were provided by the queried annotators.
The non queried annotators return np.nan values.<|endoftext|> |
def confidence_scores(self, X, annotator_ids=None, **kwargs):
    """Method returning the confidence scores for labelling the given samples.

    Parameters
    ----------
    X: array-like, shape (n_samples, n_features)
        Samples whose confidence scores are queried.
    annotator_ids: array-like, shape (n_queried_annotators)
        The indices of the annotators whose confidence scores are queried.

    Returns
    -------
    C: numpy.ndarray, shape (n_samples, n_annotators)
        Confidence scores of the queried annotators for labelling the given
        samples; entries of non-queried annotators are ``numpy.nan``.
    """
    annotator_ids = check_indices(annotator_ids, self.n_annotators() - 1, 'annotator_ids')
    X = check_array(X)
    # Map the given samples back onto self.X_; -1 marks unknown samples.
    sample_ids = indices(self.X_, X, missing=-1)
    sample_ids_flag = sample_ids >= 0
    C = np.full((np.size(X, 0), self.n_annotators()), np.nan)
    # FIX: restore the extraction-mangled subscripts `[(:, None)]` -> `[:, None]`.
    C[sample_ids_flag, annotator_ids[:, None]] = self.C_[sample_ids[sample_ids_flag], annotator_ids[:, None]]
    return C
Parameters
----------
X: array-like, shape (n_samples, n_features)
Samples whose class labels are queried.
annotator_ids: array-like, shape (n_queried_annotators)
The indices of the annotators whose confidence scores are queried.
Returns
-------
C: numpy.ndarray, shape (n_samples, n_annotators)
confidence scores of the queried annotators for labelling the given samples.
The non queried annotators should return np.nan values. | annotlib/standard.py | confidence_scores | mherde/annotlib | 3 | python | def confidence_scores(self, X, annotator_ids=None, **kwargs):
'Method returning the confidence scores for labelling the given samples.\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n Samples whose class labels are queried.\n annotator_ids: array-like, shape (n_queried_annotators)\n The indices of the annotators whose confidence scores are queried.\n\n Returns\n -------\n C: numpy.ndarray, shape (n_samples, n_annotators)\n confidence scores of the queried annotators for labelling the given samples.\n The non queried annotators should return np.nan values.\n '
annotator_ids = check_indices(annotator_ids, (self.n_annotators() - 1), 'annotator_ids')
X = check_array(X)
sample_ids = indices(self.X_, X, missing=(- 1))
sample_ids_flag = (sample_ids >= 0)
C = np.full((np.size(X, 0), self.n_annotators()), np.nan)
C[(sample_ids_flag, annotator_ids[(:, None)])] = self.C_[(sample_ids[sample_ids_flag], annotator_ids[(:, None)])]
return C | def confidence_scores(self, X, annotator_ids=None, **kwargs):
'Method returning the confidence scores for labelling the given samples.\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n Samples whose class labels are queried.\n annotator_ids: array-like, shape (n_queried_annotators)\n The indices of the annotators whose confidence scores are queried.\n\n Returns\n -------\n C: numpy.ndarray, shape (n_samples, n_annotators)\n confidence scores of the queried annotators for labelling the given samples.\n The non queried annotators should return np.nan values.\n '
annotator_ids = check_indices(annotator_ids, (self.n_annotators() - 1), 'annotator_ids')
X = check_array(X)
sample_ids = indices(self.X_, X, missing=(- 1))
sample_ids_flag = (sample_ids >= 0)
C = np.full((np.size(X, 0), self.n_annotators()), np.nan)
C[(sample_ids_flag, annotator_ids[(:, None)])] = self.C_[(sample_ids[sample_ids_flag], annotator_ids[(:, None)])]
return C<|docstring|>Method returning the confidence scores for labelling the given samples.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Samples whose class labels are queried.
annotator_ids: array-like, shape (n_queried_annotators)
The indices of the annotators whose confidence scores are queried.
Returns
-------
C: numpy.ndarray, shape (n_samples, n_annotators)
confidence scores of the queried annotators for labelling the given samples.
The non queried annotators should return np.nan values.<|endoftext|> |
def _check_parameters(self, n_annotators, n_samples, confidence_noise, random_state):
    """Validate several constructor parameters and store them as attributes.

    Parameters
    ----------
    n_annotators: int
        Number of annotators.
    n_samples: int
        Number of samples.
    confidence_noise: array-like, shape (n_annotators)
        Noise level of the confidence scores of each annotator.
        (DOC FIX: the code requires one entry per annotator, not per sample.)
    random_state: None | int | instance of :py:class:`numpy.random.RandomState`
        The random state used for generating class labels of the annotators.
    """
    self.n_annotators_ = check_positive_integer(n_annotators, parameter_name='n_annotators')
    self.n_queries_ = column_or_1d(np.asarray([0] * self.n_annotators()))
    self.queried_flags_ = np.zeros((n_samples, n_annotators), dtype=bool)
    self.confidence_noise_ = np.zeros(self.n_annotators()) if confidence_noise is None else confidence_noise
    self.confidence_noise_ = column_or_1d(self.confidence_noise_)
    if len(self.confidence_noise_) != self.n_annotators():
        # FIX: error-message grammar ("must be a equal to" -> "must be equal to").
        raise ValueError('The number of elements in `confidence_noise` must be equal to the number of annotators.')
    self.random_state_ = check_random_state(random_state)
    # Pre-draw uniform noise for every (sample, annotator) pair.
    self.C_noise_ = np.asarray([self.random_state_.uniform(-self.confidence_noise_[a], self.confidence_noise_[a], n_samples) for a in range(self.n_annotators())]).T
Parameters
----------
n_annotators: int
Number of annotators.
n_samples: int
Number of samples.
confidence_noise: array-like, shape (n_samples)
Noise of the confidence scores of each annotator.
random_state: None | int | instance of :py:class:`numpy.random.RandomState`
The random state used for generating class labels of the annotators. | annotlib/standard.py | _check_parameters | mherde/annotlib | 3 | python | def _check_parameters(self, n_annotators, n_samples, confidence_noise, random_state):
'\n This method is responsible for checking several parameters and to set them as attributes.\n\n Parameters\n ----------\n n_annotators: int\n Number of annotators.\n n_samples: int\n Number of samples.\n confidence_noise: array-like, shape (n_samples)\n Noise of the confidence scores of each annotator.\n random_state: None | int | instance of :py:class:`numpy.random.RandomState`\n The random state used for generating class labels of the annotators.\n '
self.n_annotators_ = check_positive_integer(n_annotators, parameter_name='n_annotators')
self.n_queries_ = column_or_1d(np.asarray(([0] * self.n_annotators())))
self.queried_flags_ = np.zeros((n_samples, n_annotators), dtype=bool)
self.confidence_noise_ = (np.zeros(self.n_annotators()) if (confidence_noise is None) else confidence_noise)
self.confidence_noise_ = column_or_1d(self.confidence_noise_)
if (len(self.confidence_noise_) != self.n_annotators()):
raise ValueError('The number of elements in `confidence_noise` must be a equal to the number of annotators.')
self.random_state_ = check_random_state(random_state)
self.C_noise_ = np.asarray([self.random_state_.uniform((- self.confidence_noise_[a]), self.confidence_noise_[a], n_samples) for a in range(self.n_annotators())]).T | def _check_parameters(self, n_annotators, n_samples, confidence_noise, random_state):
'\n This method is responsible for checking several parameters and to set them as attributes.\n\n Parameters\n ----------\n n_annotators: int\n Number of annotators.\n n_samples: int\n Number of samples.\n confidence_noise: array-like, shape (n_samples)\n Noise of the confidence scores of each annotator.\n random_state: None | int | instance of :py:class:`numpy.random.RandomState`\n The random state used for generating class labels of the annotators.\n '
self.n_annotators_ = check_positive_integer(n_annotators, parameter_name='n_annotators')
self.n_queries_ = column_or_1d(np.asarray(([0] * self.n_annotators())))
self.queried_flags_ = np.zeros((n_samples, n_annotators), dtype=bool)
self.confidence_noise_ = (np.zeros(self.n_annotators()) if (confidence_noise is None) else confidence_noise)
self.confidence_noise_ = column_or_1d(self.confidence_noise_)
if (len(self.confidence_noise_) != self.n_annotators()):
raise ValueError('The number of elements in `confidence_noise` must be a equal to the number of annotators.')
self.random_state_ = check_random_state(random_state)
self.C_noise_ = np.asarray([self.random_state_.uniform((- self.confidence_noise_[a]), self.confidence_noise_[a], n_samples) for a in range(self.n_annotators())]).T<|docstring|>This method is responsible for checking several parameters and to set them as attributes.
Parameters
----------
n_annotators: int
Number of annotators.
n_samples: int
Number of samples.
confidence_noise: array-like, shape (n_samples)
Noise of the confidence scores of each annotator.
random_state: None | int | instance of :py:class:`numpy.random.RandomState`
The random state used for generating class labels of the annotators.<|endoftext|> |
def _add_confidence_noise(self, probabilistic=False):
    """Add the pre-drawn uniform noise to the confidence scores.

    Parameters
    ----------
    probabilistic: boolean
        If true, the confidence scores are clipped to the interval [0, 1].
    """
    self.C_ += self.C_noise_
    if probabilistic:
        # Clamp both ends to [0, 1] in place.
        np.clip(self.C_, 0, 1, out=self.C_)
Parameters
----------
probabilistic: boolean
If true, the confidence scores are in the interval [0, 1]. | annotlib/standard.py | _add_confidence_noise | mherde/annotlib | 3 | python | def _add_confidence_noise(self, probabilistic=False):
'\n Add the uniform confidence noise to the confidence scores.\n\n Parameters\n ----------\n probabilistic: boolean\n If true, the confidence scores are in the interval [0, 1].\n '
self.C_ += self.C_noise_
if probabilistic:
self.C_[(self.C_ > 1)] = 1
self.C_[(self.C_ < 0)] = 0 | def _add_confidence_noise(self, probabilistic=False):
'\n Add the uniform confidence noise to the confidence scores.\n\n Parameters\n ----------\n probabilistic: boolean\n If true, the confidence scores are in the interval [0, 1].\n '
self.C_ += self.C_noise_
if probabilistic:
self.C_[(self.C_ > 1)] = 1
self.C_[(self.C_ < 0)] = 0<|docstring|>Add the uniform confidence noise to the confidence scores.
Parameters
----------
probabilistic: boolean
If true, the confidence scores are in the interval [0, 1].<|endoftext|> |
def __init__(self, master, dims: tuple=(150, 100)):
    """Build the scoreboard canvas.

    :param master: The master frame to pack to.
    :param dims: (width, height) of the scoreboard. Defaults to (150, 100).
    """
    tkinter.Canvas.__init__(self, master)
    self.master = master
    self.width = dims[0]
    self.height = dims[1]
    self.config(height=self.height, width=self.width)
    # Widget handles are populated later by create_ui().
    for handle in ('player1_name', 'player2_name', 'score_box_1', 'score_box_2'):
        setattr(self, handle, None)
    # Both players start at zero.
    self.score_1 = 0
    self.score_2 = 0
    self.create_ui()
:param master: The master frame to pack to.
:param dims: The dimensions of the scoreboard. Defaults to (150, 100) | View/Scoreboard.py | __init__ | sahil1105/BattleshipGame | 0 | python | def __init__(self, master, dims: tuple=(150, 100)):
'\n Constructor for the scoreboard canvas.\n :param master: The master frame to pack to.\n :param dims: The dimensions of the scoreboard. Defaults to (150, 100)\n '
tkinter.Canvas.__init__(self, master)
self.master = master
self.width = dims[0]
self.height = dims[1]
self.config(height=self.height, width=self.width)
self.player1_name = None
self.player2_name = None
self.score_box_1 = None
self.score_box_2 = None
self.score_1 = 0
self.score_2 = 0
self.create_ui() | def __init__(self, master, dims: tuple=(150, 100)):
'\n Constructor for the scoreboard canvas.\n :param master: The master frame to pack to.\n :param dims: The dimensions of the scoreboard. Defaults to (150, 100)\n '
tkinter.Canvas.__init__(self, master)
self.master = master
self.width = dims[0]
self.height = dims[1]
self.config(height=self.height, width=self.width)
self.player1_name = None
self.player2_name = None
self.score_box_1 = None
self.score_box_2 = None
self.score_1 = 0
self.score_2 = 0
self.create_ui()<|docstring|>Constructor for the scoreboard canvas.
:param master: The master frame to pack to.
:param dims: The dimensions of the scoreboard. Defaults to (150, 100)<|endoftext|> |
3789aa374c1205e2ef0b681693ad8be5866e18b91a7b9f4b5531911c01b1dad1 | def create_ui(self):
'\n Utility function to add the relevant elements to the scoreboard UI.\n :return: None\n '
self.player1_name = self.create_player_name_box(int((self.width / 4)), int((self.height / 4)), 'You')
self.player2_name = self.create_player_name_box(int(((3 * self.width) / 4)), int((self.height / 4)), 'Opponent')
self.score_box_1 = self.create_score_box(int((self.width / 4)), int(((3 * self.height) / 4)), color='blue')
self.score_box_2 = self.create_score_box(int(((3 * self.width) / 4)), int(((3 * self.height) / 4)), color='red') | Utility function to add the relevant elements to the scoreboard UI.
:return: None | View/Scoreboard.py | create_ui | sahil1105/BattleshipGame | 0 | python | def create_ui(self):
'\n Utility function to add the relevant elements to the scoreboard UI.\n :return: None\n '
self.player1_name = self.create_player_name_box(int((self.width / 4)), int((self.height / 4)), 'You')
self.player2_name = self.create_player_name_box(int(((3 * self.width) / 4)), int((self.height / 4)), 'Opponent')
self.score_box_1 = self.create_score_box(int((self.width / 4)), int(((3 * self.height) / 4)), color='blue')
self.score_box_2 = self.create_score_box(int(((3 * self.width) / 4)), int(((3 * self.height) / 4)), color='red') | def create_ui(self):
'\n Utility function to add the relevant elements to the scoreboard UI.\n :return: None\n '
self.player1_name = self.create_player_name_box(int((self.width / 4)), int((self.height / 4)), 'You')
self.player2_name = self.create_player_name_box(int(((3 * self.width) / 4)), int((self.height / 4)), 'Opponent')
self.score_box_1 = self.create_score_box(int((self.width / 4)), int(((3 * self.height) / 4)), color='blue')
self.score_box_2 = self.create_score_box(int(((3 * self.width) / 4)), int(((3 * self.height) / 4)), color='red')<|docstring|>Utility function to add the relevant elements to the scoreboard UI.
:return: None<|endoftext|> |
d1186306865ec1be57140c766b2074f7a4189c566eddc1787dc06766f3559987 | def create_player_name_box(self, x: int, y: int, text='You', color='green'):
"\n Utility function to add a box on the canvas with a player's name.\n :param x: The x coordinate to place the name at.\n :param y: The y coordinate to place the name at.\n :param text: The text to place.\n :param color: The color to use for the text.\n :return: A reference to the text box created.\n "
return self.create_text(x, y, text=text, fill=color, width=int((self.width / 2))) | Utility function to add a box on the canvas with a player's name.
:param x: The x coordinate to place the name at.
:param y: The y coordinate to place the name at.
:param text: The text to place.
:param color: The color to use for the text.
:return: A reference to the text box created. | View/Scoreboard.py | create_player_name_box | sahil1105/BattleshipGame | 0 | python | def create_player_name_box(self, x: int, y: int, text='You', color='green'):
"\n Utility function to add a box on the canvas with a player's name.\n :param x: The x coordinate to place the name at.\n :param y: The y coordinate to place the name at.\n :param text: The text to place.\n :param color: The color to use for the text.\n :return: A reference to the text box created.\n "
return self.create_text(x, y, text=text, fill=color, width=int((self.width / 2))) | def create_player_name_box(self, x: int, y: int, text='You', color='green'):
"\n Utility function to add a box on the canvas with a player's name.\n :param x: The x coordinate to place the name at.\n :param y: The y coordinate to place the name at.\n :param text: The text to place.\n :param color: The color to use for the text.\n :return: A reference to the text box created.\n "
return self.create_text(x, y, text=text, fill=color, width=int((self.width / 2)))<|docstring|>Utility function to add a box on the canvas with a player's name.
:param x: The x coordinate to place the name at.
:param y: The y coordinate to place the name at.
:param text: The text to place.
:param color: The color to use for the text.
:return: A reference to the text box created.<|endoftext|> |
2ff3ad6e0d125ddc24fe5156eae2d5048062ad6a82fb273d2a9dd103f619fce8 | def create_score_box(self, x, y, color='blue'):
'\n Utility function to add a box to show the score of a player.\n Score starts out at 0.\n :param x: The x coordinate to place the score at.\n :param y: The y coordinate to place the score at.\n :param color: The color to use for the score.\n :return: Reference to the text box for the score.\n '
return self.create_text(x, y, text='0', fill=color, width=int((self.width / 4))) | Utility function to add a box to show the score of a player.
Score starts out at 0.
:param x: The x coordinate to place the score at.
:param y: The y coordinate to place the score at.
:param color: The color to use for the score.
:return: Reference to the text box for the score. | View/Scoreboard.py | create_score_box | sahil1105/BattleshipGame | 0 | python | def create_score_box(self, x, y, color='blue'):
'\n Utility function to add a box to show the score of a player.\n Score starts out at 0.\n :param x: The x coordinate to place the score at.\n :param y: The y coordinate to place the score at.\n :param color: The color to use for the score.\n :return: Reference to the text box for the score.\n '
return self.create_text(x, y, text='0', fill=color, width=int((self.width / 4))) | def create_score_box(self, x, y, color='blue'):
'\n Utility function to add a box to show the score of a player.\n Score starts out at 0.\n :param x: The x coordinate to place the score at.\n :param y: The y coordinate to place the score at.\n :param color: The color to use for the score.\n :return: Reference to the text box for the score.\n '
return self.create_text(x, y, text='0', fill=color, width=int((self.width / 4)))<|docstring|>Utility function to add a box to show the score of a player.
Score starts out at 0.
:param x: The x coordinate to place the score at.
:param y: The y coordinate to place the score at.
:param color: The color to use for the score.
:return: Reference to the text box for the score.<|endoftext|> |
5fe180c0863bb78cc88876504b5c9fd5ae7b77b96a121d5854797dfd98d76769 | def set_player_1_name(self, name):
'\n Utility function to set the name of the first player.\n :param name: Name to set.\n :return: None\n '
self.set_text(self.player1_name, name) | Utility function to set the name of the first player.
:param name: Name to set.
:return: None | View/Scoreboard.py | set_player_1_name | sahil1105/BattleshipGame | 0 | python | def set_player_1_name(self, name):
'\n Utility function to set the name of the first player.\n :param name: Name to set.\n :return: None\n '
self.set_text(self.player1_name, name) | def set_player_1_name(self, name):
'\n Utility function to set the name of the first player.\n :param name: Name to set.\n :return: None\n '
self.set_text(self.player1_name, name)<|docstring|>Utility function to set the name of the first player.
:param name: Name to set.
:return: None<|endoftext|> |
c210b53ed1c991ee0a1c9c45efb5a6c8e33ec0cf4f15fedb87736d0ae63ef047 | def set_player_2_name(self, name):
'\n Utility function to set the name of the second player.\n :param name: Name to set.\n :return: None\n '
self.set_text(self.player2_name, name) | Utility function to set the name of the second player.
:param name: Name to set.
:return: None | View/Scoreboard.py | set_player_2_name | sahil1105/BattleshipGame | 0 | python | def set_player_2_name(self, name):
'\n Utility function to set the name of the second player.\n :param name: Name to set.\n :return: None\n '
self.set_text(self.player2_name, name) | def set_player_2_name(self, name):
'\n Utility function to set the name of the second player.\n :param name: Name to set.\n :return: None\n '
self.set_text(self.player2_name, name)<|docstring|>Utility function to set the name of the second player.
:param name: Name to set.
:return: None<|endoftext|> |
83846376bcfb6c60f279d18834782be8f6885ce1e3ea71787e386fb7468e0161 | def set_score_1(self, new_score):
'\n Utility function to set the score of the first player.\n :param new_score: Score to set\n :return: None\n '
self.score_1 = new_score
self.set_text(self.score_box_1, self.score_1) | Utility function to set the score of the first player.
:param new_score: Score to set
:return: None | View/Scoreboard.py | set_score_1 | sahil1105/BattleshipGame | 0 | python | def set_score_1(self, new_score):
'\n Utility function to set the score of the first player.\n :param new_score: Score to set\n :return: None\n '
self.score_1 = new_score
self.set_text(self.score_box_1, self.score_1) | def set_score_1(self, new_score):
'\n Utility function to set the score of the first player.\n :param new_score: Score to set\n :return: None\n '
self.score_1 = new_score
self.set_text(self.score_box_1, self.score_1)<|docstring|>Utility function to set the score of the first player.
:param new_score: Score to set
:return: None<|endoftext|> |
d9306e5a77a870032a722df82c834ddf103704215d0da603f3cb71b8fb7f5196 | def set_score_2(self, new_score):
'\n Utility function to set the score of the second player.\n :param new_score: Score to set\n :return: None\n '
self.score_2 = new_score
self.set_text(self.score_box_2, self.score_2) | Utility function to set the score of the second player.
:param new_score: Score to set
:return: None | View/Scoreboard.py | set_score_2 | sahil1105/BattleshipGame | 0 | python | def set_score_2(self, new_score):
'\n Utility function to set the score of the second player.\n :param new_score: Score to set\n :return: None\n '
self.score_2 = new_score
self.set_text(self.score_box_2, self.score_2) | def set_score_2(self, new_score):
'\n Utility function to set the score of the second player.\n :param new_score: Score to set\n :return: None\n '
self.score_2 = new_score
self.set_text(self.score_box_2, self.score_2)<|docstring|>Utility function to set the score of the second player.
:param new_score: Score to set
:return: None<|endoftext|> |
337b46ecefd3f74c38027660c1c5ce6b3cfa75ed4f174a31d337f42fa30f013e | def increment_score_1(self, inc=1):
'\n Utility function to increment the current score of the first player.\n :param inc: Points to increment by. Default=1\n :return: None\n '
self.set_score_1((self.score_1 + inc)) | Utility function to increment the current score of the first player.
:param inc: Points to increment by. Default=1
:return: None | View/Scoreboard.py | increment_score_1 | sahil1105/BattleshipGame | 0 | python | def increment_score_1(self, inc=1):
'\n Utility function to increment the current score of the first player.\n :param inc: Points to increment by. Default=1\n :return: None\n '
self.set_score_1((self.score_1 + inc)) | def increment_score_1(self, inc=1):
'\n Utility function to increment the current score of the first player.\n :param inc: Points to increment by. Default=1\n :return: None\n '
self.set_score_1((self.score_1 + inc))<|docstring|>Utility function to increment the current score of the first player.
:param inc: Points to increment by. Default=1
:return: None<|endoftext|> |
081c4d760dad148b23007430a3cbe06c4af85fece2ff3d8716fdebcd05d39831 | def increment_score_2(self, inc=1):
'\n Utility function to increment the current score of the second player.\n :param inc: Points to increment by. Default=1\n :return: None\n '
self.set_score_2((self.score_2 + inc)) | Utility function to increment the current score of the second player.
:param inc: Points to increment by. Default=1
:return: None | View/Scoreboard.py | increment_score_2 | sahil1105/BattleshipGame | 0 | python | def increment_score_2(self, inc=1):
'\n Utility function to increment the current score of the second player.\n :param inc: Points to increment by. Default=1\n :return: None\n '
self.set_score_2((self.score_2 + inc)) | def increment_score_2(self, inc=1):
'\n Utility function to increment the current score of the second player.\n :param inc: Points to increment by. Default=1\n :return: None\n '
self.set_score_2((self.score_2 + inc))<|docstring|>Utility function to increment the current score of the second player.
:param inc: Points to increment by. Default=1
:return: None<|endoftext|> |
1f47eeb388278ba506c383066f39bc4124b009e4d7119a7fba2316affe9a6401 | def reset_scores(self):
'\n Utility function to reset the scores of both players to 0.\n :return: None\n '
self.set_score_1(0)
self.set_score_2(0) | Utility function to reset the scores of both players to 0.
:return: None | View/Scoreboard.py | reset_scores | sahil1105/BattleshipGame | 0 | python | def reset_scores(self):
'\n Utility function to reset the scores of both players to 0.\n :return: None\n '
self.set_score_1(0)
self.set_score_2(0) | def reset_scores(self):
'\n Utility function to reset the scores of both players to 0.\n :return: None\n '
self.set_score_1(0)
self.set_score_2(0)<|docstring|>Utility function to reset the scores of both players to 0.
:return: None<|endoftext|> |
fd6225e8f3d3f84078a5bcbb2da860e6cae1d06957183704dbc97e4ef860af3d | def set_text(self, component, text):
'\n Utility function used by above functions to set the text of a component on the canvas.\n :param component: Component whose text to change.\n :param text: Text to set in the component.\n :return: None\n '
self.itemconfig(component, text=str(text)) | Utility function used by above functions to set the text of a component on the canvas.
:param component: Component whose text to change.
:param text: Text to set in the component.
:return: None | View/Scoreboard.py | set_text | sahil1105/BattleshipGame | 0 | python | def set_text(self, component, text):
'\n Utility function used by above functions to set the text of a component on the canvas.\n :param component: Component whose text to change.\n :param text: Text to set in the component.\n :return: None\n '
self.itemconfig(component, text=str(text)) | def set_text(self, component, text):
'\n Utility function used by above functions to set the text of a component on the canvas.\n :param component: Component whose text to change.\n :param text: Text to set in the component.\n :return: None\n '
self.itemconfig(component, text=str(text))<|docstring|>Utility function used by above functions to set the text of a component on the canvas.
:param component: Component whose text to change.
:param text: Text to set in the component.
:return: None<|endoftext|> |
b336c1dbfcf02f63812dd4381d24226334ab8b08f0b5d9c0abbf894cccb76e0c | def _get_default_params() -> Dict[(str, str)]:
'\n Get fake params pointing to a different image so we can test the code\n without affecting the official images.\n '
ecr_base_path = os.environ['AM_ECR_BASE_PATH']
default_params = {'ECR_BASE_PATH': ecr_base_path, 'BASE_IMAGE': 'amp_test', 'DEV_TOOLS_IMAGE_PROD': f'{ecr_base_path}/dev_tools:prod'}
return default_params | Get fake params pointing to a different image so we can test the code
without affecting the official images. | test/test_tasks.py | _get_default_params | ajmal017/amp | 0 | python | def _get_default_params() -> Dict[(str, str)]:
'\n Get fake params pointing to a different image so we can test the code\n without affecting the official images.\n '
ecr_base_path = os.environ['AM_ECR_BASE_PATH']
default_params = {'ECR_BASE_PATH': ecr_base_path, 'BASE_IMAGE': 'amp_test', 'DEV_TOOLS_IMAGE_PROD': f'{ecr_base_path}/dev_tools:prod'}
return default_params | def _get_default_params() -> Dict[(str, str)]:
'\n Get fake params pointing to a different image so we can test the code\n without affecting the official images.\n '
ecr_base_path = os.environ['AM_ECR_BASE_PATH']
default_params = {'ECR_BASE_PATH': ecr_base_path, 'BASE_IMAGE': 'amp_test', 'DEV_TOOLS_IMAGE_PROD': f'{ecr_base_path}/dev_tools:prod'}
return default_params<|docstring|>Get fake params pointing to a different image so we can test the code
without affecting the official images.<|endoftext|> |
0020842a66c1d4cc128d35a42c9f8254a9c071d36f7fa1116435cf38cbfa7b2d | def __init__(self, inipath):
'Initialize an API object with a path to the config file and connect to btc-e.com.'
self.inipath = inipath
self.nonce = 0
config = configparser.ConfigParser()
config.read(inipath)
if (not ('API' in config)):
config.add_section('API')
self.secret = config.get('API', 'secret', fallback='copy API secret here').encode('ascii')
self.key = config.get('API', 'key', fallback='copy API secret here').encode('ascii') | Initialize an API object with a path to the config file and connect to btc-e.com. | BTCe.py | __init__ | devium/BTCeGUI | 1 | python | def __init__(self, inipath):
self.inipath = inipath
self.nonce = 0
config = configparser.ConfigParser()
config.read(inipath)
if (not ('API' in config)):
config.add_section('API')
self.secret = config.get('API', 'secret', fallback='copy API secret here').encode('ascii')
self.key = config.get('API', 'key', fallback='copy API secret here').encode('ascii') | def __init__(self, inipath):
self.inipath = inipath
self.nonce = 0
config = configparser.ConfigParser()
config.read(inipath)
if (not ('API' in config)):
config.add_section('API')
self.secret = config.get('API', 'secret', fallback='copy API secret here').encode('ascii')
self.key = config.get('API', 'key', fallback='copy API secret here').encode('ascii')<|docstring|>Initialize an API object with a path to the config file and connect to btc-e.com.<|endoftext|> |
f99f64d7fa67981aa9af3e4e78811c431c9985e0e39cb8c15b95b92d6e7f12b9 | def request(self, method, extraparams={}):
'Send an API request for method to BTC-e and return a dictionary of the return object.'
self.nonce += 1
params = {'method': method, 'nonce': self.nonce}
params.update(extraparams)
params = urllib.parse.urlencode(params).encode('ascii')
mac = hmac.new(self.secret, digestmod=hashlib.sha512)
mac.update(params)
sign = mac.hexdigest()
response = ''
try:
conn = http.client.HTTPSConnection('btc-e.com', timeout=5)
headers = {'Content-type': 'application/x-www-form-urlencoded', 'Key': self.key, 'Sign': sign}
conn.request('POST', '/tapi', params, headers)
response = conn.getresponse().read().decode('utf-8')
except Exception as err:
response = '{{"success" : 0, "error" : "{}"}}'.format(err)
j = {}
try:
j = json.loads(response)
if (j['success'] == 0):
matchnonce = re.match('invalid nonce parameter; on key:(\\d+)', j['error'])
if matchnonce:
self.nonce = int(matchnonce.group(1))
return self.request(method, extraparams)
except ValueError:
j = {'success': 0, 'error': 'No valid JSON document received.'}
return j | Send an API request for method to BTC-e and return a dictionary of the return object. | BTCe.py | request | devium/BTCeGUI | 1 | python | def request(self, method, extraparams={}):
self.nonce += 1
params = {'method': method, 'nonce': self.nonce}
params.update(extraparams)
params = urllib.parse.urlencode(params).encode('ascii')
mac = hmac.new(self.secret, digestmod=hashlib.sha512)
mac.update(params)
sign = mac.hexdigest()
response =
try:
conn = http.client.HTTPSConnection('btc-e.com', timeout=5)
headers = {'Content-type': 'application/x-www-form-urlencoded', 'Key': self.key, 'Sign': sign}
conn.request('POST', '/tapi', params, headers)
response = conn.getresponse().read().decode('utf-8')
except Exception as err:
response = '{{"success" : 0, "error" : "{}"}}'.format(err)
j = {}
try:
j = json.loads(response)
if (j['success'] == 0):
matchnonce = re.match('invalid nonce parameter; on key:(\\d+)', j['error'])
if matchnonce:
self.nonce = int(matchnonce.group(1))
return self.request(method, extraparams)
except ValueError:
j = {'success': 0, 'error': 'No valid JSON document received.'}
return j | def request(self, method, extraparams={}):
self.nonce += 1
params = {'method': method, 'nonce': self.nonce}
params.update(extraparams)
params = urllib.parse.urlencode(params).encode('ascii')
mac = hmac.new(self.secret, digestmod=hashlib.sha512)
mac.update(params)
sign = mac.hexdigest()
response =
try:
conn = http.client.HTTPSConnection('btc-e.com', timeout=5)
headers = {'Content-type': 'application/x-www-form-urlencoded', 'Key': self.key, 'Sign': sign}
conn.request('POST', '/tapi', params, headers)
response = conn.getresponse().read().decode('utf-8')
except Exception as err:
response = '{{"success" : 0, "error" : "{}"}}'.format(err)
j = {}
try:
j = json.loads(response)
if (j['success'] == 0):
matchnonce = re.match('invalid nonce parameter; on key:(\\d+)', j['error'])
if matchnonce:
self.nonce = int(matchnonce.group(1))
return self.request(method, extraparams)
except ValueError:
j = {'success': 0, 'error': 'No valid JSON document received.'}
return j<|docstring|>Send an API request for method to BTC-e and return a dictionary of the return object.<|endoftext|> |
0eca34f1a382a8b25ef6e184952a89d29604f6c1808b09476b9ea837c906fd5e | def getinfo(self):
'Request account balance info.'
return self.request('getInfo') | Request account balance info. | BTCe.py | getinfo | devium/BTCeGUI | 1 | python | def getinfo(self):
return self.request('getInfo') | def getinfo(self):
return self.request('getInfo')<|docstring|>Request account balance info.<|endoftext|> |
813d0d2fc982f31743fa3bb976c16d69a8c9a3689ccc1649da52b3f227453527 | def transhistory(self, from_=0, count=1000, fromid=0, endid=sys.maxsize, order='DESC', since=0, end=sys.maxsize):
'Request transaction history.'
return self.request('TransHistory', {'from': from_, 'count': count, 'from_id': fromid, 'end_id': endid, 'order': order, 'since': since, 'end': end}) | Request transaction history. | BTCe.py | transhistory | devium/BTCeGUI | 1 | python | def transhistory(self, from_=0, count=1000, fromid=0, endid=sys.maxsize, order='DESC', since=0, end=sys.maxsize):
return self.request('TransHistory', {'from': from_, 'count': count, 'from_id': fromid, 'end_id': endid, 'order': order, 'since': since, 'end': end}) | def transhistory(self, from_=0, count=1000, fromid=0, endid=sys.maxsize, order='DESC', since=0, end=sys.maxsize):
return self.request('TransHistory', {'from': from_, 'count': count, 'from_id': fromid, 'end_id': endid, 'order': order, 'since': since, 'end': end})<|docstring|>Request transaction history.<|endoftext|> |
73f62b658fe6019e210b50627b986d29894b2a8202ebabc1571dd03fc8726760 | def tradehistory(self, from_=0, count=1000, fromid=0, endid=sys.maxsize, order='DESC', since=0, end=sys.maxsize, pair='', active=1):
'Request trade history.'
return self.request('TradeHistory', {'from': from_, 'count': count, 'from_id': fromid, 'end_id': endid, 'order': order, 'since': since, 'end': end, 'pair': pair, 'active': active}) | Request trade history. | BTCe.py | tradehistory | devium/BTCeGUI | 1 | python | def tradehistory(self, from_=0, count=1000, fromid=0, endid=sys.maxsize, order='DESC', since=0, end=sys.maxsize, pair=, active=1):
return self.request('TradeHistory', {'from': from_, 'count': count, 'from_id': fromid, 'end_id': endid, 'order': order, 'since': since, 'end': end, 'pair': pair, 'active': active}) | def tradehistory(self, from_=0, count=1000, fromid=0, endid=sys.maxsize, order='DESC', since=0, end=sys.maxsize, pair=, active=1):
return self.request('TradeHistory', {'from': from_, 'count': count, 'from_id': fromid, 'end_id': endid, 'order': order, 'since': since, 'end': end, 'pair': pair, 'active': active})<|docstring|>Request trade history.<|endoftext|> |
b0206ba6daf1c9519b822a4eff25bbaa1840708bb0f97a572d0f73a1f3eafb50 | def activeorders(self, pair=''):
'Request active orders.'
return self.request('ActiveOrders') | Request active orders. | BTCe.py | activeorders | devium/BTCeGUI | 1 | python | def activeorders(self, pair=):
return self.request('ActiveOrders') | def activeorders(self, pair=):
return self.request('ActiveOrders')<|docstring|>Request active orders.<|endoftext|> |
52628f2b2854ff46b8b456a7a895e8754dd955bc5dc27e46816096463615141a | def trade(self, pair, type, rate, amount):
'Place buy/sell (type) order for amount of given currency pair at rate.'
return self.request('Trade', {'pair': pair, 'type': type, 'rate': rate, 'amount': amount}) | Place buy/sell (type) order for amount of given currency pair at rate. | BTCe.py | trade | devium/BTCeGUI | 1 | python | def trade(self, pair, type, rate, amount):
return self.request('Trade', {'pair': pair, 'type': type, 'rate': rate, 'amount': amount}) | def trade(self, pair, type, rate, amount):
return self.request('Trade', {'pair': pair, 'type': type, 'rate': rate, 'amount': amount})<|docstring|>Place buy/sell (type) order for amount of given currency pair at rate.<|endoftext|> |
107238fa6f6c0c03010475faac121156623f98f336e72c9bd353c8c35231c183 | def cancelorder(self, orderid):
'Cancel order with id orderid.'
return self.request('CancelOrder', {'order_id': orderid}) | Cancel order with id orderid. | BTCe.py | cancelorder | devium/BTCeGUI | 1 | python | def cancelorder(self, orderid):
return self.request('CancelOrder', {'order_id': orderid}) | def cancelorder(self, orderid):
return self.request('CancelOrder', {'order_id': orderid})<|docstring|>Cancel order with id orderid.<|endoftext|> |
eccd25c825c681dc7a4175e8a68e63f261ddf6f849c9e1b2baad62a530c77fd8 | @staticmethod
def query(method, pair=''):
'Query a method of the public BTC-e API.'
response = ''
try:
response = urllib.request.urlopen('http://btc-e.com/api/3/{method}/{pair}'.format(method=method, pair=pair), timeout=5).read().decode('utf-8')
except Exception as err:
response = '{{"success" : 0, "error" : "{}"}}'.format(err)
j = {}
try:
j = json.loads(response)
except ValueError:
j = {'success': 0, 'error': 'No valid JSON document received.'}
return j | Query a method of the public BTC-e API. | BTCe.py | query | devium/BTCeGUI | 1 | python | @staticmethod
def query(method, pair=):
response =
try:
response = urllib.request.urlopen('http://btc-e.com/api/3/{method}/{pair}'.format(method=method, pair=pair), timeout=5).read().decode('utf-8')
except Exception as err:
response = '{{"success" : 0, "error" : "{}"}}'.format(err)
j = {}
try:
j = json.loads(response)
except ValueError:
j = {'success': 0, 'error': 'No valid JSON document received.'}
return j | @staticmethod
def query(method, pair=):
response =
try:
response = urllib.request.urlopen('http://btc-e.com/api/3/{method}/{pair}'.format(method=method, pair=pair), timeout=5).read().decode('utf-8')
except Exception as err:
response = '{{"success" : 0, "error" : "{}"}}'.format(err)
j = {}
try:
j = json.loads(response)
except ValueError:
j = {'success': 0, 'error': 'No valid JSON document received.'}
return j<|docstring|>Query a method of the public BTC-e API.<|endoftext|> |
05924f704eb7d193b734834b287dc918d8de6201705b725d8c18ed4d67fad825 | @staticmethod
def info():
'Query public info method.'
return API.query('info') | Query public info method. | BTCe.py | info | devium/BTCeGUI | 1 | python | @staticmethod
def info():
return API.query('info') | @staticmethod
def info():
return API.query('info')<|docstring|>Query public info method.<|endoftext|> |
71e698255968dbd07668c3ace3dacc929db8eeaf71cfe4902943dff55a99033a | @staticmethod
def ticker(pair):
'Query public ticker method for given currency pair.'
return API.query('ticker', pair) | Query public ticker method for given currency pair. | BTCe.py | ticker | devium/BTCeGUI | 1 | python | @staticmethod
def ticker(pair):
return API.query('ticker', pair) | @staticmethod
def ticker(pair):
return API.query('ticker', pair)<|docstring|>Query public ticker method for given currency pair.<|endoftext|> |
514d235ff4e177b5c3ebe2c35f4308876e281f6fff3b8ffe113ab367339bf217 | @staticmethod
def depth(pair):
'Query public depth method for given currency pair.'
return API.query('depth', pair) | Query public depth method for given currency pair. | BTCe.py | depth | devium/BTCeGUI | 1 | python | @staticmethod
def depth(pair):
return API.query('depth', pair) | @staticmethod
def depth(pair):
return API.query('depth', pair)<|docstring|>Query public depth method for given currency pair.<|endoftext|> |
18f1f465da7bbb70daeb570cda6b80d7b1d70a5800cd1e8add09d4a765230ff7 | @staticmethod
def trades(pair):
'Query public trades method for given currency pair.'
return API.query('trades', pair) | Query public trades method for given currency pair. | BTCe.py | trades | devium/BTCeGUI | 1 | python | @staticmethod
def trades(pair):
return API.query('trades', pair) | @staticmethod
def trades(pair):
return API.query('trades', pair)<|docstring|>Query public trades method for given currency pair.<|endoftext|> |
4431bed6baa8299e0d5df47ad13ceb288d05251615f00d3ea74c5c4b506471d7 | @classmethod
def fromJson(cls, jsonObject):
'\n Restores the TLognormal object from the json object with its\n attributes.\n @param jsonObject: json object\n @return: the restored UQSetting object\n '
key = '_TLognormal__mu'
if (key in jsonObject):
mu = float(jsonObject[key])
key = '_TLognormal__sigma'
if (key in jsonObject):
sigma = float(jsonObject[key])
key = '_TLognormal__a'
if (key in jsonObject):
a = float(jsonObject[key])
key = '_TLognormal__b'
if (key in jsonObject):
b = float(jsonObject[key])
return TLognormal(mu, sigma, a, b) | Restores the TLognormal object from the json object with its
attributes.
@param jsonObject: json object
@return: the restored UQSetting object | lib/pysgpp/extensions/datadriven/uq/dists/TLognormal.py | fromJson | valentjn/thesis | 4 | python | @classmethod
def fromJson(cls, jsonObject):
'\n Restores the TLognormal object from the json object with its\n attributes.\n @param jsonObject: json object\n @return: the restored UQSetting object\n '
key = '_TLognormal__mu'
if (key in jsonObject):
mu = float(jsonObject[key])
key = '_TLognormal__sigma'
if (key in jsonObject):
sigma = float(jsonObject[key])
key = '_TLognormal__a'
if (key in jsonObject):
a = float(jsonObject[key])
key = '_TLognormal__b'
if (key in jsonObject):
b = float(jsonObject[key])
return TLognormal(mu, sigma, a, b) | @classmethod
def fromJson(cls, jsonObject):
'\n Restores the TLognormal object from the json object with its\n attributes.\n @param jsonObject: json object\n @return: the restored UQSetting object\n '
key = '_TLognormal__mu'
if (key in jsonObject):
mu = float(jsonObject[key])
key = '_TLognormal__sigma'
if (key in jsonObject):
sigma = float(jsonObject[key])
key = '_TLognormal__a'
if (key in jsonObject):
a = float(jsonObject[key])
key = '_TLognormal__b'
if (key in jsonObject):
b = float(jsonObject[key])
return TLognormal(mu, sigma, a, b)<|docstring|>Restores the TLognormal object from the json object with its
attributes.
@param jsonObject: json object
@return: the restored UQSetting object<|endoftext|> |
4033b3ebc2ca3846e0c36e7528a1cece8005e19770a06702cc1bce206000abcc | @register.filter
def is_news_page(page):
'Return True if page is a news page.'
return isinstance(page, NewsPage) | Return True if page is a news page. | bc/news/templatetags/news_tags.py | is_news_page | Buckinghamshire-Digital-Service/buckinghamshire-council | 1 | python | @register.filter
def is_news_page(page):
return isinstance(page, NewsPage) | @register.filter
def is_news_page(page):
return isinstance(page, NewsPage)<|docstring|>Return True if page is a news page.<|endoftext|> |
2000755d4aafca8ee302075c29b4ddfb06e3501326f8710decb3d80528842610 | def _parse_tensor_value(tensor_proto, return_list=False):
"Helper method for reading a tensor value from a tensor proto.\n\n The rationale for the distinction between `True` and `False value of\n `return_list` is as follows:\n - `return_list=True` is used for TensorDebugMode values other than\n FULL_TENSOR, e.g., CONCISE_HEALTH, SHAPE and FULL_HEATLH. Under\n those modes, the value is guaranteed (by contract) to be a 1D float64\n tensor.\n - `return_list=False` is used for the FULL_HEALTH TensorDebugMode\n specifically. Instead, we use `numpy.ndarray` to maximally preserve\n the shape, dtype and value information regarding the underlying tensor\n value. Under that mode, we don't use a python list to represent the\n tensor value because that can lead to loss of information (e.g., both\n float16 and float32 dtypes get mapped to Python floats).\n\n Args:\n tensor_proto: The TensorProto instance from which the tensor value will be\n loaded.\n return_list: Whether the return value will be a nested Python list that\n comes out from `numpy.ndarray.tolist()`.\n\n Returns:\n If parsing is successful, the tensor value as a `numpy.ndarray` or the\n nested Python list converted from it.\n If parsing fails, `None`.\n "
try:
ndarray = tensor_util.MakeNdarray(tensor_proto)
return (ndarray.tolist() if return_list else ndarray)
except TypeError:
return None | Helper method for reading a tensor value from a tensor proto.
The rationale for the distinction between `True` and `False value of
`return_list` is as follows:
- `return_list=True` is used for TensorDebugMode values other than
FULL_TENSOR, e.g., CONCISE_HEALTH, SHAPE and FULL_HEATLH. Under
those modes, the value is guaranteed (by contract) to be a 1D float64
tensor.
- `return_list=False` is used for the FULL_HEALTH TensorDebugMode
specifically. Instead, we use `numpy.ndarray` to maximally preserve
the shape, dtype and value information regarding the underlying tensor
value. Under that mode, we don't use a python list to represent the
tensor value because that can lead to loss of information (e.g., both
float16 and float32 dtypes get mapped to Python floats).
Args:
tensor_proto: The TensorProto instance from which the tensor value will be
loaded.
return_list: Whether the return value will be a nested Python list that
comes out from `numpy.ndarray.tolist()`.
Returns:
If parsing is successful, the tensor value as a `numpy.ndarray` or the
nested Python list converted from it.
If parsing fails, `None`. | tensorflow/python/debug/lib/debug_events_reader.py | _parse_tensor_value | Meteorix/tensorflow | 78 | python | def _parse_tensor_value(tensor_proto, return_list=False):
"Helper method for reading a tensor value from a tensor proto.\n\n The rationale for the distinction between `True` and `False value of\n `return_list` is as follows:\n - `return_list=True` is used for TensorDebugMode values other than\n FULL_TENSOR, e.g., CONCISE_HEALTH, SHAPE and FULL_HEATLH. Under\n those modes, the value is guaranteed (by contract) to be a 1D float64\n tensor.\n - `return_list=False` is used for the FULL_HEALTH TensorDebugMode\n specifically. Instead, we use `numpy.ndarray` to maximally preserve\n the shape, dtype and value information regarding the underlying tensor\n value. Under that mode, we don't use a python list to represent the\n tensor value because that can lead to loss of information (e.g., both\n float16 and float32 dtypes get mapped to Python floats).\n\n Args:\n tensor_proto: The TensorProto instance from which the tensor value will be\n loaded.\n return_list: Whether the return value will be a nested Python list that\n comes out from `numpy.ndarray.tolist()`.\n\n Returns:\n If parsing is successful, the tensor value as a `numpy.ndarray` or the\n nested Python list converted from it.\n If parsing fails, `None`.\n "
try:
ndarray = tensor_util.MakeNdarray(tensor_proto)
return (ndarray.tolist() if return_list else ndarray)
except TypeError:
return None | def _parse_tensor_value(tensor_proto, return_list=False):
"Helper method for reading a tensor value from a tensor proto.\n\n The rationale for the distinction between `True` and `False value of\n `return_list` is as follows:\n - `return_list=True` is used for TensorDebugMode values other than\n FULL_TENSOR, e.g., CONCISE_HEALTH, SHAPE and FULL_HEATLH. Under\n those modes, the value is guaranteed (by contract) to be a 1D float64\n tensor.\n - `return_list=False` is used for the FULL_HEALTH TensorDebugMode\n specifically. Instead, we use `numpy.ndarray` to maximally preserve\n the shape, dtype and value information regarding the underlying tensor\n value. Under that mode, we don't use a python list to represent the\n tensor value because that can lead to loss of information (e.g., both\n float16 and float32 dtypes get mapped to Python floats).\n\n Args:\n tensor_proto: The TensorProto instance from which the tensor value will be\n loaded.\n return_list: Whether the return value will be a nested Python list that\n comes out from `numpy.ndarray.tolist()`.\n\n Returns:\n If parsing is successful, the tensor value as a `numpy.ndarray` or the\n nested Python list converted from it.\n If parsing fails, `None`.\n "
try:
ndarray = tensor_util.MakeNdarray(tensor_proto)
return (ndarray.tolist() if return_list else ndarray)
except TypeError:
return None<|docstring|>Helper method for reading a tensor value from a tensor proto.
The rationale for the distinction between `True` and `False value of
`return_list` is as follows:
- `return_list=True` is used for TensorDebugMode values other than
FULL_TENSOR, e.g., CONCISE_HEALTH, SHAPE and FULL_HEATLH. Under
those modes, the value is guaranteed (by contract) to be a 1D float64
tensor.
- `return_list=False` is used for the FULL_HEALTH TensorDebugMode
specifically. Instead, we use `numpy.ndarray` to maximally preserve
the shape, dtype and value information regarding the underlying tensor
value. Under that mode, we don't use a python list to represent the
tensor value because that can lead to loss of information (e.g., both
float16 and float32 dtypes get mapped to Python floats).
Args:
tensor_proto: The TensorProto instance from which the tensor value will be
loaded.
return_list: Whether the return value will be a nested Python list that
comes out from `numpy.ndarray.tolist()`.
Returns:
If parsing is successful, the tensor value as a `numpy.ndarray` or the
nested Python list converted from it.
If parsing fails, `None`.<|endoftext|> |
b2e8a84bac68ad46e328463905e6e68ce794621ac203b46c60055c7887dc59c9 | def _execution_digest_from_debug_event_proto(debug_event, offset):
'Convert a DebugEvent proto into an ExecutionDigest data object.'
return ExecutionDigest(debug_event.wall_time, offset, debug_event.execution.op_type, output_tensor_device_ids=(debug_event.execution.output_tensor_device_ids or None)) | Convert a DebugEvent proto into an ExecutionDigest data object. | tensorflow/python/debug/lib/debug_events_reader.py | _execution_digest_from_debug_event_proto | Meteorix/tensorflow | 78 | python | def _execution_digest_from_debug_event_proto(debug_event, offset):
return ExecutionDigest(debug_event.wall_time, offset, debug_event.execution.op_type, output_tensor_device_ids=(debug_event.execution.output_tensor_device_ids or None)) | def _execution_digest_from_debug_event_proto(debug_event, offset):
return ExecutionDigest(debug_event.wall_time, offset, debug_event.execution.op_type, output_tensor_device_ids=(debug_event.execution.output_tensor_device_ids or None))<|docstring|>Convert a DebugEvent proto into an ExecutionDigest data object.<|endoftext|> |
b362b6d67c714b4c7dc98cd0ecea6d6491b48f8a05119d150082bff8df50f564 | def _execution_from_debug_event_proto(debug_event, offset):
'Convert a DebugEvent proto into an Execution data object.'
execution_proto = debug_event.execution
debug_tensor_values = None
if (execution_proto.tensor_debug_mode == debug_event_pb2.TensorDebugMode.FULL_TENSOR):
pass
elif (execution_proto.tensor_debug_mode != debug_event_pb2.TensorDebugMode.NO_TENSOR):
debug_tensor_values = []
for tensor_proto in execution_proto.tensor_protos:
debug_tensor_values.append(_parse_tensor_value(tensor_proto, return_list=True))
return Execution(_execution_digest_from_debug_event_proto(debug_event, offset), execution_proto.code_location.host_name, tuple(execution_proto.code_location.stack_frame_ids), execution_proto.tensor_debug_mode, graph_id=execution_proto.graph_id, input_tensor_ids=tuple(execution_proto.input_tensor_ids), output_tensor_ids=tuple(execution_proto.output_tensor_ids), debug_tensor_values=_tuple_or_none(debug_tensor_values)) | Convert a DebugEvent proto into an Execution data object. | tensorflow/python/debug/lib/debug_events_reader.py | _execution_from_debug_event_proto | Meteorix/tensorflow | 78 | python | def _execution_from_debug_event_proto(debug_event, offset):
execution_proto = debug_event.execution
debug_tensor_values = None
if (execution_proto.tensor_debug_mode == debug_event_pb2.TensorDebugMode.FULL_TENSOR):
pass
elif (execution_proto.tensor_debug_mode != debug_event_pb2.TensorDebugMode.NO_TENSOR):
debug_tensor_values = []
for tensor_proto in execution_proto.tensor_protos:
debug_tensor_values.append(_parse_tensor_value(tensor_proto, return_list=True))
return Execution(_execution_digest_from_debug_event_proto(debug_event, offset), execution_proto.code_location.host_name, tuple(execution_proto.code_location.stack_frame_ids), execution_proto.tensor_debug_mode, graph_id=execution_proto.graph_id, input_tensor_ids=tuple(execution_proto.input_tensor_ids), output_tensor_ids=tuple(execution_proto.output_tensor_ids), debug_tensor_values=_tuple_or_none(debug_tensor_values)) | def _execution_from_debug_event_proto(debug_event, offset):
execution_proto = debug_event.execution
debug_tensor_values = None
if (execution_proto.tensor_debug_mode == debug_event_pb2.TensorDebugMode.FULL_TENSOR):
pass
elif (execution_proto.tensor_debug_mode != debug_event_pb2.TensorDebugMode.NO_TENSOR):
debug_tensor_values = []
for tensor_proto in execution_proto.tensor_protos:
debug_tensor_values.append(_parse_tensor_value(tensor_proto, return_list=True))
return Execution(_execution_digest_from_debug_event_proto(debug_event, offset), execution_proto.code_location.host_name, tuple(execution_proto.code_location.stack_frame_ids), execution_proto.tensor_debug_mode, graph_id=execution_proto.graph_id, input_tensor_ids=tuple(execution_proto.input_tensor_ids), output_tensor_ids=tuple(execution_proto.output_tensor_ids), debug_tensor_values=_tuple_or_none(debug_tensor_values))<|docstring|>Convert a DebugEvent proto into an Execution data object.<|endoftext|> |
107268a31c83fb1a2e4df8e20e033006cf36de6232812da91e74b1a870a6a56d | def _generic_iterator(self, file_path):
'A helper method that makes an iterator given a debug-events file path.\n\n Repeated calls to this method create iterators that remember the last\n successful reading position (offset) for each given `file_path`. So the\n iterators are meant for incremental reading of the file.\n\n Args:\n file_path: Path to the file to create the iterator for.\n\n Yields:\n A tuple of (offset, debug_event_proto) on each `next()` call.\n '
yield_count = 0
reader = self._get_reader(file_path)
read_lock = self._reader_read_locks[file_path]
read_lock.acquire()
try:
while True:
current_offset = self._reader_offsets[file_path]
try:
(record, self._reader_offsets[file_path]) = reader.read(current_offset)
except (errors.DataLossError, IndexError):
break
(yield DebugEventWithOffset(debug_event=debug_event_pb2.DebugEvent.FromString(record), offset=current_offset))
yield_count += 1
if ((yield_count % self._READER_RELEASE_PER) == 0):
read_lock.release()
read_lock.acquire()
finally:
read_lock.release() | A helper method that makes an iterator given a debug-events file path.
Repeated calls to this method create iterators that remember the last
successful reading position (offset) for each given `file_path`. So the
iterators are meant for incremental reading of the file.
Args:
file_path: Path to the file to create the iterator for.
Yields:
A tuple of (offset, debug_event_proto) on each `next()` call. | tensorflow/python/debug/lib/debug_events_reader.py | _generic_iterator | Meteorix/tensorflow | 78 | python | def _generic_iterator(self, file_path):
'A helper method that makes an iterator given a debug-events file path.\n\n Repeated calls to this method create iterators that remember the last\n successful reading position (offset) for each given `file_path`. So the\n iterators are meant for incremental reading of the file.\n\n Args:\n file_path: Path to the file to create the iterator for.\n\n Yields:\n A tuple of (offset, debug_event_proto) on each `next()` call.\n '
yield_count = 0
reader = self._get_reader(file_path)
read_lock = self._reader_read_locks[file_path]
read_lock.acquire()
try:
while True:
current_offset = self._reader_offsets[file_path]
try:
(record, self._reader_offsets[file_path]) = reader.read(current_offset)
except (errors.DataLossError, IndexError):
break
(yield DebugEventWithOffset(debug_event=debug_event_pb2.DebugEvent.FromString(record), offset=current_offset))
yield_count += 1
if ((yield_count % self._READER_RELEASE_PER) == 0):
read_lock.release()
read_lock.acquire()
finally:
read_lock.release() | def _generic_iterator(self, file_path):
'A helper method that makes an iterator given a debug-events file path.\n\n Repeated calls to this method create iterators that remember the last\n successful reading position (offset) for each given `file_path`. So the\n iterators are meant for incremental reading of the file.\n\n Args:\n file_path: Path to the file to create the iterator for.\n\n Yields:\n A tuple of (offset, debug_event_proto) on each `next()` call.\n '
yield_count = 0
reader = self._get_reader(file_path)
read_lock = self._reader_read_locks[file_path]
read_lock.acquire()
try:
while True:
current_offset = self._reader_offsets[file_path]
try:
(record, self._reader_offsets[file_path]) = reader.read(current_offset)
except (errors.DataLossError, IndexError):
break
(yield DebugEventWithOffset(debug_event=debug_event_pb2.DebugEvent.FromString(record), offset=current_offset))
yield_count += 1
if ((yield_count % self._READER_RELEASE_PER) == 0):
read_lock.release()
read_lock.acquire()
finally:
read_lock.release()<|docstring|>A helper method that makes an iterator given a debug-events file path.
Repeated calls to this method create iterators that remember the last
successful reading position (offset) for each given `file_path`. So the
iterators are meant for incremental reading of the file.
Args:
file_path: Path to the file to create the iterator for.
Yields:
A tuple of (offset, debug_event_proto) on each `next()` call.<|endoftext|> |
b001233ed3e0291a8e675d169127bf9108ac7eb80846d39124e99648d94e74aa | def _get_reader(self, file_path):
'Get a random-access reader for TFRecords file at file_path.'
file_path = compat.as_bytes(file_path)
if (file_path not in self._readers):
with self._readers_lock:
if (file_path not in self._readers):
self._readers[file_path] = tf_record.tf_record_random_reader(file_path)
self._reader_read_locks[file_path] = threading.Lock()
self._reader_offsets[file_path] = 0
return self._readers[file_path] | Get a random-access reader for TFRecords file at file_path. | tensorflow/python/debug/lib/debug_events_reader.py | _get_reader | Meteorix/tensorflow | 78 | python | def _get_reader(self, file_path):
file_path = compat.as_bytes(file_path)
if (file_path not in self._readers):
with self._readers_lock:
if (file_path not in self._readers):
self._readers[file_path] = tf_record.tf_record_random_reader(file_path)
self._reader_read_locks[file_path] = threading.Lock()
self._reader_offsets[file_path] = 0
return self._readers[file_path] | def _get_reader(self, file_path):
file_path = compat.as_bytes(file_path)
if (file_path not in self._readers):
with self._readers_lock:
if (file_path not in self._readers):
self._readers[file_path] = tf_record.tf_record_random_reader(file_path)
self._reader_read_locks[file_path] = threading.Lock()
self._reader_offsets[file_path] = 0
return self._readers[file_path]<|docstring|>Get a random-access reader for TFRecords file at file_path.<|endoftext|> |
2f94188db5bd20ddda27f1f2b46a8ea1cfda2b615450cabba3b60cf68b70e909 | def read_source_files_event(self, offset):
'Read a DebugEvent proto at given offset from the .source_files file.'
with self._reader_read_locks[self._source_files_path]:
proto_string = self._get_reader(self._source_files_path).read(offset)[0]
return debug_event_pb2.DebugEvent.FromString(proto_string) | Read a DebugEvent proto at given offset from the .source_files file. | tensorflow/python/debug/lib/debug_events_reader.py | read_source_files_event | Meteorix/tensorflow | 78 | python | def read_source_files_event(self, offset):
with self._reader_read_locks[self._source_files_path]:
proto_string = self._get_reader(self._source_files_path).read(offset)[0]
return debug_event_pb2.DebugEvent.FromString(proto_string) | def read_source_files_event(self, offset):
with self._reader_read_locks[self._source_files_path]:
proto_string = self._get_reader(self._source_files_path).read(offset)[0]
return debug_event_pb2.DebugEvent.FromString(proto_string)<|docstring|>Read a DebugEvent proto at given offset from the .source_files file.<|endoftext|> |
9c9bcf271d2cb3861c9ffccac428c40a9e5182a89a843020b30902ac7eb4f7ab | def read_graphs_event(self, offset):
'Read a DebugEvent proto at a given offset from the .graphs file.\n\n Args:\n offset: Offset to read the DebugEvent proto from.\n\n Returns:\n A DebugEventProto.\n\n Raises:\n `errors.DataLossError` if offset is at a wrong location.\n `IndexError` if offset is out of range of the file.\n '
return debug_event_pb2.DebugEvent.FromString(self._get_reader(self._graphs_path).read(offset)[0]) | Read a DebugEvent proto at a given offset from the .graphs file.
Args:
offset: Offset to read the DebugEvent proto from.
Returns:
A DebugEventProto.
Raises:
`errors.DataLossError` if offset is at a wrong location.
`IndexError` if offset is out of range of the file. | tensorflow/python/debug/lib/debug_events_reader.py | read_graphs_event | Meteorix/tensorflow | 78 | python | def read_graphs_event(self, offset):
'Read a DebugEvent proto at a given offset from the .graphs file.\n\n Args:\n offset: Offset to read the DebugEvent proto from.\n\n Returns:\n A DebugEventProto.\n\n Raises:\n `errors.DataLossError` if offset is at a wrong location.\n `IndexError` if offset is out of range of the file.\n '
return debug_event_pb2.DebugEvent.FromString(self._get_reader(self._graphs_path).read(offset)[0]) | def read_graphs_event(self, offset):
'Read a DebugEvent proto at a given offset from the .graphs file.\n\n Args:\n offset: Offset to read the DebugEvent proto from.\n\n Returns:\n A DebugEventProto.\n\n Raises:\n `errors.DataLossError` if offset is at a wrong location.\n `IndexError` if offset is out of range of the file.\n '
return debug_event_pb2.DebugEvent.FromString(self._get_reader(self._graphs_path).read(offset)[0])<|docstring|>Read a DebugEvent proto at a given offset from the .graphs file.
Args:
offset: Offset to read the DebugEvent proto from.
Returns:
A DebugEventProto.
Raises:
`errors.DataLossError` if offset is at a wrong location.
`IndexError` if offset is out of range of the file.<|endoftext|> |
0b9423dc1c4384610b0b8aefee5c5ac1c199326b62f638ee99b194e66943230d | def read_execution_event(self, offset):
'Read a DebugEvent proto at a given offset from the .execution file.\n\n Args:\n offset: Offset to read the DebugEvent proto from.\n\n Returns:\n A DebugEventProto.\n\n Raises:\n `errors.DataLossError` if offset is at a wrong location.\n `IndexError` if offset is out of range of the file.\n '
with self._reader_read_locks[self._execution_path]:
proto_string = self._get_reader(self._execution_path).read(offset)[0]
return debug_event_pb2.DebugEvent.FromString(proto_string) | Read a DebugEvent proto at a given offset from the .execution file.
Args:
offset: Offset to read the DebugEvent proto from.
Returns:
A DebugEventProto.
Raises:
`errors.DataLossError` if offset is at a wrong location.
`IndexError` if offset is out of range of the file. | tensorflow/python/debug/lib/debug_events_reader.py | read_execution_event | Meteorix/tensorflow | 78 | python | def read_execution_event(self, offset):
'Read a DebugEvent proto at a given offset from the .execution file.\n\n Args:\n offset: Offset to read the DebugEvent proto from.\n\n Returns:\n A DebugEventProto.\n\n Raises:\n `errors.DataLossError` if offset is at a wrong location.\n `IndexError` if offset is out of range of the file.\n '
with self._reader_read_locks[self._execution_path]:
proto_string = self._get_reader(self._execution_path).read(offset)[0]
return debug_event_pb2.DebugEvent.FromString(proto_string) | def read_execution_event(self, offset):
'Read a DebugEvent proto at a given offset from the .execution file.\n\n Args:\n offset: Offset to read the DebugEvent proto from.\n\n Returns:\n A DebugEventProto.\n\n Raises:\n `errors.DataLossError` if offset is at a wrong location.\n `IndexError` if offset is out of range of the file.\n '
with self._reader_read_locks[self._execution_path]:
proto_string = self._get_reader(self._execution_path).read(offset)[0]
return debug_event_pb2.DebugEvent.FromString(proto_string)<|docstring|>Read a DebugEvent proto at a given offset from the .execution file.
Args:
offset: Offset to read the DebugEvent proto from.
Returns:
A DebugEventProto.
Raises:
`errors.DataLossError` if offset is at a wrong location.
`IndexError` if offset is out of range of the file.<|endoftext|> |
5cd40c9c5904097f1635ed858fc3824fda4ee26c0afc06bd77714a2c385fabac | def read_graph_execution_traces_event(self, offset):
'Read DebugEvent at given offset from .graph_execution_traces file.\n\n Args:\n offset: Offset to read the DebugEvent proto from.\n\n Returns:\n A DebugEventProto.\n\n Raises:\n `errors.DataLossError` if offset is at a wrong location.\n `IndexError` if offset is out of range of the file.\n '
with self._reader_read_locks[self._graph_execution_traces_path]:
proto_string = self._get_reader(self._graph_execution_traces_path).read(offset)[0]
return debug_event_pb2.DebugEvent.FromString(proto_string) | Read DebugEvent at given offset from .graph_execution_traces file.
Args:
offset: Offset to read the DebugEvent proto from.
Returns:
A DebugEventProto.
Raises:
`errors.DataLossError` if offset is at a wrong location.
`IndexError` if offset is out of range of the file. | tensorflow/python/debug/lib/debug_events_reader.py | read_graph_execution_traces_event | Meteorix/tensorflow | 78 | python | def read_graph_execution_traces_event(self, offset):
'Read DebugEvent at given offset from .graph_execution_traces file.\n\n Args:\n offset: Offset to read the DebugEvent proto from.\n\n Returns:\n A DebugEventProto.\n\n Raises:\n `errors.DataLossError` if offset is at a wrong location.\n `IndexError` if offset is out of range of the file.\n '
with self._reader_read_locks[self._graph_execution_traces_path]:
proto_string = self._get_reader(self._graph_execution_traces_path).read(offset)[0]
return debug_event_pb2.DebugEvent.FromString(proto_string) | def read_graph_execution_traces_event(self, offset):
'Read DebugEvent at given offset from .graph_execution_traces file.\n\n Args:\n offset: Offset to read the DebugEvent proto from.\n\n Returns:\n A DebugEventProto.\n\n Raises:\n `errors.DataLossError` if offset is at a wrong location.\n `IndexError` if offset is out of range of the file.\n '
with self._reader_read_locks[self._graph_execution_traces_path]:
proto_string = self._get_reader(self._graph_execution_traces_path).read(offset)[0]
return debug_event_pb2.DebugEvent.FromString(proto_string)<|docstring|>Read DebugEvent at given offset from .graph_execution_traces file.
Args:
offset: Offset to read the DebugEvent proto from.
Returns:
A DebugEventProto.
Raises:
`errors.DataLossError` if offset is at a wrong location.
`IndexError` if offset is out of range of the file.<|endoftext|> |
196bf4249b7ffb402ca8deb5699e7b1c0c521686ec035b9d5392ea57b709509c | def add_inner_graph_id(self, inner_graph_id):
'Add the debugger-generated ID of a graph nested within this graph.\n\n Args:\n inner_graph_id: The debugger-generated ID of the nested inner graph.\n '
assert isinstance(inner_graph_id, six.string_types)
self._inner_graph_ids.append(inner_graph_id) | Add the debugger-generated ID of a graph nested within this graph.
Args:
inner_graph_id: The debugger-generated ID of the nested inner graph. | tensorflow/python/debug/lib/debug_events_reader.py | add_inner_graph_id | Meteorix/tensorflow | 78 | python | def add_inner_graph_id(self, inner_graph_id):
'Add the debugger-generated ID of a graph nested within this graph.\n\n Args:\n inner_graph_id: The debugger-generated ID of the nested inner graph.\n '
assert isinstance(inner_graph_id, six.string_types)
self._inner_graph_ids.append(inner_graph_id) | def add_inner_graph_id(self, inner_graph_id):
'Add the debugger-generated ID of a graph nested within this graph.\n\n Args:\n inner_graph_id: The debugger-generated ID of the nested inner graph.\n '
assert isinstance(inner_graph_id, six.string_types)
self._inner_graph_ids.append(inner_graph_id)<|docstring|>Add the debugger-generated ID of a graph nested within this graph.
Args:
inner_graph_id: The debugger-generated ID of the nested inner graph.<|endoftext|> |
afd28dfe626243319c7ea7ff83c40110b56da491b85a78d0d3bc610c9920ef29 | def add_op(self, graph_op_creation_digest):
'Add an op creation data object.\n\n Args:\n graph_op_creation_digest: A GraphOpCreationDigest data object describing\n the creation of an op inside this graph.\n '
if (graph_op_creation_digest.op_name in self._op_by_name):
raise ValueError(('Duplicate op name: %s (op type: %s)' % (graph_op_creation_digest.op_name, graph_op_creation_digest.op_type)))
self._op_by_name[graph_op_creation_digest.op_name] = graph_op_creation_digest | Add an op creation data object.
Args:
graph_op_creation_digest: A GraphOpCreationDigest data object describing
the creation of an op inside this graph. | tensorflow/python/debug/lib/debug_events_reader.py | add_op | Meteorix/tensorflow | 78 | python | def add_op(self, graph_op_creation_digest):
'Add an op creation data object.\n\n Args:\n graph_op_creation_digest: A GraphOpCreationDigest data object describing\n the creation of an op inside this graph.\n '
if (graph_op_creation_digest.op_name in self._op_by_name):
raise ValueError(('Duplicate op name: %s (op type: %s)' % (graph_op_creation_digest.op_name, graph_op_creation_digest.op_type)))
self._op_by_name[graph_op_creation_digest.op_name] = graph_op_creation_digest | def add_op(self, graph_op_creation_digest):
'Add an op creation data object.\n\n Args:\n graph_op_creation_digest: A GraphOpCreationDigest data object describing\n the creation of an op inside this graph.\n '
if (graph_op_creation_digest.op_name in self._op_by_name):
raise ValueError(('Duplicate op name: %s (op type: %s)' % (graph_op_creation_digest.op_name, graph_op_creation_digest.op_type)))
self._op_by_name[graph_op_creation_digest.op_name] = graph_op_creation_digest<|docstring|>Add an op creation data object.
Args:
graph_op_creation_digest: A GraphOpCreationDigest data object describing
the creation of an op inside this graph.<|endoftext|> |
c2b0adea4b1b2189670e1921076b787a99d62f78331fc470a03b57d575e20e5e | def add_op_consumer(self, src_op_name, src_slot, dst_op_name, dst_slot):
'Add a consuming op for this op.\n\n Args:\n src_op_name: Name of the op of which the output tensor is being consumed.\n src_slot: 0-based output slot of the op being consumed.\n dst_op_name: Name of the consuming op (e.g., "Conv2D_3/BiasAdd")\n dst_slot: 0-based input slot of the consuming op that receives the tensor\n from this op.\n '
self._op_consumers[src_op_name].append((src_slot, dst_op_name, dst_slot)) | Add a consuming op for this op.
Args:
src_op_name: Name of the op of which the output tensor is being consumed.
src_slot: 0-based output slot of the op being consumed.
dst_op_name: Name of the consuming op (e.g., "Conv2D_3/BiasAdd")
dst_slot: 0-based input slot of the consuming op that receives the tensor
from this op. | tensorflow/python/debug/lib/debug_events_reader.py | add_op_consumer | Meteorix/tensorflow | 78 | python | def add_op_consumer(self, src_op_name, src_slot, dst_op_name, dst_slot):
'Add a consuming op for this op.\n\n Args:\n src_op_name: Name of the op of which the output tensor is being consumed.\n src_slot: 0-based output slot of the op being consumed.\n dst_op_name: Name of the consuming op (e.g., "Conv2D_3/BiasAdd")\n dst_slot: 0-based input slot of the consuming op that receives the tensor\n from this op.\n '
self._op_consumers[src_op_name].append((src_slot, dst_op_name, dst_slot)) | def add_op_consumer(self, src_op_name, src_slot, dst_op_name, dst_slot):
'Add a consuming op for this op.\n\n Args:\n src_op_name: Name of the op of which the output tensor is being consumed.\n src_slot: 0-based output slot of the op being consumed.\n dst_op_name: Name of the consuming op (e.g., "Conv2D_3/BiasAdd")\n dst_slot: 0-based input slot of the consuming op that receives the tensor\n from this op.\n '
self._op_consumers[src_op_name].append((src_slot, dst_op_name, dst_slot))<|docstring|>Add a consuming op for this op.
Args:
src_op_name: Name of the op of which the output tensor is being consumed.
src_slot: 0-based output slot of the op being consumed.
dst_op_name: Name of the consuming op (e.g., "Conv2D_3/BiasAdd")
dst_slot: 0-based input slot of the consuming op that receives the tensor
from this op.<|endoftext|> |
fed86f11884228975875d9c0c808952f5f9eb203d5bbe4c4b914b657f43bf6f9 | def get_tensor_id(self, op_name, output_slot):
'Get the ID of a symbolic tensor in this graph.'
return self._op_by_name[op_name].output_tensor_ids[output_slot] | Get the ID of a symbolic tensor in this graph. | tensorflow/python/debug/lib/debug_events_reader.py | get_tensor_id | Meteorix/tensorflow | 78 | python | def get_tensor_id(self, op_name, output_slot):
return self._op_by_name[op_name].output_tensor_ids[output_slot] | def get_tensor_id(self, op_name, output_slot):
return self._op_by_name[op_name].output_tensor_ids[output_slot]<|docstring|>Get the ID of a symbolic tensor in this graph.<|endoftext|> |
a428c623725ebe42346502b6527955a53b24a7112325d45d54f2ed9a3aecd474 | def get_op_creation_digest(self, op_name):
'Get the GraphOpCreationDigest for a op in the graph.'
return self._op_by_name[op_name] | Get the GraphOpCreationDigest for a op in the graph. | tensorflow/python/debug/lib/debug_events_reader.py | get_op_creation_digest | Meteorix/tensorflow | 78 | python | def get_op_creation_digest(self, op_name):
return self._op_by_name[op_name] | def get_op_creation_digest(self, op_name):
return self._op_by_name[op_name]<|docstring|>Get the GraphOpCreationDigest for a op in the graph.<|endoftext|> |
ae5737f2e0f0524ea0f115d1a99ad79e80cb1d633777c19231308fd688086bde | def get_op_consumers(self, src_op_name):
'Get all the downstream consumers of this op.\n\n Only data (non-control) edges are tracked.\n\n Args:\n src_op_name: Name of the op providing the tensor being consumed.\n\n Returns:\n A list of (src_slot, dst_op_name, dst_slot) tuples. In each item of\n the list:\n src_slot: 0-based output slot of the op of which the output tensor\n is being consumed.\n dst_op_name: Name of the consuming op (e.g., "Conv2D_3/BiasAdd")\n dst_slot: 0-based input slot of the consuming op that receives\n the tensor from this op.\n '
return self._op_consumers[src_op_name] | Get all the downstream consumers of this op.
Only data (non-control) edges are tracked.
Args:
src_op_name: Name of the op providing the tensor being consumed.
Returns:
A list of (src_slot, dst_op_name, dst_slot) tuples. In each item of
the list:
src_slot: 0-based output slot of the op of which the output tensor
is being consumed.
dst_op_name: Name of the consuming op (e.g., "Conv2D_3/BiasAdd")
dst_slot: 0-based input slot of the consuming op that receives
the tensor from this op. | tensorflow/python/debug/lib/debug_events_reader.py | get_op_consumers | Meteorix/tensorflow | 78 | python | def get_op_consumers(self, src_op_name):
'Get all the downstream consumers of this op.\n\n Only data (non-control) edges are tracked.\n\n Args:\n src_op_name: Name of the op providing the tensor being consumed.\n\n Returns:\n A list of (src_slot, dst_op_name, dst_slot) tuples. In each item of\n the list:\n src_slot: 0-based output slot of the op of which the output tensor\n is being consumed.\n dst_op_name: Name of the consuming op (e.g., "Conv2D_3/BiasAdd")\n dst_slot: 0-based input slot of the consuming op that receives\n the tensor from this op.\n '
return self._op_consumers[src_op_name] | def get_op_consumers(self, src_op_name):
'Get all the downstream consumers of this op.\n\n Only data (non-control) edges are tracked.\n\n Args:\n src_op_name: Name of the op providing the tensor being consumed.\n\n Returns:\n A list of (src_slot, dst_op_name, dst_slot) tuples. In each item of\n the list:\n src_slot: 0-based output slot of the op of which the output tensor\n is being consumed.\n dst_op_name: Name of the consuming op (e.g., "Conv2D_3/BiasAdd")\n dst_slot: 0-based input slot of the consuming op that receives\n the tensor from this op.\n '
return self._op_consumers[src_op_name]<|docstring|>Get all the downstream consumers of this op.
Only data (non-control) edges are tracked.
Args:
src_op_name: Name of the op providing the tensor being consumed.
Returns:
A list of (src_slot, dst_op_name, dst_slot) tuples. In each item of
the list:
src_slot: 0-based output slot of the op of which the output tensor
is being consumed.
dst_op_name: Name of the consuming op (e.g., "Conv2D_3/BiasAdd")
dst_slot: 0-based input slot of the consuming op that receives
the tensor from this op.<|endoftext|> |
aa1fd968490ec43c561b37f73d363bdb382d932dcf9b05e1276e769227a8bca1 | def _load_source_files(self):
'Incrementally read the .source_files DebugEvent file.'
source_files_iter = self._reader.source_files_iterator()
for (debug_event, offset) in source_files_iter:
source_file = debug_event.source_file
self._host_name_file_path_to_offset[(source_file.host_name, source_file.file_path)] = offset | Incrementally read the .source_files DebugEvent file. | tensorflow/python/debug/lib/debug_events_reader.py | _load_source_files | Meteorix/tensorflow | 78 | python | def _load_source_files(self):
source_files_iter = self._reader.source_files_iterator()
for (debug_event, offset) in source_files_iter:
source_file = debug_event.source_file
self._host_name_file_path_to_offset[(source_file.host_name, source_file.file_path)] = offset | def _load_source_files(self):
source_files_iter = self._reader.source_files_iterator()
for (debug_event, offset) in source_files_iter:
source_file = debug_event.source_file
self._host_name_file_path_to_offset[(source_file.host_name, source_file.file_path)] = offset<|docstring|>Incrementally read the .source_files DebugEvent file.<|endoftext|> |
27a6d308d1ee604866f3a98d4bc9854524f1d459e042d61289beebe6b48777e1 | def _load_stack_frames(self):
'Incrementally read the .stack_frames file.\n\n This must be called after _load_source_files().\n It assumes that the following contract is honored by the writer of the tfdbg\n v2 data file set:\n - Before a stack frame is written to the .stack_frames file, the\n corresponding source file information must have been written to the\n .source_files file first.\n '
stack_frames_iter = self._reader.stack_frames_iterator()
for (debug_event, _) in stack_frames_iter:
stack_frame_with_id = debug_event.stack_frame_with_id
file_line_col = stack_frame_with_id.file_line_col
self._unprocessed_stack_frames[stack_frame_with_id.id] = file_line_col
unprocessed_stack_frame_ids = tuple(self._unprocessed_stack_frames.keys())
for stack_frame_id in unprocessed_stack_frame_ids:
file_line_col = self._unprocessed_stack_frames[stack_frame_id]
if (len(self._host_name_file_path_to_offset) > file_line_col.file_index):
(host_name, file_path) = list(self._host_name_file_path_to_offset.keys())[file_line_col.file_index]
self._stack_frame_by_id[stack_frame_id] = (host_name, file_path, file_line_col.line, file_line_col.func)
del self._unprocessed_stack_frames[stack_frame_id] | Incrementally read the .stack_frames file.
This must be called after _load_source_files().
It assumes that the following contract is honored by the writer of the tfdbg
v2 data file set:
- Before a stack frame is written to the .stack_frames file, the
corresponding source file information must have been written to the
.source_files file first. | tensorflow/python/debug/lib/debug_events_reader.py | _load_stack_frames | Meteorix/tensorflow | 78 | python | def _load_stack_frames(self):
'Incrementally read the .stack_frames file.\n\n This must be called after _load_source_files().\n It assumes that the following contract is honored by the writer of the tfdbg\n v2 data file set:\n - Before a stack frame is written to the .stack_frames file, the\n corresponding source file information must have been written to the\n .source_files file first.\n '
stack_frames_iter = self._reader.stack_frames_iterator()
for (debug_event, _) in stack_frames_iter:
stack_frame_with_id = debug_event.stack_frame_with_id
file_line_col = stack_frame_with_id.file_line_col
self._unprocessed_stack_frames[stack_frame_with_id.id] = file_line_col
unprocessed_stack_frame_ids = tuple(self._unprocessed_stack_frames.keys())
for stack_frame_id in unprocessed_stack_frame_ids:
file_line_col = self._unprocessed_stack_frames[stack_frame_id]
if (len(self._host_name_file_path_to_offset) > file_line_col.file_index):
(host_name, file_path) = list(self._host_name_file_path_to_offset.keys())[file_line_col.file_index]
self._stack_frame_by_id[stack_frame_id] = (host_name, file_path, file_line_col.line, file_line_col.func)
del self._unprocessed_stack_frames[stack_frame_id] | def _load_stack_frames(self):
'Incrementally read the .stack_frames file.\n\n This must be called after _load_source_files().\n It assumes that the following contract is honored by the writer of the tfdbg\n v2 data file set:\n - Before a stack frame is written to the .stack_frames file, the\n corresponding source file information must have been written to the\n .source_files file first.\n '
stack_frames_iter = self._reader.stack_frames_iterator()
for (debug_event, _) in stack_frames_iter:
stack_frame_with_id = debug_event.stack_frame_with_id
file_line_col = stack_frame_with_id.file_line_col
self._unprocessed_stack_frames[stack_frame_with_id.id] = file_line_col
unprocessed_stack_frame_ids = tuple(self._unprocessed_stack_frames.keys())
for stack_frame_id in unprocessed_stack_frame_ids:
file_line_col = self._unprocessed_stack_frames[stack_frame_id]
if (len(self._host_name_file_path_to_offset) > file_line_col.file_index):
(host_name, file_path) = list(self._host_name_file_path_to_offset.keys())[file_line_col.file_index]
self._stack_frame_by_id[stack_frame_id] = (host_name, file_path, file_line_col.line, file_line_col.func)
del self._unprocessed_stack_frames[stack_frame_id]<|docstring|>Incrementally read the .stack_frames file.
This must be called after _load_source_files().
It assumes that the following contract is honored by the writer of the tfdbg
v2 data file set:
- Before a stack frame is written to the .stack_frames file, the
corresponding source file information must have been written to the
.source_files file first.<|endoftext|> |
8dd9bda101c26c2b778cc2a70027d08f303ac5490026fbe729edc2955626bd3c | def _load_graphs(self):
'Incrementally read the .graphs file.\n\n Compiles the DebuggedGraph and GraphOpCreation data.\n '
graphs_iter = self._reader.graphs_iterator()
for (debug_event, offset) in graphs_iter:
if debug_event.graph_op_creation.ByteSize():
op_creation_proto = debug_event.graph_op_creation
op_digest = GraphOpCreationDigest(debug_event.wall_time, offset, op_creation_proto.graph_id, op_creation_proto.op_type, op_creation_proto.op_name, tuple(op_creation_proto.output_tensor_ids), op_creation_proto.code_location.host_name, tuple(op_creation_proto.code_location.stack_frame_ids), input_names=tuple(op_creation_proto.input_names))
self._graph_op_digests.append(op_digest)
debugged_graph = self._graph_by_id[op_creation_proto.graph_id]
debugged_graph.add_op(op_digest)
for (dst_slot, input_name) in enumerate(op_creation_proto.input_names):
(src_op_name, src_slot) = input_name.split(':')
debugged_graph.add_op_consumer(src_op_name, int(src_slot), op_creation_proto.op_name, dst_slot)
elif debug_event.debugged_graph.ByteSize():
graph_proto = debug_event.debugged_graph
graph = DebuggedGraph((graph_proto.graph_name or None), graph_proto.graph_id, outer_graph_id=(graph_proto.outer_context_id or None))
self._graph_by_id[graph_proto.graph_id] = graph
if graph_proto.outer_context_id:
self._graph_by_id[graph_proto.outer_context_id].add_inner_graph_id(graph.graph_id)
elif debug_event.debugged_device.ByteSize():
device_proto = debug_event.debugged_device
self._device_by_id[device_proto.device_id] = DebuggedDevice(device_proto.device_name, device_proto.device_id) | Incrementally read the .graphs file.
Compiles the DebuggedGraph and GraphOpCreation data. | tensorflow/python/debug/lib/debug_events_reader.py | _load_graphs | Meteorix/tensorflow | 78 | python | def _load_graphs(self):
'Incrementally read the .graphs file.\n\n Compiles the DebuggedGraph and GraphOpCreation data.\n '
graphs_iter = self._reader.graphs_iterator()
for (debug_event, offset) in graphs_iter:
if debug_event.graph_op_creation.ByteSize():
op_creation_proto = debug_event.graph_op_creation
op_digest = GraphOpCreationDigest(debug_event.wall_time, offset, op_creation_proto.graph_id, op_creation_proto.op_type, op_creation_proto.op_name, tuple(op_creation_proto.output_tensor_ids), op_creation_proto.code_location.host_name, tuple(op_creation_proto.code_location.stack_frame_ids), input_names=tuple(op_creation_proto.input_names))
self._graph_op_digests.append(op_digest)
debugged_graph = self._graph_by_id[op_creation_proto.graph_id]
debugged_graph.add_op(op_digest)
for (dst_slot, input_name) in enumerate(op_creation_proto.input_names):
(src_op_name, src_slot) = input_name.split(':')
debugged_graph.add_op_consumer(src_op_name, int(src_slot), op_creation_proto.op_name, dst_slot)
elif debug_event.debugged_graph.ByteSize():
graph_proto = debug_event.debugged_graph
graph = DebuggedGraph((graph_proto.graph_name or None), graph_proto.graph_id, outer_graph_id=(graph_proto.outer_context_id or None))
self._graph_by_id[graph_proto.graph_id] = graph
if graph_proto.outer_context_id:
self._graph_by_id[graph_proto.outer_context_id].add_inner_graph_id(graph.graph_id)
elif debug_event.debugged_device.ByteSize():
device_proto = debug_event.debugged_device
self._device_by_id[device_proto.device_id] = DebuggedDevice(device_proto.device_name, device_proto.device_id) | def _load_graphs(self):
'Incrementally read the .graphs file.\n\n Compiles the DebuggedGraph and GraphOpCreation data.\n '
graphs_iter = self._reader.graphs_iterator()
for (debug_event, offset) in graphs_iter:
if debug_event.graph_op_creation.ByteSize():
op_creation_proto = debug_event.graph_op_creation
op_digest = GraphOpCreationDigest(debug_event.wall_time, offset, op_creation_proto.graph_id, op_creation_proto.op_type, op_creation_proto.op_name, tuple(op_creation_proto.output_tensor_ids), op_creation_proto.code_location.host_name, tuple(op_creation_proto.code_location.stack_frame_ids), input_names=tuple(op_creation_proto.input_names))
self._graph_op_digests.append(op_digest)
debugged_graph = self._graph_by_id[op_creation_proto.graph_id]
debugged_graph.add_op(op_digest)
for (dst_slot, input_name) in enumerate(op_creation_proto.input_names):
(src_op_name, src_slot) = input_name.split(':')
debugged_graph.add_op_consumer(src_op_name, int(src_slot), op_creation_proto.op_name, dst_slot)
elif debug_event.debugged_graph.ByteSize():
graph_proto = debug_event.debugged_graph
graph = DebuggedGraph((graph_proto.graph_name or None), graph_proto.graph_id, outer_graph_id=(graph_proto.outer_context_id or None))
self._graph_by_id[graph_proto.graph_id] = graph
if graph_proto.outer_context_id:
self._graph_by_id[graph_proto.outer_context_id].add_inner_graph_id(graph.graph_id)
elif debug_event.debugged_device.ByteSize():
device_proto = debug_event.debugged_device
self._device_by_id[device_proto.device_id] = DebuggedDevice(device_proto.device_name, device_proto.device_id)<|docstring|>Incrementally read the .graphs file.
Compiles the DebuggedGraph and GraphOpCreation data.<|endoftext|> |
f489edb3206d7caae9539bd57dacb686aee2296bd7fe0d6e979610916a13ca86 | def _load_graph_execution_traces(self):
'Incrementally load the .graph_execution_traces file.'
traces_iter = self._reader.graph_execution_traces_iterator()
for (debug_event, offset) in traces_iter:
self._graph_execution_trace_digests.append(self._graph_execution_trace_digest_from_debug_event_proto(debug_event, offset))
if self._monitors:
graph_execution_trace = self._graph_execution_trace_from_debug_event_proto(debug_event, offset)
for monitor in self._monitors:
monitor.on_graph_execution_trace((len(self._graph_execution_trace_digests) - 1), graph_execution_trace) | Incrementally load the .graph_execution_traces file. | tensorflow/python/debug/lib/debug_events_reader.py | _load_graph_execution_traces | Meteorix/tensorflow | 78 | python | def _load_graph_execution_traces(self):
traces_iter = self._reader.graph_execution_traces_iterator()
for (debug_event, offset) in traces_iter:
self._graph_execution_trace_digests.append(self._graph_execution_trace_digest_from_debug_event_proto(debug_event, offset))
if self._monitors:
graph_execution_trace = self._graph_execution_trace_from_debug_event_proto(debug_event, offset)
for monitor in self._monitors:
monitor.on_graph_execution_trace((len(self._graph_execution_trace_digests) - 1), graph_execution_trace) | def _load_graph_execution_traces(self):
traces_iter = self._reader.graph_execution_traces_iterator()
for (debug_event, offset) in traces_iter:
self._graph_execution_trace_digests.append(self._graph_execution_trace_digest_from_debug_event_proto(debug_event, offset))
if self._monitors:
graph_execution_trace = self._graph_execution_trace_from_debug_event_proto(debug_event, offset)
for monitor in self._monitors:
monitor.on_graph_execution_trace((len(self._graph_execution_trace_digests) - 1), graph_execution_trace)<|docstring|>Incrementally load the .graph_execution_traces file.<|endoftext|> |
033a2de4494e485717860ef02be0e201c98f345c058f83917a1c409fe1119677 | def _graph_execution_trace_from_debug_event_proto(self, debug_event, offset):
'Convert a DebugEvent proto into a GraphExecutionTrace data object.'
trace_proto = debug_event.graph_execution_trace
graph_ids = [trace_proto.tfdbg_context_id]
while True:
graph = self.graph_by_id(graph_ids[0])
if graph.outer_graph_id:
graph_ids.insert(0, graph.outer_graph_id)
else:
break
if (trace_proto.tensor_debug_mode == debug_event_pb2.TensorDebugMode.FULL_TENSOR):
debug_tensor_value = None
else:
debug_tensor_value = _parse_tensor_value(trace_proto.tensor_proto, return_list=True)
return GraphExecutionTrace(self._graph_execution_trace_digest_from_debug_event_proto(debug_event, offset), graph_ids=graph_ids, tensor_debug_mode=trace_proto.tensor_debug_mode, debug_tensor_value=debug_tensor_value, device_name=(trace_proto.device_name or None)) | Convert a DebugEvent proto into a GraphExecutionTrace data object. | tensorflow/python/debug/lib/debug_events_reader.py | _graph_execution_trace_from_debug_event_proto | Meteorix/tensorflow | 78 | python | def _graph_execution_trace_from_debug_event_proto(self, debug_event, offset):
trace_proto = debug_event.graph_execution_trace
graph_ids = [trace_proto.tfdbg_context_id]
while True:
graph = self.graph_by_id(graph_ids[0])
if graph.outer_graph_id:
graph_ids.insert(0, graph.outer_graph_id)
else:
break
if (trace_proto.tensor_debug_mode == debug_event_pb2.TensorDebugMode.FULL_TENSOR):
debug_tensor_value = None
else:
debug_tensor_value = _parse_tensor_value(trace_proto.tensor_proto, return_list=True)
return GraphExecutionTrace(self._graph_execution_trace_digest_from_debug_event_proto(debug_event, offset), graph_ids=graph_ids, tensor_debug_mode=trace_proto.tensor_debug_mode, debug_tensor_value=debug_tensor_value, device_name=(trace_proto.device_name or None)) | def _graph_execution_trace_from_debug_event_proto(self, debug_event, offset):
trace_proto = debug_event.graph_execution_trace
graph_ids = [trace_proto.tfdbg_context_id]
while True:
graph = self.graph_by_id(graph_ids[0])
if graph.outer_graph_id:
graph_ids.insert(0, graph.outer_graph_id)
else:
break
if (trace_proto.tensor_debug_mode == debug_event_pb2.TensorDebugMode.FULL_TENSOR):
debug_tensor_value = None
else:
debug_tensor_value = _parse_tensor_value(trace_proto.tensor_proto, return_list=True)
return GraphExecutionTrace(self._graph_execution_trace_digest_from_debug_event_proto(debug_event, offset), graph_ids=graph_ids, tensor_debug_mode=trace_proto.tensor_debug_mode, debug_tensor_value=debug_tensor_value, device_name=(trace_proto.device_name or None))<|docstring|>Convert a DebugEvent proto into a GraphExecutionTrace data object.<|endoftext|> |
dfe4fc8395c6356e1436d979227989108f190f2f240f268746ef0d054e6ace46 | def _lookup_op_type(self, graph_id, op_name):
'Lookup the type of an op by name and the immediately enclosing graph.\n\n Args:\n graph_id: Debugger-generated ID of the immediately-enclosing graph.\n op_name: Name of the op.\n\n Returns:\n Op type as a str.\n '
return self._graph_by_id[graph_id].get_op_creation_digest(op_name).op_type | Lookup the type of an op by name and the immediately enclosing graph.
Args:
graph_id: Debugger-generated ID of the immediately-enclosing graph.
op_name: Name of the op.
Returns:
Op type as a str. | tensorflow/python/debug/lib/debug_events_reader.py | _lookup_op_type | Meteorix/tensorflow | 78 | python | def _lookup_op_type(self, graph_id, op_name):
'Lookup the type of an op by name and the immediately enclosing graph.\n\n Args:\n graph_id: Debugger-generated ID of the immediately-enclosing graph.\n op_name: Name of the op.\n\n Returns:\n Op type as a str.\n '
return self._graph_by_id[graph_id].get_op_creation_digest(op_name).op_type | def _lookup_op_type(self, graph_id, op_name):
'Lookup the type of an op by name and the immediately enclosing graph.\n\n Args:\n graph_id: Debugger-generated ID of the immediately-enclosing graph.\n op_name: Name of the op.\n\n Returns:\n Op type as a str.\n '
return self._graph_by_id[graph_id].get_op_creation_digest(op_name).op_type<|docstring|>Lookup the type of an op by name and the immediately enclosing graph.
Args:
graph_id: Debugger-generated ID of the immediately-enclosing graph.
op_name: Name of the op.
Returns:
Op type as a str.<|endoftext|> |
3472dec08384387ea99c1e78fc11563c892e9a2e19db05ab0902359a7bafee32 | def _load_execution(self):
'Incrementally read the .execution file.'
execution_iter = self._reader.execution_iterator()
for (debug_event, offset) in execution_iter:
self._execution_digests.append(_execution_digest_from_debug_event_proto(debug_event, offset))
if self._monitors:
execution = _execution_from_debug_event_proto(debug_event, offset)
for monitor in self._monitors:
monitor.on_execution((len(self._execution_digests) - 1), execution) | Incrementally read the .execution file. | tensorflow/python/debug/lib/debug_events_reader.py | _load_execution | Meteorix/tensorflow | 78 | python | def _load_execution(self):
execution_iter = self._reader.execution_iterator()
for (debug_event, offset) in execution_iter:
self._execution_digests.append(_execution_digest_from_debug_event_proto(debug_event, offset))
if self._monitors:
execution = _execution_from_debug_event_proto(debug_event, offset)
for monitor in self._monitors:
monitor.on_execution((len(self._execution_digests) - 1), execution) | def _load_execution(self):
execution_iter = self._reader.execution_iterator()
for (debug_event, offset) in execution_iter:
self._execution_digests.append(_execution_digest_from_debug_event_proto(debug_event, offset))
if self._monitors:
execution = _execution_from_debug_event_proto(debug_event, offset)
for monitor in self._monitors:
monitor.on_execution((len(self._execution_digests) - 1), execution)<|docstring|>Incrementally read the .execution file.<|endoftext|> |
73920d5926d8729b373d7472a264d2ba4946848628f6347abbb597ce42866bac | def update(self):
'Perform incremental read of the file set.'
self._load_source_files()
self._load_stack_frames()
self._load_graphs()
self._load_graph_execution_traces()
self._load_execution() | Perform incremental read of the file set. | tensorflow/python/debug/lib/debug_events_reader.py | update | Meteorix/tensorflow | 78 | python | def update(self):
self._load_source_files()
self._load_stack_frames()
self._load_graphs()
self._load_graph_execution_traces()
self._load_execution() | def update(self):
self._load_source_files()
self._load_stack_frames()
self._load_graphs()
self._load_graph_execution_traces()
self._load_execution()<|docstring|>Perform incremental read of the file set.<|endoftext|> |
86b0f57f7b98c577459bc9a7e09a96975b540a57a35f1209fe1dc89b9294fe32 | def source_file_list(self):
'Get a list of source files known to the debugger data reader.\n\n Returns:\n A tuple of `(host_name, file_path)` tuples.\n '
return tuple(self._host_name_file_path_to_offset.keys()) | Get a list of source files known to the debugger data reader.
Returns:
A tuple of `(host_name, file_path)` tuples. | tensorflow/python/debug/lib/debug_events_reader.py | source_file_list | Meteorix/tensorflow | 78 | python | def source_file_list(self):
'Get a list of source files known to the debugger data reader.\n\n Returns:\n A tuple of `(host_name, file_path)` tuples.\n '
return tuple(self._host_name_file_path_to_offset.keys()) | def source_file_list(self):
'Get a list of source files known to the debugger data reader.\n\n Returns:\n A tuple of `(host_name, file_path)` tuples.\n '
return tuple(self._host_name_file_path_to_offset.keys())<|docstring|>Get a list of source files known to the debugger data reader.
Returns:
A tuple of `(host_name, file_path)` tuples.<|endoftext|> |
a2bc6741e50d66d4497dab3de2f33a8f8cea027785ba8b390dc4a718d592c2df | def source_lines(self, host_name, file_path):
'Read the line-by-line content of a source file.\n\n Args:\n host_name: Host name on which the source file is located.\n file_path: File path at which the source file is located.\n\n Returns:\n Lines of the source file as a `list` of `str`s.\n '
offset = self._host_name_file_path_to_offset[(host_name, file_path)]
return list(self._reader.read_source_files_event(offset).source_file.lines) | Read the line-by-line content of a source file.
Args:
host_name: Host name on which the source file is located.
file_path: File path at which the source file is located.
Returns:
Lines of the source file as a `list` of `str`s. | tensorflow/python/debug/lib/debug_events_reader.py | source_lines | Meteorix/tensorflow | 78 | python | def source_lines(self, host_name, file_path):
'Read the line-by-line content of a source file.\n\n Args:\n host_name: Host name on which the source file is located.\n file_path: File path at which the source file is located.\n\n Returns:\n Lines of the source file as a `list` of `str`s.\n '
offset = self._host_name_file_path_to_offset[(host_name, file_path)]
return list(self._reader.read_source_files_event(offset).source_file.lines) | def source_lines(self, host_name, file_path):
'Read the line-by-line content of a source file.\n\n Args:\n host_name: Host name on which the source file is located.\n file_path: File path at which the source file is located.\n\n Returns:\n Lines of the source file as a `list` of `str`s.\n '
offset = self._host_name_file_path_to_offset[(host_name, file_path)]
return list(self._reader.read_source_files_event(offset).source_file.lines)<|docstring|>Read the line-by-line content of a source file.
Args:
host_name: Host name on which the source file is located.
file_path: File path at which the source file is located.
Returns:
Lines of the source file as a `list` of `str`s.<|endoftext|> |
51b509481b50a96d6aaf13a96f3382bf3bf9e52ceb3eb4bd73cc80e2e9a5f471 | def starting_wall_time(self):
'Wall timestamp for when the debugged TensorFlow program started.\n\n Returns:\n Stating wall time as seconds since the epoch, as a `float`.\n '
return self._starting_wall_time | Wall timestamp for when the debugged TensorFlow program started.
Returns:
Stating wall time as seconds since the epoch, as a `float`. | tensorflow/python/debug/lib/debug_events_reader.py | starting_wall_time | Meteorix/tensorflow | 78 | python | def starting_wall_time(self):
'Wall timestamp for when the debugged TensorFlow program started.\n\n Returns:\n Stating wall time as seconds since the epoch, as a `float`.\n '
return self._starting_wall_time | def starting_wall_time(self):
'Wall timestamp for when the debugged TensorFlow program started.\n\n Returns:\n Stating wall time as seconds since the epoch, as a `float`.\n '
return self._starting_wall_time<|docstring|>Wall timestamp for when the debugged TensorFlow program started.
Returns:
Stating wall time as seconds since the epoch, as a `float`.<|endoftext|> |
1a6308d4a1b8f5aa9f3f7a6d25d7aeb8a16d6589b9d34e8646af8e132070b05b | def tensorflow_version(self):
'TensorFlow version used in the debugged TensorFlow program.\n\n Note: this is not necessarily the same as the version of TensorFlow used to\n load the DebugEvent file set.\n\n Returns:\n TensorFlow version used by the debugged program, as a `str`.\n '
return self._tensorflow_version | TensorFlow version used in the debugged TensorFlow program.
Note: this is not necessarily the same as the version of TensorFlow used to
load the DebugEvent file set.
Returns:
TensorFlow version used by the debugged program, as a `str`. | tensorflow/python/debug/lib/debug_events_reader.py | tensorflow_version | Meteorix/tensorflow | 78 | python | def tensorflow_version(self):
'TensorFlow version used in the debugged TensorFlow program.\n\n Note: this is not necessarily the same as the version of TensorFlow used to\n load the DebugEvent file set.\n\n Returns:\n TensorFlow version used by the debugged program, as a `str`.\n '
return self._tensorflow_version | def tensorflow_version(self):
'TensorFlow version used in the debugged TensorFlow program.\n\n Note: this is not necessarily the same as the version of TensorFlow used to\n load the DebugEvent file set.\n\n Returns:\n TensorFlow version used by the debugged program, as a `str`.\n '
return self._tensorflow_version<|docstring|>TensorFlow version used in the debugged TensorFlow program.
Note: this is not necessarily the same as the version of TensorFlow used to
load the DebugEvent file set.
Returns:
TensorFlow version used by the debugged program, as a `str`.<|endoftext|> |
bd8cb922bd5a2d205f3c3d1bd7a66327c0f324182892e7021db18e1b5b0dbe7d | def tfdbg_run_id(self):
'Get the debugger run ID of the debugged TensorFlow program.'
return self._tfdbg_run_id | Get the debugger run ID of the debugged TensorFlow program. | tensorflow/python/debug/lib/debug_events_reader.py | tfdbg_run_id | Meteorix/tensorflow | 78 | python | def tfdbg_run_id(self):
return self._tfdbg_run_id | def tfdbg_run_id(self):
return self._tfdbg_run_id<|docstring|>Get the debugger run ID of the debugged TensorFlow program.<|endoftext|> |
d75658aa7f1e17c05a74d2949f57391b20b6ec71cd240cdd3098c9bc7b960c11 | def outermost_graphs(self):
'Get the number of outer most graphs read so far.'
return [graph for graph in self._graph_by_id.values() if (not graph.outer_graph_id)] | Get the number of outer most graphs read so far. | tensorflow/python/debug/lib/debug_events_reader.py | outermost_graphs | Meteorix/tensorflow | 78 | python | def outermost_graphs(self):
return [graph for graph in self._graph_by_id.values() if (not graph.outer_graph_id)] | def outermost_graphs(self):
return [graph for graph in self._graph_by_id.values() if (not graph.outer_graph_id)]<|docstring|>Get the number of outer most graphs read so far.<|endoftext|> |
efc3eeb25c5b1145989f2155da772db8d8091551fed0abf711fe0fd412d89be5 | def graph_by_id(self, graph_id):
'Get a DebuggedGraph object by its ID.'
return self._graph_by_id[graph_id] | Get a DebuggedGraph object by its ID. | tensorflow/python/debug/lib/debug_events_reader.py | graph_by_id | Meteorix/tensorflow | 78 | python | def graph_by_id(self, graph_id):
return self._graph_by_id[graph_id] | def graph_by_id(self, graph_id):
return self._graph_by_id[graph_id]<|docstring|>Get a DebuggedGraph object by its ID.<|endoftext|> |
52bb2001c27e5ca344440b007d83e040adfd8d2540ef329df1c673ec1358d3ce | def device_name_by_id(self, device_id):
'Get the name of a device by the debugger-generated ID of the device.'
return self._device_by_id[device_id].device_name | Get the name of a device by the debugger-generated ID of the device. | tensorflow/python/debug/lib/debug_events_reader.py | device_name_by_id | Meteorix/tensorflow | 78 | python | def device_name_by_id(self, device_id):
return self._device_by_id[device_id].device_name | def device_name_by_id(self, device_id):
return self._device_by_id[device_id].device_name<|docstring|>Get the name of a device by the debugger-generated ID of the device.<|endoftext|> |
892add5288bc6cbdd567595b957921856ffdaf9750c124cb682363f2c5c75226 | def device_name_map(self):
'Get a map mapping device IDs to device names.'
return {device_id: self._device_by_id[device_id].device_name for device_id in self._device_by_id} | Get a map mapping device IDs to device names. | tensorflow/python/debug/lib/debug_events_reader.py | device_name_map | Meteorix/tensorflow | 78 | python | def device_name_map(self):
return {device_id: self._device_by_id[device_id].device_name for device_id in self._device_by_id} | def device_name_map(self):
return {device_id: self._device_by_id[device_id].device_name for device_id in self._device_by_id}<|docstring|>Get a map mapping device IDs to device names.<|endoftext|> |
0aad55a0e0b2f0944b5d613f7fa73cd7aaaa69dd894953a9ecef0aa4995d7c8e | def graph_op_digests(self, op_type=None):
'Get the list of the digests for graph-op creation so far.\n\n Args:\n op_type: Optional op type to filter the creation events with.\n\n Returns:\n A list of `GraphOpCreationDigest` objects.\n '
if (op_type is not None):
return [digest for digest in self._graph_op_digests if (digest.op_type == op_type)]
else:
return self._graph_op_digests | Get the list of the digests for graph-op creation so far.
Args:
op_type: Optional op type to filter the creation events with.
Returns:
A list of `GraphOpCreationDigest` objects. | tensorflow/python/debug/lib/debug_events_reader.py | graph_op_digests | Meteorix/tensorflow | 78 | python | def graph_op_digests(self, op_type=None):
'Get the list of the digests for graph-op creation so far.\n\n Args:\n op_type: Optional op type to filter the creation events with.\n\n Returns:\n A list of `GraphOpCreationDigest` objects.\n '
if (op_type is not None):
return [digest for digest in self._graph_op_digests if (digest.op_type == op_type)]
else:
return self._graph_op_digests | def graph_op_digests(self, op_type=None):
'Get the list of the digests for graph-op creation so far.\n\n Args:\n op_type: Optional op type to filter the creation events with.\n\n Returns:\n A list of `GraphOpCreationDigest` objects.\n '
if (op_type is not None):
return [digest for digest in self._graph_op_digests if (digest.op_type == op_type)]
else:
return self._graph_op_digests<|docstring|>Get the list of the digests for graph-op creation so far.
Args:
op_type: Optional op type to filter the creation events with.
Returns:
A list of `GraphOpCreationDigest` objects.<|endoftext|> |
2e18cd61ed27d985b5ee73f4caed4d58671b98b4a7c05323a06fbd12f6767000 | def graph_execution_traces(self, digest=False, begin=None, end=None):
'Get all the intra-graph execution tensor traces read so far.\n\n Args:\n digest: Whether the results will be returned in the more light-weight\n digest form.\n begin: Optional beginning index for the requested traces or their digests.\n Python-style negative indices are supported.\n end: Optional ending index for the requested traces or their digests.\n Python-style negative indices are supported.\n\n Returns:\n If `digest`: a `list` of `GraphExecutionTraceDigest` objects.\n Else: a `list` of `GraphExecutionTrace` objects.\n '
digests = self._graph_execution_trace_digests
if ((begin is not None) or (end is not None)):
begin = (begin or 0)
end = (end or len(digests))
digests = digests[begin:end]
if digest:
return digests
else:
return [self.read_graph_execution_trace(digest) for digest in digests] | Get all the intra-graph execution tensor traces read so far.
Args:
digest: Whether the results will be returned in the more light-weight
digest form.
begin: Optional beginning index for the requested traces or their digests.
Python-style negative indices are supported.
end: Optional ending index for the requested traces or their digests.
Python-style negative indices are supported.
Returns:
If `digest`: a `list` of `GraphExecutionTraceDigest` objects.
Else: a `list` of `GraphExecutionTrace` objects. | tensorflow/python/debug/lib/debug_events_reader.py | graph_execution_traces | Meteorix/tensorflow | 78 | python | def graph_execution_traces(self, digest=False, begin=None, end=None):
'Get all the intra-graph execution tensor traces read so far.\n\n Args:\n digest: Whether the results will be returned in the more light-weight\n digest form.\n begin: Optional beginning index for the requested traces or their digests.\n Python-style negative indices are supported.\n end: Optional ending index for the requested traces or their digests.\n Python-style negative indices are supported.\n\n Returns:\n If `digest`: a `list` of `GraphExecutionTraceDigest` objects.\n Else: a `list` of `GraphExecutionTrace` objects.\n '
digests = self._graph_execution_trace_digests
if ((begin is not None) or (end is not None)):
begin = (begin or 0)
end = (end or len(digests))
digests = digests[begin:end]
if digest:
return digests
else:
return [self.read_graph_execution_trace(digest) for digest in digests] | def graph_execution_traces(self, digest=False, begin=None, end=None):
'Get all the intra-graph execution tensor traces read so far.\n\n Args:\n digest: Whether the results will be returned in the more light-weight\n digest form.\n begin: Optional beginning index for the requested traces or their digests.\n Python-style negative indices are supported.\n end: Optional ending index for the requested traces or their digests.\n Python-style negative indices are supported.\n\n Returns:\n If `digest`: a `list` of `GraphExecutionTraceDigest` objects.\n Else: a `list` of `GraphExecutionTrace` objects.\n '
digests = self._graph_execution_trace_digests
if ((begin is not None) or (end is not None)):
begin = (begin or 0)
end = (end or len(digests))
digests = digests[begin:end]
if digest:
return digests
else:
return [self.read_graph_execution_trace(digest) for digest in digests]<|docstring|>Get all the intra-graph execution tensor traces read so far.
Args:
digest: Whether the results will be returned in the more light-weight
digest form.
begin: Optional beginning index for the requested traces or their digests.
Python-style negative indices are supported.
end: Optional ending index for the requested traces or their digests.
Python-style negative indices are supported.
Returns:
If `digest`: a `list` of `GraphExecutionTraceDigest` objects.
Else: a `list` of `GraphExecutionTrace` objects.<|endoftext|> |
c73926b4d8399627dce703375c66a65b06368d09cd6f340fd21bf52f7c3901b0 | def num_graph_execution_traces(self):
'Get the number of graph execution traces read so far.'
return len(self._graph_execution_trace_digests) | Get the number of graph execution traces read so far. | tensorflow/python/debug/lib/debug_events_reader.py | num_graph_execution_traces | Meteorix/tensorflow | 78 | python | def num_graph_execution_traces(self):
return len(self._graph_execution_trace_digests) | def num_graph_execution_traces(self):
return len(self._graph_execution_trace_digests)<|docstring|>Get the number of graph execution traces read so far.<|endoftext|> |
9c9940de92babb454e3de9662502f9de419924fdd6c1d19a5929a39401ba1fc2 | def executions(self, digest=False, begin=None, end=None):
'Get `Execution`s or `ExecutionDigest`s this reader has read so far.\n\n Args:\n digest: Whether the results are returned in a digest form, i.e.,\n `ExecutionDigest` format, instead of the more detailed `Execution`\n format.\n begin: Optional beginning index for the requested execution data objects\n or their digests. Python-style negative indices are supported.\n end: Optional ending index for the requested execution data objects or\n their digests. Python-style negative indices are supported.\n\n Returns:\n If `digest`: a `list` of `ExecutionDigest` objects.\n Else: a `list` of `Execution` objects.\n '
digests = self._execution_digests
if ((begin is not None) or (end is not None)):
begin = (begin or 0)
end = (end or len(digests))
digests = digests[begin:end]
if digest:
return digests
else:
return [self.read_execution(digest) for digest in digests] | Get `Execution`s or `ExecutionDigest`s this reader has read so far.
Args:
digest: Whether the results are returned in a digest form, i.e.,
`ExecutionDigest` format, instead of the more detailed `Execution`
format.
begin: Optional beginning index for the requested execution data objects
or their digests. Python-style negative indices are supported.
end: Optional ending index for the requested execution data objects or
their digests. Python-style negative indices are supported.
Returns:
If `digest`: a `list` of `ExecutionDigest` objects.
Else: a `list` of `Execution` objects. | tensorflow/python/debug/lib/debug_events_reader.py | executions | Meteorix/tensorflow | 78 | python | def executions(self, digest=False, begin=None, end=None):
'Get `Execution`s or `ExecutionDigest`s this reader has read so far.\n\n Args:\n digest: Whether the results are returned in a digest form, i.e.,\n `ExecutionDigest` format, instead of the more detailed `Execution`\n format.\n begin: Optional beginning index for the requested execution data objects\n or their digests. Python-style negative indices are supported.\n end: Optional ending index for the requested execution data objects or\n their digests. Python-style negative indices are supported.\n\n Returns:\n If `digest`: a `list` of `ExecutionDigest` objects.\n Else: a `list` of `Execution` objects.\n '
digests = self._execution_digests
if ((begin is not None) or (end is not None)):
begin = (begin or 0)
end = (end or len(digests))
digests = digests[begin:end]
if digest:
return digests
else:
return [self.read_execution(digest) for digest in digests] | def executions(self, digest=False, begin=None, end=None):
'Get `Execution`s or `ExecutionDigest`s this reader has read so far.\n\n Args:\n digest: Whether the results are returned in a digest form, i.e.,\n `ExecutionDigest` format, instead of the more detailed `Execution`\n format.\n begin: Optional beginning index for the requested execution data objects\n or their digests. Python-style negative indices are supported.\n end: Optional ending index for the requested execution data objects or\n their digests. Python-style negative indices are supported.\n\n Returns:\n If `digest`: a `list` of `ExecutionDigest` objects.\n Else: a `list` of `Execution` objects.\n '
digests = self._execution_digests
if ((begin is not None) or (end is not None)):
begin = (begin or 0)
end = (end or len(digests))
digests = digests[begin:end]
if digest:
return digests
else:
return [self.read_execution(digest) for digest in digests]<|docstring|>Get `Execution`s or `ExecutionDigest`s this reader has read so far.
Args:
digest: Whether the results are returned in a digest form, i.e.,
`ExecutionDigest` format, instead of the more detailed `Execution`
format.
begin: Optional beginning index for the requested execution data objects
or their digests. Python-style negative indices are supported.
end: Optional ending index for the requested execution data objects or
their digests. Python-style negative indices are supported.
Returns:
If `digest`: a `list` of `ExecutionDigest` objects.
Else: a `list` of `Execution` objects.<|endoftext|> |
7e537948e38cf2ea2e768570a05d19ee25f77e02596797730a0aa0b45d5633c0 | def num_executions(self):
'Get the number of execution events read so far.'
return len(self._execution_digests) | Get the number of execution events read so far. | tensorflow/python/debug/lib/debug_events_reader.py | num_executions | Meteorix/tensorflow | 78 | python | def num_executions(self):
return len(self._execution_digests) | def num_executions(self):
return len(self._execution_digests)<|docstring|>Get the number of execution events read so far.<|endoftext|> |
7fcac17359e3002d7a12bd28e9e7aff3c1b21b585103629eaf05e0e8d69a15de | def read_execution(self, execution_digest):
'Read a detailed Execution object.'
debug_event = self._reader.read_execution_event(execution_digest.offset)
return _execution_from_debug_event_proto(debug_event, execution_digest.offset) | Read a detailed Execution object. | tensorflow/python/debug/lib/debug_events_reader.py | read_execution | Meteorix/tensorflow | 78 | python | def read_execution(self, execution_digest):
debug_event = self._reader.read_execution_event(execution_digest.offset)
return _execution_from_debug_event_proto(debug_event, execution_digest.offset) | def read_execution(self, execution_digest):
debug_event = self._reader.read_execution_event(execution_digest.offset)
return _execution_from_debug_event_proto(debug_event, execution_digest.offset)<|docstring|>Read a detailed Execution object.<|endoftext|> |
0ec036593f522ed8ee71c719634df5d53797d58f99ee0a92fbcfc4eba1efb2e7 | def read_graph_execution_trace(self, graph_execution_trace_digest):
'Read the detailed graph execution trace.\n\n Args:\n graph_execution_trace_digest: A `GraphExecutionTraceDigest` object.\n\n Returns:\n The corresponding `GraphExecutionTrace` object.\n '
debug_event = self._reader.read_graph_execution_traces_event(graph_execution_trace_digest.offset)
return self._graph_execution_trace_from_debug_event_proto(debug_event, graph_execution_trace_digest.offset) | Read the detailed graph execution trace.
Args:
graph_execution_trace_digest: A `GraphExecutionTraceDigest` object.
Returns:
The corresponding `GraphExecutionTrace` object. | tensorflow/python/debug/lib/debug_events_reader.py | read_graph_execution_trace | Meteorix/tensorflow | 78 | python | def read_graph_execution_trace(self, graph_execution_trace_digest):
'Read the detailed graph execution trace.\n\n Args:\n graph_execution_trace_digest: A `GraphExecutionTraceDigest` object.\n\n Returns:\n The corresponding `GraphExecutionTrace` object.\n '
debug_event = self._reader.read_graph_execution_traces_event(graph_execution_trace_digest.offset)
return self._graph_execution_trace_from_debug_event_proto(debug_event, graph_execution_trace_digest.offset) | def read_graph_execution_trace(self, graph_execution_trace_digest):
'Read the detailed graph execution trace.\n\n Args:\n graph_execution_trace_digest: A `GraphExecutionTraceDigest` object.\n\n Returns:\n The corresponding `GraphExecutionTrace` object.\n '
debug_event = self._reader.read_graph_execution_traces_event(graph_execution_trace_digest.offset)
return self._graph_execution_trace_from_debug_event_proto(debug_event, graph_execution_trace_digest.offset)<|docstring|>Read the detailed graph execution trace.
Args:
graph_execution_trace_digest: A `GraphExecutionTraceDigest` object.
Returns:
The corresponding `GraphExecutionTrace` object.<|endoftext|> |
9c78ec4d4267c4334c8a0a8e8b045afc5863a5d43c5a27552697dea7485182ef | def read_execution_stack_trace(self, execution):
'Read the stack trace of a given Execution object.\n\n Args:\n execution: The Execution object of interest.\n\n Returns:\n 1. The host name.\n 2. The stack trace, as a list of (file_path, lineno, func) tuples.\n '
host_name = self._stack_frame_by_id[execution.stack_frame_ids[0]][0]
return (host_name, [self._stack_frame_by_id[frame_id][1:] for frame_id in execution.stack_frame_ids]) | Read the stack trace of a given Execution object.
Args:
execution: The Execution object of interest.
Returns:
1. The host name.
2. The stack trace, as a list of (file_path, lineno, func) tuples. | tensorflow/python/debug/lib/debug_events_reader.py | read_execution_stack_trace | Meteorix/tensorflow | 78 | python | def read_execution_stack_trace(self, execution):
'Read the stack trace of a given Execution object.\n\n Args:\n execution: The Execution object of interest.\n\n Returns:\n 1. The host name.\n 2. The stack trace, as a list of (file_path, lineno, func) tuples.\n '
host_name = self._stack_frame_by_id[execution.stack_frame_ids[0]][0]
return (host_name, [self._stack_frame_by_id[frame_id][1:] for frame_id in execution.stack_frame_ids]) | def read_execution_stack_trace(self, execution):
'Read the stack trace of a given Execution object.\n\n Args:\n execution: The Execution object of interest.\n\n Returns:\n 1. The host name.\n 2. The stack trace, as a list of (file_path, lineno, func) tuples.\n '
host_name = self._stack_frame_by_id[execution.stack_frame_ids[0]][0]
return (host_name, [self._stack_frame_by_id[frame_id][1:] for frame_id in execution.stack_frame_ids])<|docstring|>Read the stack trace of a given Execution object.
Args:
execution: The Execution object of interest.
Returns:
1. The host name.
2. The stack trace, as a list of (file_path, lineno, func) tuples.<|endoftext|> |
40cb5dee6922bf627261fc8c5081165053146695109f0d5ed5b69be3f7e8b1b8 | def read_graph_op_creation_stack_trace(self, graph_op_creation_digest):
'Read the stack trace of a given graph op creation object.\n\n Args:\n graph_op_creation_digest: The GraphOpCreationDigest object of interest.\n\n Returns:\n A tuple consisting of:\n 1. The host name.\n 2. The stack trace, as a list of (file_path, lineno, func) tuples.\n '
return (graph_op_creation_digest.host_name, [self._stack_frame_by_id[frame_id][1:] for frame_id in graph_op_creation_digest.stack_frame_ids]) | Read the stack trace of a given graph op creation object.
Args:
graph_op_creation_digest: The GraphOpCreationDigest object of interest.
Returns:
A tuple consisting of:
1. The host name.
2. The stack trace, as a list of (file_path, lineno, func) tuples. | tensorflow/python/debug/lib/debug_events_reader.py | read_graph_op_creation_stack_trace | Meteorix/tensorflow | 78 | python | def read_graph_op_creation_stack_trace(self, graph_op_creation_digest):
'Read the stack trace of a given graph op creation object.\n\n Args:\n graph_op_creation_digest: The GraphOpCreationDigest object of interest.\n\n Returns:\n A tuple consisting of:\n 1. The host name.\n 2. The stack trace, as a list of (file_path, lineno, func) tuples.\n '
return (graph_op_creation_digest.host_name, [self._stack_frame_by_id[frame_id][1:] for frame_id in graph_op_creation_digest.stack_frame_ids]) | def read_graph_op_creation_stack_trace(self, graph_op_creation_digest):
'Read the stack trace of a given graph op creation object.\n\n Args:\n graph_op_creation_digest: The GraphOpCreationDigest object of interest.\n\n Returns:\n A tuple consisting of:\n 1. The host name.\n 2. The stack trace, as a list of (file_path, lineno, func) tuples.\n '
return (graph_op_creation_digest.host_name, [self._stack_frame_by_id[frame_id][1:] for frame_id in graph_op_creation_digest.stack_frame_ids])<|docstring|>Read the stack trace of a given graph op creation object.
Args:
graph_op_creation_digest: The GraphOpCreationDigest object of interest.
Returns:
A tuple consisting of:
1. The host name.
2. The stack trace, as a list of (file_path, lineno, func) tuples.<|endoftext|> |
f05d22c4e9c442ace6b782b8ce0bce8bae1d4059138a256fb48d59d8836d8259 | def execution_to_tensor_values(self, execution):
'Read the full tensor values from an Execution or ExecutionDigest.\n\n Args:\n execution: An `ExecutionDigest` or `ExeuctionDigest` object.\n\n Returns:\n A list of numpy arrays representing the output tensor values of the\n execution event.\n '
debug_event = self._reader.read_execution_event(execution.offset)
return [_parse_tensor_value(tensor_proto) for tensor_proto in debug_event.execution.tensor_protos] | Read the full tensor values from an Execution or ExecutionDigest.
Args:
execution: An `ExecutionDigest` or `ExeuctionDigest` object.
Returns:
A list of numpy arrays representing the output tensor values of the
execution event. | tensorflow/python/debug/lib/debug_events_reader.py | execution_to_tensor_values | Meteorix/tensorflow | 78 | python | def execution_to_tensor_values(self, execution):
'Read the full tensor values from an Execution or ExecutionDigest.\n\n Args:\n execution: An `ExecutionDigest` or `ExeuctionDigest` object.\n\n Returns:\n A list of numpy arrays representing the output tensor values of the\n execution event.\n '
debug_event = self._reader.read_execution_event(execution.offset)
return [_parse_tensor_value(tensor_proto) for tensor_proto in debug_event.execution.tensor_protos] | def execution_to_tensor_values(self, execution):
'Read the full tensor values from an Execution or ExecutionDigest.\n\n Args:\n execution: An `ExecutionDigest` or `ExeuctionDigest` object.\n\n Returns:\n A list of numpy arrays representing the output tensor values of the\n execution event.\n '
debug_event = self._reader.read_execution_event(execution.offset)
return [_parse_tensor_value(tensor_proto) for tensor_proto in debug_event.execution.tensor_protos]<|docstring|>Read the full tensor values from an Execution or ExecutionDigest.
Args:
execution: An `ExecutionDigest` or `ExeuctionDigest` object.
Returns:
A list of numpy arrays representing the output tensor values of the
execution event.<|endoftext|> |
234ab2c64a8e37038bdb0028408d85ed7041dd9565bed680813a79c18232d311 | def graph_execution_trace_to_tensor_value(self, trace):
'Read full tensor values from an Execution or ExecutionDigest.\n\n Args:\n trace: An `GraphExecutionTraceDigest` or `GraphExecutionTrace` object.\n\n Returns:\n A numpy array representing the output tensor value of the intra-graph\n tensor execution event.\n '
debug_event = self._reader.read_graph_execution_traces_event(trace.offset)
return _parse_tensor_value(debug_event.graph_execution_trace.tensor_proto) | Read full tensor values from an Execution or ExecutionDigest.
Args:
trace: An `GraphExecutionTraceDigest` or `GraphExecutionTrace` object.
Returns:
A numpy array representing the output tensor value of the intra-graph
tensor execution event. | tensorflow/python/debug/lib/debug_events_reader.py | graph_execution_trace_to_tensor_value | Meteorix/tensorflow | 78 | python | def graph_execution_trace_to_tensor_value(self, trace):
'Read full tensor values from an Execution or ExecutionDigest.\n\n Args:\n trace: An `GraphExecutionTraceDigest` or `GraphExecutionTrace` object.\n\n Returns:\n A numpy array representing the output tensor value of the intra-graph\n tensor execution event.\n '
debug_event = self._reader.read_graph_execution_traces_event(trace.offset)
return _parse_tensor_value(debug_event.graph_execution_trace.tensor_proto) | def graph_execution_trace_to_tensor_value(self, trace):
'Read full tensor values from an Execution or ExecutionDigest.\n\n Args:\n trace: An `GraphExecutionTraceDigest` or `GraphExecutionTrace` object.\n\n Returns:\n A numpy array representing the output tensor value of the intra-graph\n tensor execution event.\n '
debug_event = self._reader.read_graph_execution_traces_event(trace.offset)
return _parse_tensor_value(debug_event.graph_execution_trace.tensor_proto)<|docstring|>Read full tensor values from an Execution or ExecutionDigest.
Args:
trace: An `GraphExecutionTraceDigest` or `GraphExecutionTrace` object.
Returns:
A numpy array representing the output tensor value of the intra-graph
tensor execution event.<|endoftext|> |
8cbdbdf70e89b92eb742f04db85326c120cec0cbc66a6aa25caadfef7896e72b | def symbolic_tensor_id(self, graph_id, op_name, output_slot):
'Get the ID of a symbolic tensor.\n\n Args:\n graph_id: The ID of the immediately-enclosing graph.\n op_name: Name of the op.\n output_slot: Output slot as an int.\n\n Returns:\n The ID of the symbolic tensor as an int.\n '
return self._graph_by_id[graph_id].get_tensor_id(op_name, output_slot) | Get the ID of a symbolic tensor.
Args:
graph_id: The ID of the immediately-enclosing graph.
op_name: Name of the op.
output_slot: Output slot as an int.
Returns:
The ID of the symbolic tensor as an int. | tensorflow/python/debug/lib/debug_events_reader.py | symbolic_tensor_id | Meteorix/tensorflow | 78 | python | def symbolic_tensor_id(self, graph_id, op_name, output_slot):
'Get the ID of a symbolic tensor.\n\n Args:\n graph_id: The ID of the immediately-enclosing graph.\n op_name: Name of the op.\n output_slot: Output slot as an int.\n\n Returns:\n The ID of the symbolic tensor as an int.\n '
return self._graph_by_id[graph_id].get_tensor_id(op_name, output_slot) | def symbolic_tensor_id(self, graph_id, op_name, output_slot):
'Get the ID of a symbolic tensor.\n\n Args:\n graph_id: The ID of the immediately-enclosing graph.\n op_name: Name of the op.\n output_slot: Output slot as an int.\n\n Returns:\n The ID of the symbolic tensor as an int.\n '
return self._graph_by_id[graph_id].get_tensor_id(op_name, output_slot)<|docstring|>Get the ID of a symbolic tensor.
Args:
graph_id: The ID of the immediately-enclosing graph.
op_name: Name of the op.
output_slot: Output slot as an int.
Returns:
The ID of the symbolic tensor as an int.<|endoftext|> |
08b2a86f227ece5318c2925618db20d503adc852f4f1ef16bdf562339c46ab92 | def graph_execution_trace_to_tensor_id(self, trace):
'Get symbolic tensor ID from a GraphExecutoinTraceDigest object.'
return self.symbolic_tensor_id(trace.graph_id, trace.op_name, trace.output_slot) | Get symbolic tensor ID from a GraphExecutoinTraceDigest object. | tensorflow/python/debug/lib/debug_events_reader.py | graph_execution_trace_to_tensor_id | Meteorix/tensorflow | 78 | python | def graph_execution_trace_to_tensor_id(self, trace):
return self.symbolic_tensor_id(trace.graph_id, trace.op_name, trace.output_slot) | def graph_execution_trace_to_tensor_id(self, trace):
return self.symbolic_tensor_id(trace.graph_id, trace.op_name, trace.output_slot)<|docstring|>Get symbolic tensor ID from a GraphExecutoinTraceDigest object.<|endoftext|> |
69bb83864f89d180bec6f0c7b81b4dcc07260cd32d0d170a7f5177f30ac8e654 | def make_numba_random_fn(node, np_random_func):
'Create Numba implementations for existing Numba-supported ``np.random`` functions.\n\n The functions generated here add parameter broadcasting and the ``size``\n argument to the Numba-supported scalar ``np.random`` functions.\n '
tuple_size = get_vector_length(node.inputs[1])
size_dims = (tuple_size - max((i.ndim for i in node.inputs[3:])))
bcast_fn_name = f'aesara_random_{get_name_for_object(np_random_func)}'
sized_fn_name = 'sized_random_variable'
unique_names = unique_name_generator([bcast_fn_name, sized_fn_name, 'np', 'np_random_func', 'numba_vectorize', 'to_fixed_tuple', 'tuple_size', 'size_dims', 'rng', 'size', 'dtype'], suffix_sep='_')
bcast_fn_input_names = ', '.join([unique_names(i, force_unique=True) for i in node.inputs[3:]])
bcast_fn_global_env = {'np_random_func': np_random_func, 'numba_vectorize': numba.vectorize}
bcast_fn_src = f'''
@numba_vectorize
def {bcast_fn_name}({bcast_fn_input_names}):
return np_random_func({bcast_fn_input_names})
'''
bcast_fn = compile_function_src(bcast_fn_src, bcast_fn_name, bcast_fn_global_env)
random_fn_input_names = ', '.join((['rng', 'size', 'dtype'] + [unique_names(i) for i in node.inputs[3:]]))
out_dtype = node.outputs[1].type.numpy_dtype
random_fn_global_env = {bcast_fn_name: bcast_fn, 'out_dtype': out_dtype}
if (tuple_size > 0):
random_fn_body = dedent(f'''
size = to_fixed_tuple(size, tuple_size)
data = np.empty(size, dtype=out_dtype)
for i in np.ndindex(size[:size_dims]):
data[i] = {bcast_fn_name}({bcast_fn_input_names})
''')
random_fn_global_env.update({'np': np, 'to_fixed_tuple': numba_ndarray.to_fixed_tuple, 'tuple_size': tuple_size, 'size_dims': size_dims})
else:
random_fn_body = f'data = {bcast_fn_name}({bcast_fn_input_names})'
sized_fn_src = dedent(f'''
def {sized_fn_name}({random_fn_input_names}):
{indent(random_fn_body, (' ' * 4))}
return (rng, data)
''')
random_fn = compile_function_src(sized_fn_src, sized_fn_name, random_fn_global_env)
random_fn = numba.njit(random_fn)
return random_fn | Create Numba implementations for existing Numba-supported ``np.random`` functions.
The functions generated here add parameter broadcasting and the ``size``
argument to the Numba-supported scalar ``np.random`` functions. | aesara/link/numba/dispatch/random.py | make_numba_random_fn | anirudhacharya/aesara | 1 | python | def make_numba_random_fn(node, np_random_func):
'Create Numba implementations for existing Numba-supported ``np.random`` functions.\n\n The functions generated here add parameter broadcasting and the ``size``\n argument to the Numba-supported scalar ``np.random`` functions.\n '
tuple_size = get_vector_length(node.inputs[1])
size_dims = (tuple_size - max((i.ndim for i in node.inputs[3:])))
bcast_fn_name = f'aesara_random_{get_name_for_object(np_random_func)}'
sized_fn_name = 'sized_random_variable'
unique_names = unique_name_generator([bcast_fn_name, sized_fn_name, 'np', 'np_random_func', 'numba_vectorize', 'to_fixed_tuple', 'tuple_size', 'size_dims', 'rng', 'size', 'dtype'], suffix_sep='_')
bcast_fn_input_names = ', '.join([unique_names(i, force_unique=True) for i in node.inputs[3:]])
bcast_fn_global_env = {'np_random_func': np_random_func, 'numba_vectorize': numba.vectorize}
bcast_fn_src = f'
@numba_vectorize
def {bcast_fn_name}({bcast_fn_input_names}):
return np_random_func({bcast_fn_input_names})
'
bcast_fn = compile_function_src(bcast_fn_src, bcast_fn_name, bcast_fn_global_env)
random_fn_input_names = ', '.join((['rng', 'size', 'dtype'] + [unique_names(i) for i in node.inputs[3:]]))
out_dtype = node.outputs[1].type.numpy_dtype
random_fn_global_env = {bcast_fn_name: bcast_fn, 'out_dtype': out_dtype}
if (tuple_size > 0):
random_fn_body = dedent(f'
size = to_fixed_tuple(size, tuple_size)
data = np.empty(size, dtype=out_dtype)
for i in np.ndindex(size[:size_dims]):
data[i] = {bcast_fn_name}({bcast_fn_input_names})
')
random_fn_global_env.update({'np': np, 'to_fixed_tuple': numba_ndarray.to_fixed_tuple, 'tuple_size': tuple_size, 'size_dims': size_dims})
else:
random_fn_body = f'data = {bcast_fn_name}({bcast_fn_input_names})'
sized_fn_src = dedent(f'
def {sized_fn_name}({random_fn_input_names}):
{indent(random_fn_body, (' ' * 4))}
return (rng, data)
')
random_fn = compile_function_src(sized_fn_src, sized_fn_name, random_fn_global_env)
random_fn = numba.njit(random_fn)
return random_fn | def make_numba_random_fn(node, np_random_func):
'Create Numba implementations for existing Numba-supported ``np.random`` functions.\n\n The functions generated here add parameter broadcasting and the ``size``\n argument to the Numba-supported scalar ``np.random`` functions.\n '
tuple_size = get_vector_length(node.inputs[1])
size_dims = (tuple_size - max((i.ndim for i in node.inputs[3:])))
bcast_fn_name = f'aesara_random_{get_name_for_object(np_random_func)}'
sized_fn_name = 'sized_random_variable'
unique_names = unique_name_generator([bcast_fn_name, sized_fn_name, 'np', 'np_random_func', 'numba_vectorize', 'to_fixed_tuple', 'tuple_size', 'size_dims', 'rng', 'size', 'dtype'], suffix_sep='_')
bcast_fn_input_names = ', '.join([unique_names(i, force_unique=True) for i in node.inputs[3:]])
bcast_fn_global_env = {'np_random_func': np_random_func, 'numba_vectorize': numba.vectorize}
bcast_fn_src = f'
@numba_vectorize
def {bcast_fn_name}({bcast_fn_input_names}):
return np_random_func({bcast_fn_input_names})
'
bcast_fn = compile_function_src(bcast_fn_src, bcast_fn_name, bcast_fn_global_env)
random_fn_input_names = ', '.join((['rng', 'size', 'dtype'] + [unique_names(i) for i in node.inputs[3:]]))
out_dtype = node.outputs[1].type.numpy_dtype
random_fn_global_env = {bcast_fn_name: bcast_fn, 'out_dtype': out_dtype}
if (tuple_size > 0):
random_fn_body = dedent(f'
size = to_fixed_tuple(size, tuple_size)
data = np.empty(size, dtype=out_dtype)
for i in np.ndindex(size[:size_dims]):
data[i] = {bcast_fn_name}({bcast_fn_input_names})
')
random_fn_global_env.update({'np': np, 'to_fixed_tuple': numba_ndarray.to_fixed_tuple, 'tuple_size': tuple_size, 'size_dims': size_dims})
else:
random_fn_body = f'data = {bcast_fn_name}({bcast_fn_input_names})'
sized_fn_src = dedent(f'
def {sized_fn_name}({random_fn_input_names}):
{indent(random_fn_body, (' ' * 4))}
return (rng, data)
')
random_fn = compile_function_src(sized_fn_src, sized_fn_name, random_fn_global_env)
random_fn = numba.njit(random_fn)
return random_fn<|docstring|>Create Numba implementations for existing Numba-supported ``np.random`` functions.
The functions generated here add parameter broadcasting and the ``size``
argument to the Numba-supported scalar ``np.random`` functions.<|endoftext|> |
8a300a57cadbcac2b677149a85b5164c9c746ec874a8e5cea3a29e0eba583f89 | def create_numba_random_fn(op: Op, node: Apply, scalar_fn: Callable[([str], str)], global_env: Optional[Dict[(str, Any)]]=None) -> Callable:
'Create a vectorized function from a callable that generates the ``str`` function body.\n\n TODO: This could/should be generalized for other simple function\n construction cases that need unique-ified symbol names.\n '
np_random_fn_name = f'aesara_random_{get_name_for_object(op.name)}'
if global_env:
np_global_env = global_env.copy()
else:
np_global_env = {}
np_global_env['np'] = np
np_global_env['numba_vectorize'] = numba.vectorize
unique_names = unique_name_generator((([np_random_fn_name] + list(np_global_env.keys())) + ['rng', 'size', 'dtype']), suffix_sep='_')
np_names = [unique_names(i, force_unique=True) for i in node.inputs[3:]]
np_input_names = ', '.join(np_names)
np_random_fn_src = f'''
@numba_vectorize
def {np_random_fn_name}({np_input_names}):
{scalar_fn(*np_names)}
'''
np_random_fn = compile_function_src(np_random_fn_src, np_random_fn_name, np_global_env)
return make_numba_random_fn(node, np_random_fn) | Create a vectorized function from a callable that generates the ``str`` function body.
TODO: This could/should be generalized for other simple function
construction cases that need unique-ified symbol names. | aesara/link/numba/dispatch/random.py | create_numba_random_fn | anirudhacharya/aesara | 1 | python | def create_numba_random_fn(op: Op, node: Apply, scalar_fn: Callable[([str], str)], global_env: Optional[Dict[(str, Any)]]=None) -> Callable:
'Create a vectorized function from a callable that generates the ``str`` function body.\n\n TODO: This could/should be generalized for other simple function\n construction cases that need unique-ified symbol names.\n '
np_random_fn_name = f'aesara_random_{get_name_for_object(op.name)}'
if global_env:
np_global_env = global_env.copy()
else:
np_global_env = {}
np_global_env['np'] = np
np_global_env['numba_vectorize'] = numba.vectorize
unique_names = unique_name_generator((([np_random_fn_name] + list(np_global_env.keys())) + ['rng', 'size', 'dtype']), suffix_sep='_')
np_names = [unique_names(i, force_unique=True) for i in node.inputs[3:]]
np_input_names = ', '.join(np_names)
np_random_fn_src = f'
@numba_vectorize
def {np_random_fn_name}({np_input_names}):
{scalar_fn(*np_names)}
'
np_random_fn = compile_function_src(np_random_fn_src, np_random_fn_name, np_global_env)
return make_numba_random_fn(node, np_random_fn) | def create_numba_random_fn(op: Op, node: Apply, scalar_fn: Callable[([str], str)], global_env: Optional[Dict[(str, Any)]]=None) -> Callable:
'Create a vectorized function from a callable that generates the ``str`` function body.\n\n TODO: This could/should be generalized for other simple function\n construction cases that need unique-ified symbol names.\n '
np_random_fn_name = f'aesara_random_{get_name_for_object(op.name)}'
if global_env:
np_global_env = global_env.copy()
else:
np_global_env = {}
np_global_env['np'] = np
np_global_env['numba_vectorize'] = numba.vectorize
unique_names = unique_name_generator((([np_random_fn_name] + list(np_global_env.keys())) + ['rng', 'size', 'dtype']), suffix_sep='_')
np_names = [unique_names(i, force_unique=True) for i in node.inputs[3:]]
np_input_names = ', '.join(np_names)
np_random_fn_src = f'
@numba_vectorize
def {np_random_fn_name}({np_input_names}):
{scalar_fn(*np_names)}
'
np_random_fn = compile_function_src(np_random_fn_src, np_random_fn_name, np_global_env)
return make_numba_random_fn(node, np_random_fn)<|docstring|>Create a vectorized function from a callable that generates the ``str`` function body.
TODO: This could/should be generalized for other simple function
construction cases that need unique-ified symbol names.<|endoftext|> |
e42706d249b02dcd4c563cbfe2a04bf0a3609772eccc8c372508ee006c3b35cb | def read_coverages(input_file):
'Return coverage values.'
coverages = []
with open(input_file, 'r') as input_handle:
for line in input_handle:
coverages.append(float(line.rstrip()))
return coverages | Return coverage values. | scripts/subsample-single-sample.py | read_coverages | rpetit3/anthrax-metagenome-study | 0 | python | def read_coverages(input_file):
coverages = []
with open(input_file, 'r') as input_handle:
for line in input_handle:
coverages.append(float(line.rstrip()))
return coverages | def read_coverages(input_file):
coverages = []
with open(input_file, 'r') as input_handle:
for line in input_handle:
coverages.append(float(line.rstrip()))
return coverages<|docstring|>Return coverage values.<|endoftext|> |
8712be2ee0aba7f8647eb26c0eecd0db33eeb6997b71c3d4b9805d6fb7a997bb | def read_sequences(input_file, min_length=None):
'Return lines in a text file as a list.'
lines = []
lengths = []
total = 1
with open(input_file, 'r') as input_handle:
for line in input_handle:
line = line.rstrip()
length = len(line)
if min_length:
if (length >= min_length):
lines.append(line)
if (total <= 100000):
lengths.append(length)
total += 1
else:
lines.append(line)
lengths.append(length)
if (total <= 100000):
lengths.append(length)
length_stats = get_coverage_stats(lengths)
return [lines, int(length_stats['mean'])] | Return lines in a text file as a list. | scripts/subsample-single-sample.py | read_sequences | rpetit3/anthrax-metagenome-study | 0 | python | def read_sequences(input_file, min_length=None):
lines = []
lengths = []
total = 1
with open(input_file, 'r') as input_handle:
for line in input_handle:
line = line.rstrip()
length = len(line)
if min_length:
if (length >= min_length):
lines.append(line)
if (total <= 100000):
lengths.append(length)
total += 1
else:
lines.append(line)
lengths.append(length)
if (total <= 100000):
lengths.append(length)
length_stats = get_coverage_stats(lengths)
return [lines, int(length_stats['mean'])] | def read_sequences(input_file, min_length=None):
lines = []
lengths = []
total = 1
with open(input_file, 'r') as input_handle:
for line in input_handle:
line = line.rstrip()
length = len(line)
if min_length:
if (length >= min_length):
lines.append(line)
if (total <= 100000):
lengths.append(length)
total += 1
else:
lines.append(line)
lengths.append(length)
if (total <= 100000):
lengths.append(length)
length_stats = get_coverage_stats(lengths)
return [lines, int(length_stats['mean'])]<|docstring|>Return lines in a text file as a list.<|endoftext|> |
42e22653bce4ef890f2086ab515eed83d9db910797b25a0e64fcb138f696700e | def get_coverage_stats(coverage):
'Return summary stats of a set of coverages.'
np_array = np.array(coverage)
return {'min': (min(coverage) if coverage else 0), 'median': (int(np.median(np_array)) if coverage else 0), 'mean': (np.mean(np_array) if coverage else 0), 'max': (max(coverage) if coverage else 0)} | Return summary stats of a set of coverages. | scripts/subsample-single-sample.py | get_coverage_stats | rpetit3/anthrax-metagenome-study | 0 | python | def get_coverage_stats(coverage):
np_array = np.array(coverage)
return {'min': (min(coverage) if coverage else 0), 'median': (int(np.median(np_array)) if coverage else 0), 'mean': (np.mean(np_array) if coverage else 0), 'max': (max(coverage) if coverage else 0)} | def get_coverage_stats(coverage):
np_array = np.array(coverage)
return {'min': (min(coverage) if coverage else 0), 'median': (int(np.median(np_array)) if coverage else 0), 'mean': (np.mean(np_array) if coverage else 0), 'max': (max(coverage) if coverage else 0)}<|docstring|>Return summary stats of a set of coverages.<|endoftext|> |
f0b801d30afec10f9bf4f6d440dee01e663e1f20901f4b44b4d5cca878cc3666 | def run_command(cmd, cwd=os.getcwd(), stdout=False, stderr=False, shell=False):
'Execute a single command and return STDOUT and STDERR.'
(stdout, stdout_str) = output_handler(stdout)
(stderr, stderr_str) = output_handler(stderr, redirect='2>')
p = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, cwd=cwd, shell=shell)
return p.communicate() | Execute a single command and return STDOUT and STDERR. | scripts/subsample-single-sample.py | run_command | rpetit3/anthrax-metagenome-study | 0 | python | def run_command(cmd, cwd=os.getcwd(), stdout=False, stderr=False, shell=False):
(stdout, stdout_str) = output_handler(stdout)
(stderr, stderr_str) = output_handler(stderr, redirect='2>')
p = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, cwd=cwd, shell=shell)
return p.communicate() | def run_command(cmd, cwd=os.getcwd(), stdout=False, stderr=False, shell=False):
(stdout, stdout_str) = output_handler(stdout)
(stderr, stderr_str) = output_handler(stderr, redirect='2>')
p = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, cwd=cwd, shell=shell)
return p.communicate()<|docstring|>Execute a single command and return STDOUT and STDERR.<|endoftext|> |
19e23a37edf0f52e0533aa8a4943f683b0aaba987c747b68a8eaec7ae8e9150f | def subsample(opts):
'Subsample coverages.'
working_dir = opts[0]
output_dir = opts[1]
coverage = opts[2]
replicate = opts[3]
reads = 0
basename = 'replicate-{0:03d}'.format(replicate)
if (not os.path.exists('{0}/{1}-lef.txt.gz'.format(output_dir, basename))):
fasta = []
fasta_output = '{0}/{1}.fasta'.format(working_dir, basename)
random_seed = None
reads = int(((GENOME_SIZE * float(coverage)) / LENGTH))
random_seed = ((int((coverage * 10000)) * replicate) + reads)
random.seed(random_seed)
for element in random.sample(range(TOTAL), reads):
fasta.append('>{0}\n'.format(element))
fasta.append('{0}\n'.format(SEQUENCES[element]))
with open(fasta_output, 'w') as fasta_handle:
fasta_handle.write(''.join(fasta))
jellyfish = '{0}/{1}.jf'.format(working_dir, basename)
run_command(['jellyfish', 'count', '-C', '-t', '4', '-m', '31', '-s', '5M', '-o', jellyfish, fasta_output])
run_command(['rm', fasta_output])
ba_txt = '{0}/{1}-ba.txt'.format(working_dir, basename)
run_command(['jellyfish', 'query', '-s', BA_KMERS, '-o', ba_txt, jellyfish])
ba_hit = has_hit(ba_txt)
bcg_txt = '{0}/{1}-bcg.txt'.format(working_dir, basename)
run_command(['jellyfish', 'query', '-s', BCG_KMERS, '-o', bcg_txt, jellyfish])
bcg_hit = has_hit(bcg_txt)
lef_txt = '{0}/{1}-lef.txt'.format(working_dir, basename)
run_command(['jellyfish', 'query', '-s', LEF_KMERS, '-o', lef_txt, jellyfish])
run_command(['rm', jellyfish])
if (ba_hit and bcg_hit):
print('\tSUCCESS: Test #: {0} Seed: {1} Reads: {2}'.format(replicate, random_seed, reads))
run_command(['gzip', '-f', bcg_txt])
run_command(['gzip', '-f', lef_txt])
run_command(['gzip', '-f', ba_txt])
run_command(['mv', '{0}.gz'.format(ba_txt), output_dir])
run_command(['mv', '{0}.gz'.format(bcg_txt), output_dir])
run_command(['mv', '{0}.gz'.format(lef_txt), output_dir])
else:
run_command(['rm', bcg_txt])
run_command(['rm', lef_txt])
run_command(['rm', ba_txt])
else:
print('\tSkipping replicate: {0}, already completed'.format(replicate)) | Subsample coverages. | scripts/subsample-single-sample.py | subsample | rpetit3/anthrax-metagenome-study | 0 | python | def subsample(opts):
working_dir = opts[0]
output_dir = opts[1]
coverage = opts[2]
replicate = opts[3]
reads = 0
basename = 'replicate-{0:03d}'.format(replicate)
if (not os.path.exists('{0}/{1}-lef.txt.gz'.format(output_dir, basename))):
fasta = []
fasta_output = '{0}/{1}.fasta'.format(working_dir, basename)
random_seed = None
reads = int(((GENOME_SIZE * float(coverage)) / LENGTH))
random_seed = ((int((coverage * 10000)) * replicate) + reads)
random.seed(random_seed)
for element in random.sample(range(TOTAL), reads):
fasta.append('>{0}\n'.format(element))
fasta.append('{0}\n'.format(SEQUENCES[element]))
with open(fasta_output, 'w') as fasta_handle:
fasta_handle.write(.join(fasta))
jellyfish = '{0}/{1}.jf'.format(working_dir, basename)
run_command(['jellyfish', 'count', '-C', '-t', '4', '-m', '31', '-s', '5M', '-o', jellyfish, fasta_output])
run_command(['rm', fasta_output])
ba_txt = '{0}/{1}-ba.txt'.format(working_dir, basename)
run_command(['jellyfish', 'query', '-s', BA_KMERS, '-o', ba_txt, jellyfish])
ba_hit = has_hit(ba_txt)
bcg_txt = '{0}/{1}-bcg.txt'.format(working_dir, basename)
run_command(['jellyfish', 'query', '-s', BCG_KMERS, '-o', bcg_txt, jellyfish])
bcg_hit = has_hit(bcg_txt)
lef_txt = '{0}/{1}-lef.txt'.format(working_dir, basename)
run_command(['jellyfish', 'query', '-s', LEF_KMERS, '-o', lef_txt, jellyfish])
run_command(['rm', jellyfish])
if (ba_hit and bcg_hit):
print('\tSUCCESS: Test #: {0} Seed: {1} Reads: {2}'.format(replicate, random_seed, reads))
run_command(['gzip', '-f', bcg_txt])
run_command(['gzip', '-f', lef_txt])
run_command(['gzip', '-f', ba_txt])
run_command(['mv', '{0}.gz'.format(ba_txt), output_dir])
run_command(['mv', '{0}.gz'.format(bcg_txt), output_dir])
run_command(['mv', '{0}.gz'.format(lef_txt), output_dir])
else:
run_command(['rm', bcg_txt])
run_command(['rm', lef_txt])
run_command(['rm', ba_txt])
else:
print('\tSkipping replicate: {0}, already completed'.format(replicate)) | def subsample(opts):
working_dir = opts[0]
output_dir = opts[1]
coverage = opts[2]
replicate = opts[3]
reads = 0
basename = 'replicate-{0:03d}'.format(replicate)
if (not os.path.exists('{0}/{1}-lef.txt.gz'.format(output_dir, basename))):
fasta = []
fasta_output = '{0}/{1}.fasta'.format(working_dir, basename)
random_seed = None
reads = int(((GENOME_SIZE * float(coverage)) / LENGTH))
random_seed = ((int((coverage * 10000)) * replicate) + reads)
random.seed(random_seed)
for element in random.sample(range(TOTAL), reads):
fasta.append('>{0}\n'.format(element))
fasta.append('{0}\n'.format(SEQUENCES[element]))
with open(fasta_output, 'w') as fasta_handle:
fasta_handle.write(.join(fasta))
jellyfish = '{0}/{1}.jf'.format(working_dir, basename)
run_command(['jellyfish', 'count', '-C', '-t', '4', '-m', '31', '-s', '5M', '-o', jellyfish, fasta_output])
run_command(['rm', fasta_output])
ba_txt = '{0}/{1}-ba.txt'.format(working_dir, basename)
run_command(['jellyfish', 'query', '-s', BA_KMERS, '-o', ba_txt, jellyfish])
ba_hit = has_hit(ba_txt)
bcg_txt = '{0}/{1}-bcg.txt'.format(working_dir, basename)
run_command(['jellyfish', 'query', '-s', BCG_KMERS, '-o', bcg_txt, jellyfish])
bcg_hit = has_hit(bcg_txt)
lef_txt = '{0}/{1}-lef.txt'.format(working_dir, basename)
run_command(['jellyfish', 'query', '-s', LEF_KMERS, '-o', lef_txt, jellyfish])
run_command(['rm', jellyfish])
if (ba_hit and bcg_hit):
print('\tSUCCESS: Test #: {0} Seed: {1} Reads: {2}'.format(replicate, random_seed, reads))
run_command(['gzip', '-f', bcg_txt])
run_command(['gzip', '-f', lef_txt])
run_command(['gzip', '-f', ba_txt])
run_command(['mv', '{0}.gz'.format(ba_txt), output_dir])
run_command(['mv', '{0}.gz'.format(bcg_txt), output_dir])
run_command(['mv', '{0}.gz'.format(lef_txt), output_dir])
else:
run_command(['rm', bcg_txt])
run_command(['rm', lef_txt])
run_command(['rm', ba_txt])
else:
print('\tSkipping replicate: {0}, already completed'.format(replicate))<|docstring|>Subsample coverages.<|endoftext|> |
947b692e57749b999be1f449ee515924f5b762e322baaedcbb0425d91c8bf47a | def _apply_hierarchy(self, cc_dct, age, sex):
'Returns a list of HCCs after applying hierarchy and age/sex edit\n '
cc_lst_all = []
for (dx, cc_lst) in cc_dct.items():
cc_lst_all += [cc for cc in cc_lst if (cc != 'HCCNA')]
cc_cnt = Counter(set(cc_lst_all))
for (k, v) in self.hier.items():
if (k in cc_cnt):
for v_i in v:
cc_cnt[v_i] -= 1
cc_lst_unique = [k for (k, v) in cc_cnt.items() if (v > 0)]
return cc_lst_unique | Returns a list of HCCs after applying hierarchy and age/sex edit | hccpy/hcc.py | _apply_hierarchy | Navina-ai/hccpy | 0 | python | def _apply_hierarchy(self, cc_dct, age, sex):
'\n '
cc_lst_all = []
for (dx, cc_lst) in cc_dct.items():
cc_lst_all += [cc for cc in cc_lst if (cc != 'HCCNA')]
cc_cnt = Counter(set(cc_lst_all))
for (k, v) in self.hier.items():
if (k in cc_cnt):
for v_i in v:
cc_cnt[v_i] -= 1
cc_lst_unique = [k for (k, v) in cc_cnt.items() if (v > 0)]
return cc_lst_unique | def _apply_hierarchy(self, cc_dct, age, sex):
'\n '
cc_lst_all = []
for (dx, cc_lst) in cc_dct.items():
cc_lst_all += [cc for cc in cc_lst if (cc != 'HCCNA')]
cc_cnt = Counter(set(cc_lst_all))
for (k, v) in self.hier.items():
if (k in cc_cnt):
for v_i in v:
cc_cnt[v_i] -= 1
cc_lst_unique = [k for (k, v) in cc_cnt.items() if (v > 0)]
return cc_lst_unique<|docstring|>Returns a list of HCCs after applying hierarchy and age/sex edit<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.