Each record below gives: repo | path | func_name | language, followed by the function source (docstring inline), then sha | url | partition | idx.
apragacz/django-rest-registration | rest_registration/utils/users.py | get_object_or_404 | python

```python
def get_object_or_404(queryset, *filter_args, **filter_kwargs):
    """
    Same as Django's standard shortcut, but make sure to also raise 404
    if the filter_kwargs don't match the required types.
    This function was copied from rest_framework.generics because of issue #36.
    """
    try:
        return _get_object_or_404(queryset, *filter_args, **filter_kwargs)
    except (TypeError, ValueError, ValidationError):
        raise Http404
```
"def",
"get_object_or_404",
"(",
"queryset",
",",
"*",
"filter_args",
",",
"*",
"*",
"filter_kwargs",
")",
":",
"try",
":",
"return",
"_get_object_or_404",
"(",
"queryset",
",",
"*",
"filter_args",
",",
"*",
"*",
"filter_kwargs",
")",
"except",
"(",
"TypeErr... | Same as Django's standard shortcut, but make sure to also raise 404
if the filter_kwargs don't match the required types.
This function was copied from rest_framework.generics because of issue #36. | [
"Same",
"as",
"Django",
"s",
"standard",
"shortcut",
"but",
"make",
"sure",
"to",
"also",
"raise",
"404",
"if",
"the",
"filter_kwargs",
"don",
"t",
"match",
"the",
"required",
"types",
"."
7373571264dd567c2a73a97ff4c45b64f113605b | https://github.com/apragacz/django-rest-registration/blob/7373571264dd567c2a73a97ff4c45b64f113605b/rest_registration/utils/users.py#L13-L23 | train | 228,000

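A minimal usage sketch for the entry above; the `User` lookup and the `pk` value are illustrative assumptions, not taken from the source, and running it requires a configured Django project.

```python
from django.contrib.auth import get_user_model

def get_verified_user(pk):
    # A malformed pk (e.g. a non-numeric string for an integer field) would
    # normally raise TypeError/ValueError deep in the ORM; the wrapper above
    # converts that into a plain Http404 instead of a 500.
    return get_object_or_404(get_user_model().objects.all(), pk=pk)
```
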
apragacz/django-rest-registration | rest_registration/api/views/profile.py | profile | python

```python
def profile(request):
    '''
    Get or set the user profile.
    '''
    serializer_class = registration_settings.PROFILE_SERIALIZER_CLASS
    if request.method in ['POST', 'PUT', 'PATCH']:
        partial = request.method == 'PATCH'
        serializer = serializer_class(
            instance=request.user,
            data=request.data,
            partial=partial,
        )
        serializer.is_valid(raise_exception=True)
        serializer.save()
    else:  # request.method == 'GET'
        serializer = serializer_class(instance=request.user)
    return Response(serializer.data)
```
"def",
"profile",
"(",
"request",
")",
":",
"serializer_class",
"=",
"registration_settings",
".",
"PROFILE_SERIALIZER_CLASS",
"if",
"request",
".",
"method",
"in",
"[",
"'POST'",
",",
"'PUT'",
",",
"'PATCH'",
"]",
":",
"partial",
"=",
"request",
".",
"method"... | Get or set user profile. | [
"Get",
"or",
"set",
"user",
"profile",
"."
7373571264dd567c2a73a97ff4c45b64f113605b | https://github.com/apragacz/django-rest-registration/blob/7373571264dd567c2a73a97ff4c45b64f113605b/rest_registration/api/views/profile.py#L13-L30 | train | 228,001

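A hypothetical wiring sketch; in the source package the function is already exposed as a DRF view (decorated in its module), so it can be routed directly. The URL path here is an assumption.

```python
from django.urls import path

urlpatterns = [
    path("accounts/profile/", profile),  # GET reads the profile; POST/PUT/PATCH update it
]
```
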
apragacz/django-rest-registration | rest_registration/api/views/register.py | register | python

```python
def register(request):
    '''
    Register new user.
    '''
    serializer_class = registration_settings.REGISTER_SERIALIZER_CLASS
    serializer = serializer_class(data=request.data)
    serializer.is_valid(raise_exception=True)
    kwargs = {}

    if registration_settings.REGISTER_VERIFICATION_ENABLED:
        verification_flag_field = get_user_setting('VERIFICATION_FLAG_FIELD')
        kwargs[verification_flag_field] = False
        email_field = get_user_setting('EMAIL_FIELD')
        if (email_field not in serializer.validated_data
                or not serializer.validated_data[email_field]):
            raise BadRequest("User without email cannot be verified")

    user = serializer.save(**kwargs)

    output_serializer_class = registration_settings.REGISTER_OUTPUT_SERIALIZER_CLASS  # noqa: E501
    output_serializer = output_serializer_class(instance=user)
    user_data = output_serializer.data

    if registration_settings.REGISTER_VERIFICATION_ENABLED:
        signer = RegisterSigner({
            'user_id': user.pk,
        }, request=request)
        template_config = (
            registration_settings.REGISTER_VERIFICATION_EMAIL_TEMPLATES)
        send_verification_notification(user, signer, template_config)

    return Response(user_data, status=status.HTTP_201_CREATED)
```
"def",
"register",
"(",
"request",
")",
":",
"serializer_class",
"=",
"registration_settings",
".",
"REGISTER_SERIALIZER_CLASS",
"serializer",
"=",
"serializer_class",
"(",
"data",
"=",
"request",
".",
"data",
")",
"serializer",
".",
"is_valid",
"(",
"raise_exceptio... | Register new user. | [
"Register",
"new",
"user",
"."
7373571264dd567c2a73a97ff4c45b64f113605b | https://github.com/apragacz/django-rest-registration/blob/7373571264dd567c2a73a97ff4c45b64f113605b/rest_registration/api/views/register.py#L54-L86 | train | 228,002

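A hedged sketch of exercising this endpoint with DRF's test client; the URL and the serializer field names (in particular `password_confirm`) are assumptions for illustration.

```python
from rest_framework.test import APIClient

client = APIClient()
response = client.post(
    "/accounts/register/",
    {
        "username": "alice",
        "email": "alice@example.com",
        "password": "s3cret-pass",
        "password_confirm": "s3cret-pass",
    },
    format="json",
)
assert response.status_code == 201  # HTTP_201_CREATED on success
```
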
apragacz/django-rest-registration | rest_registration/api/views/register.py | verify_registration | python

```python
def verify_registration(request):
    """
    Verify registration via signature.
    """
    user = process_verify_registration_data(request.data)
    extra_data = None
    if registration_settings.REGISTER_VERIFICATION_AUTO_LOGIN:
        extra_data = perform_login(request, user)
    return get_ok_response('User verified successfully', extra_data=extra_data)
```
"def",
"verify_registration",
"(",
"request",
")",
":",
"user",
"=",
"process_verify_registration_data",
"(",
"request",
".",
"data",
")",
"extra_data",
"=",
"None",
"if",
"registration_settings",
".",
"REGISTER_VERIFICATION_AUTO_LOGIN",
":",
"extra_data",
"=",
"perfo... | Verify registration via signature. | [
"Verify",
"registration",
"via",
"signature",
"."
7373571264dd567c2a73a97ff4c45b64f113605b | https://github.com/apragacz/django-rest-registration/blob/7373571264dd567c2a73a97ff4c45b64f113605b/rest_registration/api/views/register.py#L98-L106 | train | 228,003

apragacz/django-rest-registration | setup.py | get_requirements | python

```python
def get_requirements(requirements_filepath):
    '''
    Return the list of this package's requirements, read from the given
    local file path.
    '''
    requirements = []
    with open(os.path.join(ROOT_DIR, requirements_filepath), 'rt') as f:
        for line in f:
            if line.startswith('#'):
                continue
            line = line.rstrip()
            if not line:
                continue
            requirements.append(line)
    return requirements
```
"def",
"get_requirements",
"(",
"requirements_filepath",
")",
":",
"requirements",
"=",
"[",
"]",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"ROOT_DIR",
",",
"requirements_filepath",
")",
",",
"'rt'",
")",
"as",
"f",
":",
"for",
"line",
"i... | Return list of this package requirements via local filepath. | [
"Return",
"list",
"of",
"this",
"package",
"requirements",
"via",
"local",
"filepath",
"."
7373571264dd567c2a73a97ff4c45b64f113605b | https://github.com/apragacz/django-rest-registration/blob/7373571264dd567c2a73a97ff4c45b64f113605b/setup.py#L15-L28 | train | 228,004

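A minimal sketch of how such a helper is consumed in the same `setup.py`; the package name is a placeholder.

```python
from setuptools import find_packages, setup

setup(
    name="example-package",  # placeholder, not the real package name
    packages=find_packages(),
    # Comment lines and blank lines in requirements.txt are skipped by the helper.
    install_requires=get_requirements("requirements.txt"),
)
```
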
apragacz/django-rest-registration | rest_registration/api/views/reset_password.py | send_reset_password_link | python

```python
def send_reset_password_link(request):
    '''
    Send email with reset password link.
    '''
    if not registration_settings.RESET_PASSWORD_VERIFICATION_ENABLED:
        raise Http404()
    serializer = SendResetPasswordLinkSerializer(data=request.data)
    serializer.is_valid(raise_exception=True)
    login = serializer.validated_data['login']
    user = None
    for login_field in get_login_fields():
        user = get_user_by_lookup_dict(
            {login_field: login}, default=None, require_verified=False)
        if user:
            break
    if not user:
        raise UserNotFound()
    signer = ResetPasswordSigner({
        'user_id': user.pk,
    }, request=request)
    template_config = (
        registration_settings.RESET_PASSWORD_VERIFICATION_EMAIL_TEMPLATES)
    send_verification_notification(user, signer, template_config)
    return get_ok_response('Reset link sent')
```
"def",
"send_reset_password_link",
"(",
"request",
")",
":",
"if",
"not",
"registration_settings",
".",
"RESET_PASSWORD_VERIFICATION_ENABLED",
":",
"raise",
"Http404",
"(",
")",
"serializer",
"=",
"SendResetPasswordLinkSerializer",
"(",
"data",
"=",
"request",
".",
"d... | Send email with reset password link. | [
"Send",
"email",
"with",
"reset",
"password",
"link",
"."
7373571264dd567c2a73a97ff4c45b64f113605b | https://github.com/apragacz/django-rest-registration/blob/7373571264dd567c2a73a97ff4c45b64f113605b/rest_registration/api/views/reset_password.py#L61-L89 | train | 228,005

apragacz/django-rest-registration | rest_registration/api/views/register_email.py | register_email | python

```python
def register_email(request):
    '''
    Register new email.
    '''
    user = request.user

    serializer = RegisterEmailSerializer(data=request.data)
    serializer.is_valid(raise_exception=True)

    email = serializer.validated_data['email']
    template_config = (
        registration_settings.REGISTER_EMAIL_VERIFICATION_EMAIL_TEMPLATES)
    if registration_settings.REGISTER_EMAIL_VERIFICATION_ENABLED:
        signer = RegisterEmailSigner({
            'user_id': user.pk,
            'email': email,
        }, request=request)
        send_verification_notification(
            user, signer, template_config, email=email)
    else:
        email_field = get_user_setting('EMAIL_FIELD')
        setattr(user, email_field, email)
        user.save()

    return get_ok_response('Register email link email sent')
```
"def",
"register_email",
"(",
"request",
")",
":",
"user",
"=",
"request",
".",
"user",
"serializer",
"=",
"RegisterEmailSerializer",
"(",
"data",
"=",
"request",
".",
"data",
")",
"serializer",
".",
"is_valid",
"(",
"raise_exception",
"=",
"True",
")",
"ema... | Register new email. | [
"Register",
"new",
"email",
"."
7373571264dd567c2a73a97ff4c45b64f113605b | https://github.com/apragacz/django-rest-registration/blob/7373571264dd567c2a73a97ff4c45b64f113605b/rest_registration/api/views/register_email.py#L33-L58 | train | 228,006

nschloe/matplotlib2tikz | matplotlib2tikz/axes.py | _is_colorbar_heuristic | python

```python
def _is_colorbar_heuristic(obj):
    """Find out if the object is in fact a color bar.
    """
    # TODO come up with something more accurate here
    # Might help:
    # TODO Are the colorbars exactly the l.collections.PolyCollection's?
    try:
        aspect = float(obj.get_aspect())
    except ValueError:
        # e.g., aspect == 'equal'
        return False

    # Assume that something is a colorbar if and only if the ratio is above 5.0
    # and there are no ticks on the corresponding axis. This isn't always true,
    # though: The ratio of a colorbar can be freely adjusted by the aspect
    # keyword, e.g.,
    #
    #     plt.colorbar(im, aspect=5)
    #
    limit_ratio = 5.0

    return (aspect >= limit_ratio and len(obj.get_xticks()) == 0) or (
        aspect <= 1.0 / limit_ratio and len(obj.get_yticks()) == 0
    )
```
"def",
"_is_colorbar_heuristic",
"(",
"obj",
")",
":",
"# TODO come up with something more accurate here",
"# Might help:",
"# TODO Are the colorbars exactly the l.collections.PolyCollection's?",
"try",
":",
"aspect",
"=",
"float",
"(",
"obj",
".",
"get_aspect",
"(",
")",
")"... | Find out if the object is in fact a color bar. | [
"Find",
"out",
"if",
"the",
"object",
"is",
"in",
"fact",
"a",
"color",
"bar",
"."
ac5daca6f38b834d757f6c6ae6cc34121956f46b | https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/axes.py#L582-L605 | train | 228,007

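A small sketch of what the heuristic keys on; the expected outputs are assumptions based on matplotlib's default colorbar aspect (around 20) and tick placement, not guaranteed across versions.

```python
import matplotlib.pyplot as plt
import numpy as np

fig, ax = plt.subplots()
im = ax.imshow(np.random.rand(8, 8))
cbar = fig.colorbar(im)

print(_is_colorbar_heuristic(ax))       # expected: False ('equal' aspect fails the float() conversion)
print(_is_colorbar_heuristic(cbar.ax))  # expected: True (tall axes, no ticks on the short axis)
```
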
nschloe/matplotlib2tikz | matplotlib2tikz/axes.py | _mpl_cmap2pgf_cmap | python

```python
def _mpl_cmap2pgf_cmap(cmap, data):
    """Converts a color map as given in matplotlib to a color map as
    represented in PGFPlots.
    """
    if isinstance(cmap, mpl.colors.LinearSegmentedColormap):
        return _handle_linear_segmented_color_map(cmap, data)

    assert isinstance(
        cmap, mpl.colors.ListedColormap
    ), "Only LinearSegmentedColormap and ListedColormap are supported"
    return _handle_listed_color_map(cmap, data)
```
"def",
"_mpl_cmap2pgf_cmap",
"(",
"cmap",
",",
"data",
")",
":",
"if",
"isinstance",
"(",
"cmap",
",",
"mpl",
".",
"colors",
".",
"LinearSegmentedColormap",
")",
":",
"return",
"_handle_linear_segmented_color_map",
"(",
"cmap",
",",
"data",
")",
"assert",
"isi... | Converts a color map as given in matplotlib to a color map as
represented in PGFPlots. | [
"Converts",
"a",
"color",
"map",
"as",
"given",
"in",
"matplotlib",
"to",
"a",
"color",
"map",
"as",
"represented",
"in",
"PGFPlots",
"."
ac5daca6f38b834d757f6c6ae6cc34121956f46b | https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/axes.py#L608-L618 | train | 228,008

nschloe/matplotlib2tikz | matplotlib2tikz/axes.py | _scale_to_int | python

```python
def _scale_to_int(X, max_val=None):
    """
    Scales the array X such that it contains only integers.
    """
    if max_val is None:
        X = X / _gcd_array(X)
    else:
        X = X / max(1 / max_val, _gcd_array(X))
    return [int(entry) for entry in X]
```
"def",
"_scale_to_int",
"(",
"X",
",",
"max_val",
"=",
"None",
")",
":",
"if",
"max_val",
"is",
"None",
":",
"X",
"=",
"X",
"/",
"_gcd_array",
"(",
"X",
")",
"else",
":",
"X",
"=",
"X",
"/",
"max",
"(",
"1",
"/",
"max_val",
",",
"_gcd_array",
"... | Scales the array X such that it contains only integers. | [
"Scales",
"the",
"array",
"X",
"such",
"that",
"it",
"contains",
"only",
"integers",
"."
ac5daca6f38b834d757f6c6ae6cc34121956f46b | https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/axes.py#L771-L780 | train | 228,009

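A worked example, assuming numpy array input and that `_gcd_array` (next entry) returns the floating-point greatest common divisor its docstring promises.

```python
import numpy as np

print(_scale_to_int(np.array([0.5, 1.0, 2.5])))
# -> [1, 2, 5]   (every entry is an integer multiple of the common divisor 0.5)

print(_scale_to_int(np.array([0.5, 1.0, 2.5]), max_val=1))
# -> [0, 1, 2]   (the divisor is floored at 1/max_val = 1.0, then int() truncates)
```
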
nschloe/matplotlib2tikz | matplotlib2tikz/axes.py | _gcd_array | python

```python
def _gcd_array(X):
    """
    Return the largest real value h such that all elements in X are integer
    multiples of h.
    """
    greatest_common_divisor = 0.0
    for x in X:
        greatest_common_divisor = _gcd(greatest_common_divisor, x)
    return greatest_common_divisor
```
"def",
"_gcd_array",
"(",
"X",
")",
":",
"greatest_common_divisor",
"=",
"0.0",
"for",
"x",
"in",
"X",
":",
"greatest_common_divisor",
"=",
"_gcd",
"(",
"greatest_common_divisor",
",",
"x",
")",
"return",
"greatest_common_divisor"
] | Return the largest real value h such that all elements in x are integer
multiples of h. | [
"Return",
"the",
"largest",
"real",
"value",
"h",
"such",
"that",
"all",
"elements",
"in",
"x",
"are",
"integer",
"multiples",
"of",
"h",
"."
ac5daca6f38b834d757f6c6ae6cc34121956f46b | https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/axes.py#L783-L792 | train | 228,010

nschloe/matplotlib2tikz | matplotlib2tikz/files.py | new_filename | python

```python
def new_filename(data, file_kind, ext):
    """Returns an available filename.

    :param file_kind: Name under which numbering is recorded, such as 'img' or
                      'table'.
    :type file_kind: str

    :param ext: Filename extension.
    :type ext: str

    :returns: (filename, rel_filepath) where filename is a path in the
              filesystem and rel_filepath is the path to be used in the tex
              code.
    """
    nb_key = file_kind + "number"
    if nb_key not in data.keys():
        data[nb_key] = -1

    if not data["override externals"]:
        # Make sure not to overwrite anything.
        file_exists = True
        while file_exists:
            data[nb_key] = data[nb_key] + 1
            filename, name = _gen_filename(data, nb_key, ext)
            file_exists = os.path.isfile(filename)
    else:
        data[nb_key] = data[nb_key] + 1
        filename, name = _gen_filename(data, nb_key, ext)

    if data["rel data path"]:
        rel_filepath = posixpath.join(data["rel data path"], name)
    else:
        rel_filepath = name

    return filename, rel_filepath
```
"def",
"new_filename",
"(",
"data",
",",
"file_kind",
",",
"ext",
")",
":",
"nb_key",
"=",
"file_kind",
"+",
"\"number\"",
"if",
"nb_key",
"not",
"in",
"data",
".",
"keys",
"(",
")",
":",
"data",
"[",
"nb_key",
"]",
"=",
"-",
"1",
"if",
"not",
"dat... | Returns an available filename.
:param file_kind: Name under which numbering is recorded, such as 'img' or
'table'.
:type file_kind: str
:param ext: Filename extension.
:type ext: str
:returns: (filename, rel_filepath) where filename is a path in the
filesystem and rel_filepath is the path to be used in the tex
code. | [
"Returns",
"an",
"available",
"filename",
"."
ac5daca6f38b834d757f6c6ae6cc34121956f46b | https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/files.py#L12-L47 | train | 228,011

nschloe/matplotlib2tikz | matplotlib2tikz/path.py | mpl_linestyle2pgfplots_linestyle | python

```python
def mpl_linestyle2pgfplots_linestyle(line_style, line=None):
    """Translates a line style of matplotlib to the corresponding style
    in PGFPlots.
    """
    # linestyle is a string or dash tuple. Legal string values are
    # solid|dashed|dashdot|dotted. The dash tuple is (offset, onoffseq) where
    # onoffseq is an even-length tuple of on and off ink in points.
    #
    # solid: [(None, None), (None, None), ..., (None, None)]
    # dashed: (0, (6.0, 6.0))
    # dotted: (0, (1.0, 3.0))
    # dashdot: (0, (3.0, 5.0, 1.0, 5.0))
    if isinstance(line_style, tuple):
        if line_style[0] is None:
            return None

        if len(line_style[1]) == 2:
            return "dash pattern=on {}pt off {}pt".format(*line_style[1])

        assert len(line_style[1]) == 4
        return "dash pattern=on {}pt off {}pt on {}pt off {}pt".format(*line_style[1])

    if isinstance(line, mpl.lines.Line2D) and line.is_dashed():
        # see matplotlib.lines.Line2D.set_dashes

        # get defaults
        default_dashOffset, default_dashSeq = mpl.lines._get_dash_pattern(line_style)

        # get dash format of line under test
        dashSeq = line._us_dashSeq
        dashOffset = line._us_dashOffset

        lst = list()
        if dashSeq != default_dashSeq:
            # generate own dash sequence
            format_string = " ".join(len(dashSeq) // 2 * ["on {}pt off {}pt"])
            lst.append("dash pattern=" + format_string.format(*dashSeq))

        if dashOffset != default_dashOffset:
            lst.append("dash phase={}pt".format(dashOffset))

        if len(lst) > 0:
            return ", ".join(lst)

    return {
        "": None,
        "None": None,
        "none": None,  # happens when using plt.boxplot()
        "-": "solid",
        "solid": "solid",
        ":": "dotted",
        "--": "dashed",
        "-.": "dash pattern=on 1pt off 3pt on 3pt off 3pt",
    }[line_style]
```
"def",
"mpl_linestyle2pgfplots_linestyle",
"(",
"line_style",
",",
"line",
"=",
"None",
")",
":",
"# linestyle is a string or dash tuple. Legal string values are",
"# solid|dashed|dashdot|dotted. The dash tuple is (offset, onoffseq) where onoffseq",
"# is an even length tuple of on and off ... | Translates a line style of matplotlib to the corresponding style
in PGFPlots. | [
"Translates",
"a",
"line",
"style",
"of",
"matplotlib",
"to",
"the",
"corresponding",
"style",
"in",
"PGFPlots",
"."
ac5daca6f38b834d757f6c6ae6cc34121956f46b | https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/path.py#L296-L349 | train | 228,012

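A few direct translations: the string cases need no `Line2D` object, and the tuple case follows matplotlib's `(offset, onoffseq)` convention described in the comments above.

```python
print(mpl_linestyle2pgfplots_linestyle("--"))  # -> 'dashed'
print(mpl_linestyle2pgfplots_linestyle(":"))   # -> 'dotted'
print(mpl_linestyle2pgfplots_linestyle((0, (6.0, 6.0))))
# -> 'dash pattern=on 6.0pt off 6.0pt'
```
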
nschloe/matplotlib2tikz | matplotlib2tikz/quadmesh.py | draw_quadmesh | python

```python
def draw_quadmesh(data, obj):
    """Returns the PGFPlots code for a graphics environment holding a
    rendering of the object.
    """
    content = []

    # Generate file name for current object
    filename, rel_filepath = files.new_filename(data, "img", ".png")

    # Get the dpi for rendering and store the original dpi of the figure
    dpi = data["dpi"]
    fig_dpi = obj.figure.get_dpi()
    obj.figure.set_dpi(dpi)

    # Render the object and save as png file
    from matplotlib.backends.backend_agg import RendererAgg

    cbox = obj.get_clip_box()
    width = int(round(cbox.extents[2]))
    height = int(round(cbox.extents[3]))
    ren = RendererAgg(width, height, dpi)
    obj.draw(ren)

    # Generate an image from the render buffer
    image = Image.frombuffer(
        "RGBA", ren.get_canvas_width_height(), ren.buffer_rgba(), "raw", "RGBA", 0, 1
    )

    # Crop the image to the actual content (removing the regions otherwise
    # used for axes, etc.)
    # 'image.crop' expects the crop box to specify the left, upper, right, and
    # lower pixel. 'cbox.extents' gives the left, lower, right, and upper
    # pixel.
    box = (
        int(round(cbox.extents[0])),
        0,
        int(round(cbox.extents[2])),
        int(round(cbox.extents[3] - cbox.extents[1])),
    )
    cropped = image.crop(box)
    cropped.save(filename)

    # Restore the original dpi of the figure
    obj.figure.set_dpi(fig_dpi)

    # write the corresponding information to the TikZ file
    extent = obj.axes.get_xlim() + obj.axes.get_ylim()

    # Explicitly use \pgfimage as includegraphics command, as the default
    # \includegraphics fails unexpectedly in some cases
    ff = data["float format"]
    content.append(
        (
            "\\addplot graphics [includegraphics cmd=\\pgfimage,"
            "xmin=" + ff + ", xmax=" + ff + ", "
            "ymin=" + ff + ", ymax=" + ff + "] {{{}}};\n"
        ).format(*(extent + (rel_filepath,)))
    )
    return data, content
```
"def",
"draw_quadmesh",
"(",
"data",
",",
"obj",
")",
":",
"content",
"=",
"[",
"]",
"# Generate file name for current object",
"filename",
",",
"rel_filepath",
"=",
"files",
".",
"new_filename",
"(",
"data",
",",
"\"img\"",
",",
"\".png\"",
")",
"# Get the dpi ... | Returns the PGFPlots code for an graphics environment holding a
rendering of the object. | [
"Returns",
"the",
"PGFPlots",
"code",
"for",
"an",
"graphics",
"environment",
"holding",
"a",
"rendering",
"of",
"the",
"object",
"."
ac5daca6f38b834d757f6c6ae6cc34121956f46b | https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/quadmesh.py#L8-L66 | train | 228,013

nschloe/matplotlib2tikz | matplotlib2tikz/color.py | mpl_color2xcolor | python

```python
def mpl_color2xcolor(data, matplotlib_color):
    """Translates a matplotlib color specification into a proper LaTeX xcolor.
    """
    # Convert it to RGBA.
    my_col = numpy.array(mpl.colors.ColorConverter().to_rgba(matplotlib_color))

    # If the alpha channel is exactly 0, then the color is really 'none'
    # regardless of the RGB channels.
    if my_col[-1] == 0.0:
        return data, "none", my_col

    xcol = None

    # RGB values (as taken from xcolor.dtx):
    available_colors = {
        # List white first such that for gray values, the combination
        # white!<x>!black is preferred over, e.g., gray!<y>!black. Note that
        # the order of the dictionary is respected from Python 3.6 on.
        "white": numpy.array([1, 1, 1]),
        "lightgray": numpy.array([0.75, 0.75, 0.75]),
        "gray": numpy.array([0.5, 0.5, 0.5]),
        "darkgray": numpy.array([0.25, 0.25, 0.25]),
        "black": numpy.array([0, 0, 0]),
        #
        "red": numpy.array([1, 0, 0]),
        "green": numpy.array([0, 1, 0]),
        "blue": numpy.array([0, 0, 1]),
        "brown": numpy.array([0.75, 0.5, 0.25]),
        "lime": numpy.array([0.75, 1, 0]),
        "orange": numpy.array([1, 0.5, 0]),
        "pink": numpy.array([1, 0.75, 0.75]),
        "purple": numpy.array([0.75, 0, 0.25]),
        "teal": numpy.array([0, 0.5, 0.5]),
        "violet": numpy.array([0.5, 0, 0.5]),
        # The colors cyan, magenta, yellow, and olive are also
        # predefined by xcolor, but their RGB approximation of the
        # native CMYK values is not very good. Don't use them here.
    }

    available_colors.update(data["custom colors"])

    # Check if it exactly matches any of the colors already available.
    # This case is actually treated below (alpha==1), but that loop
    # may pick up combinations with black before finding the exact
    # match. Hence, first check all colors.
    for name, rgb in available_colors.items():
        if all(my_col[:3] == rgb):
            xcol = name
            return data, xcol, my_col

    # Check if my_col is a multiple of a predefined color and 'black'.
    for name, rgb in available_colors.items():
        if name == "black":
            continue

        if rgb[0] != 0.0:
            alpha = my_col[0] / rgb[0]
        elif rgb[1] != 0.0:
            alpha = my_col[1] / rgb[1]
        else:
            assert rgb[2] != 0.0
            alpha = my_col[2] / rgb[2]

        # The cases 0.0 (my_col == black) and 1.0 (my_col == rgb) are
        # already accounted for by checking in available_colors above.
        if all(my_col[:3] == alpha * rgb) and 0.0 < alpha < 1.0:
            xcol = name + ("!{}!black".format(alpha * 100))
            return data, xcol, my_col

    # Lookup failed, add it to the custom list.
    xcol = "color" + str(len(data["custom colors"]))
    data["custom colors"][xcol] = my_col[:3]

    return data, xcol, my_col
```
"def",
"mpl_color2xcolor",
"(",
"data",
",",
"matplotlib_color",
")",
":",
"# Convert it to RGBA.",
"my_col",
"=",
"numpy",
".",
"array",
"(",
"mpl",
".",
"colors",
".",
"ColorConverter",
"(",
")",
".",
"to_rgba",
"(",
"matplotlib_color",
")",
")",
"# If the a... | Translates a matplotlib color specification into a proper LaTeX xcolor. | [
"Translates",
"a",
"matplotlib",
"color",
"specification",
"into",
"a",
"proper",
"LaTeX",
"xcolor",
"."
ac5daca6f38b834d757f6c6ae6cc34121956f46b | https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/color.py#L9-L81 | train | 228,014

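A sketch of the lookup behavior; only the `"custom colors"` key of `data` is read and updated here.

```python
data = {"custom colors": {}}

data, name, rgba = mpl_color2xcolor(data, "red")
print(name)  # -> 'red' (exact match in the predefined table)

data, name, rgba = mpl_color2xcolor(data, (0.1, 0.2, 0.3))
print(name)  # -> 'color0', now registered in data['custom colors']
```
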
nschloe/matplotlib2tikz | matplotlib2tikz/patch.py | draw_patch | python

```python
def draw_patch(data, obj):
    """Return the PGFPlots code for patches.
    """
    # Gather the draw options.
    data, draw_options = mypath.get_draw_options(
        data,
        obj,
        obj.get_edgecolor(),
        obj.get_facecolor(),
        obj.get_linestyle(),
        obj.get_linewidth(),
    )

    if isinstance(obj, mpl.patches.Rectangle):
        # rectangle specialization
        return _draw_rectangle(data, obj, draw_options)
    elif isinstance(obj, mpl.patches.Ellipse):
        # ellipse specialization
        return _draw_ellipse(data, obj, draw_options)

    # regular patch
    data, path_command, _, _ = mypath.draw_path(
        data, obj.get_path(), draw_options=draw_options
    )
    return data, path_command
```
"def",
"draw_patch",
"(",
"data",
",",
"obj",
")",
":",
"# Gather the draw options.",
"data",
",",
"draw_options",
"=",
"mypath",
".",
"get_draw_options",
"(",
"data",
",",
"obj",
",",
"obj",
".",
"get_edgecolor",
"(",
")",
",",
"obj",
".",
"get_facecolor",
... | Return the PGFPlots code for patches. | [
"Return",
"the",
"PGFPlots",
"code",
"for",
"patches",
"."
ac5daca6f38b834d757f6c6ae6cc34121956f46b | https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/patch.py#L8-L32 | train | 228,015

nschloe/matplotlib2tikz | matplotlib2tikz/patch.py | _draw_rectangle | python

```python
def _draw_rectangle(data, obj, draw_options):
    """Return the PGFPlots code for rectangles.
    """
    # Objects with labels are plot objects (from bar charts, etc). Even those without
    # labels explicitly set have a label of "_nolegend_". Everything else should be
    # skipped because they likely correspond to axis/legend objects which are handled
    # by PGFPlots.
    label = obj.get_label()
    if label == "":
        return data, []

    # Get actual label, bar charts by default only give rectangles labels of
    # "_nolegend_". See <https://stackoverflow.com/q/35881290/353337>.
    handles, labels = obj.axes.get_legend_handles_labels()
    labelsFound = [
        label for h, label in zip(handles, labels) if obj in h.get_children()
    ]
    if len(labelsFound) == 1:
        label = labelsFound[0]

    left_lower_x = obj.get_x()
    left_lower_y = obj.get_y()
    ff = data["float format"]
    cont = (
        "\\draw[{}] (axis cs:" + ff + "," + ff + ") "
        "rectangle (axis cs:" + ff + "," + ff + ");\n"
    ).format(
        ",".join(draw_options),
        left_lower_x,
        left_lower_y,
        left_lower_x + obj.get_width(),
        left_lower_y + obj.get_height(),
    )

    if label != "_nolegend_" and label not in data["rectangle_legends"]:
        data["rectangle_legends"].add(label)
        cont += "\\addlegendimage{{ybar,ybar legend,{}}};\n".format(
            ",".join(draw_options)
        )
        cont += "\\addlegendentry{{{}}}\n\n".format(label)
    return data, cont
```
"def",
"_draw_rectangle",
"(",
"data",
",",
"obj",
",",
"draw_options",
")",
":",
"# Objects with labels are plot objects (from bar charts, etc). Even those without",
"# labels explicitly set have a label of \"_nolegend_\". Everything else should be",
"# skipped because they likely corresp... | Return the PGFPlots code for rectangles. | [
"Return",
"the",
"PGFPlots",
"code",
"for",
"rectangles",
"."
ac5daca6f38b834d757f6c6ae6cc34121956f46b | https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/patch.py#L91-L131 | train | 228,016

nschloe/matplotlib2tikz | matplotlib2tikz/patch.py | _draw_ellipse | python

```python
def _draw_ellipse(data, obj, draw_options):
    """Return the PGFPlots code for ellipses.
    """
    if isinstance(obj, mpl.patches.Circle):
        # circle specialization
        return _draw_circle(data, obj, draw_options)
    x, y = obj.center
    ff = data["float format"]

    if obj.angle != 0:
        fmt = "rotate around={{" + ff + ":(axis cs:" + ff + "," + ff + ")}}"
        draw_options.append(fmt.format(obj.angle, x, y))

    cont = (
        "\\draw[{}] (axis cs:"
        + ff
        + ","
        + ff
        + ") ellipse ("
        + ff
        + " and "
        + ff
        + ");\n"
    ).format(",".join(draw_options), x, y, 0.5 * obj.width, 0.5 * obj.height)
    return data, cont
```
"def",
"_draw_ellipse",
"(",
"data",
",",
"obj",
",",
"draw_options",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"mpl",
".",
"patches",
".",
"Circle",
")",
":",
"# circle specialization",
"return",
"_draw_circle",
"(",
"data",
",",
"obj",
",",
"draw_op... | Return the PGFPlots code for ellipses. | [
"Return",
"the",
"PGFPlots",
"code",
"for",
"ellipses",
"."
ac5daca6f38b834d757f6c6ae6cc34121956f46b | https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/patch.py#L134-L158 | train | 228,017

nschloe/matplotlib2tikz | matplotlib2tikz/patch.py | _draw_circle | python

```python
def _draw_circle(data, obj, draw_options):
    """Return the PGFPlots code for circles.
    """
    x, y = obj.center
    ff = data["float format"]
    cont = ("\\draw[{}] (axis cs:" + ff + "," + ff + ") circle (" + ff + ");\n").format(
        ",".join(draw_options), x, y, obj.get_radius()
    )
    return data, cont
```
"def",
"_draw_circle",
"(",
"data",
",",
"obj",
",",
"draw_options",
")",
":",
"x",
",",
"y",
"=",
"obj",
".",
"center",
"ff",
"=",
"data",
"[",
"\"float format\"",
"]",
"cont",
"=",
"(",
"\"\\\\draw[{}] (axis cs:\"",
"+",
"ff",
"+",
"\",\"",
"+",
"ff"... | Return the PGFPlots code for circles. | [
"Return",
"the",
"PGFPlots",
"code",
"for",
"circles",
"."
ac5daca6f38b834d757f6c6ae6cc34121956f46b | https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/patch.py#L161-L169 | train | 228,018

nschloe/matplotlib2tikz | matplotlib2tikz/image.py | draw_image | python

```python
def draw_image(data, obj):
    """Returns the PGFPlots code for an image environment.
    """
    content = []

    filename, rel_filepath = files.new_filename(data, "img", ".png")

    # store the image in a file
    img_array = obj.get_array()

    dims = img_array.shape
    if len(dims) == 2:  # the values are given as one real number: look at cmap
        clims = obj.get_clim()
        mpl.pyplot.imsave(
            fname=filename,
            arr=img_array,
            cmap=obj.get_cmap(),
            vmin=clims[0],
            vmax=clims[1],
            origin=obj.origin,
        )
    else:
        # RGB (+alpha) information at each point
        assert len(dims) == 3 and dims[2] in [3, 4]
        # convert to PIL image
        if obj.origin == "lower":
            img_array = numpy.flipud(img_array)

        # Convert mpl image to PIL
        image = PIL.Image.fromarray(numpy.uint8(img_array * 255))

        # If the input image is PIL:
        # image = PIL.Image.fromarray(img_array)

        image.save(filename, origin=obj.origin)

    # write the corresponding information to the TikZ file
    extent = obj.get_extent()

    # the format specification will only accept tuples
    if not isinstance(extent, tuple):
        extent = tuple(extent)

    # Explicitly use \pgfimage as includegraphics command, as the default
    # \includegraphics fails unexpectedly in some cases
    ff = data["float format"]
    content.append(
        (
            "\\addplot graphics [includegraphics cmd=\\pgfimage,"
            "xmin=" + ff + ", xmax=" + ff + ", "
            "ymin=" + ff + ", ymax=" + ff + "] {{{}}};\n"
        ).format(*(extent + (rel_filepath,)))
    )
    return data, content
```
"def",
"draw_image",
"(",
"data",
",",
"obj",
")",
":",
"content",
"=",
"[",
"]",
"filename",
",",
"rel_filepath",
"=",
"files",
".",
"new_filename",
"(",
"data",
",",
"\"img\"",
",",
"\".png\"",
")",
"# store the image as in a file",
"img_array",
"=",
"obj"... | Returns the PGFPlots code for an image environment. | [
"Returns",
"the",
"PGFPlots",
"code",
"for",
"an",
"image",
"environment",
"."
ac5daca6f38b834d757f6c6ae6cc34121956f46b | https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/image.py#L10-L64 | train | 228,019

nschloe/matplotlib2tikz | matplotlib2tikz/util.py | get_legend_text | python

```python
def get_legend_text(obj):
    """Check if the line is in the legend; if so, return its legend text,
    otherwise None.
    """
    leg = obj.axes.get_legend()
    if leg is None:
        return None
    keys = [l.get_label() for l in leg.legendHandles if l is not None]
    values = [l.get_text() for l in leg.texts]

    label = obj.get_label()
    d = dict(zip(keys, values))
    if label in d:
        return d[label]

    return None
```
"def",
"get_legend_text",
"(",
"obj",
")",
":",
"leg",
"=",
"obj",
".",
"axes",
".",
"get_legend",
"(",
")",
"if",
"leg",
"is",
"None",
":",
"return",
"None",
"keys",
"=",
"[",
"l",
".",
"get_label",
"(",
")",
"for",
"l",
"in",
"leg",
".",
"legen... | Check if line is in legend. | [
"Check",
"if",
"line",
"is",
"in",
"legend",
"."
ac5daca6f38b834d757f6c6ae6cc34121956f46b | https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/util.py#L11-L26 | train | 228,020

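A minimal sketch; the expected output assumes a matplotlib version where `Legend.legendHandles` is still available, as in the source above.

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
(line,) = ax.plot([0, 1], [0, 1], label="diagonal")
ax.legend()

print(get_legend_text(line))  # -> 'diagonal'
```
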
nschloe/matplotlib2tikz | matplotlib2tikz/save.py | _get_color_definitions | python

```python
def _get_color_definitions(data):
    """Returns the list of custom color definitions for the TikZ file.
    """
    definitions = []
    fmt = "\\definecolor{{{}}}{{rgb}}{{" + ",".join(3 * [data["float format"]]) + "}}"
    for name, rgb in data["custom colors"].items():
        definitions.append(fmt.format(name, rgb[0], rgb[1], rgb[2]))
    return definitions
```
"def",
"_get_color_definitions",
"(",
"data",
")",
":",
"definitions",
"=",
"[",
"]",
"fmt",
"=",
"\"\\\\definecolor{{{}}}{{rgb}}{{\"",
"+",
"\",\"",
".",
"join",
"(",
"3",
"*",
"[",
"data",
"[",
"\"float format\"",
"]",
"]",
")",
"+",
"\"}}\"",
"for",
"na... | Returns the list of custom color definitions for the TikZ file. | [
"Returns",
"the",
"list",
"of",
"custom",
"color",
"definitions",
"for",
"the",
"TikZ",
"file",
"."
ac5daca6f38b834d757f6c6ae6cc34121956f46b | https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/save.py#L283-L290 | train | 228,021

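A sketch of the expected `data` shape; the `{:.3f}` float format is an assumption chosen for readability.

```python
data = {
    "float format": "{:.3f}",
    "custom colors": {"color0": (0.1, 0.2, 0.3)},
}
print(_get_color_definitions(data))
# -> ['\\definecolor{color0}{rgb}{0.100,0.200,0.300}']
```
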
nschloe/matplotlib2tikz | matplotlib2tikz/save.py | _print_pgfplot_libs_message | python

```python
def _print_pgfplot_libs_message(data):
    """Prints message to screen indicating the use of PGFPlots and its
    libraries."""
    pgfplotslibs = ",".join(list(data["pgfplots libs"]))
    tikzlibs = ",".join(list(data["tikz libs"]))

    print(70 * "=")
    print("Please add the following lines to your LaTeX preamble:\n")
    print("\\usepackage[utf8]{inputenc}")
    print("\\usepackage{fontspec} % This line only for XeLaTeX and LuaLaTeX")
    print("\\usepackage{pgfplots}")
    if tikzlibs:
        print("\\usetikzlibrary{" + tikzlibs + "}")
    if pgfplotslibs:
        print("\\usepgfplotslibrary{" + pgfplotslibs + "}")
    print(70 * "=")
    return
```
"def",
"_print_pgfplot_libs_message",
"(",
"data",
")",
":",
"pgfplotslibs",
"=",
"\",\"",
".",
"join",
"(",
"list",
"(",
"data",
"[",
"\"pgfplots libs\"",
"]",
")",
")",
"tikzlibs",
"=",
"\",\"",
".",
"join",
"(",
"list",
"(",
"data",
"[",
"\"tikz libs\""... | Prints message to screen indicating the use of PGFPlots and its
libraries. | [
"Prints",
"message",
"to",
"screen",
"indicating",
"the",
"use",
"of",
"PGFPlots",
"and",
"its",
"libraries",
"."
ac5daca6f38b834d757f6c6ae6cc34121956f46b | https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/save.py#L293-L309 | train | 228,022

nschloe/matplotlib2tikz | matplotlib2tikz/save.py | _ContentManager.extend | python

```python
def extend(self, content, zorder):
    """ Extends with a list and a z-order
    """
    if zorder not in self._content:
        self._content[zorder] = []
    self._content[zorder].extend(content)
```
"def",
"extend",
"(",
"self",
",",
"content",
",",
"zorder",
")",
":",
"if",
"zorder",
"not",
"in",
"self",
".",
"_content",
":",
"self",
".",
"_content",
"[",
"zorder",
"]",
"=",
"[",
"]",
"self",
".",
"_content",
"[",
"zorder",
"]",
".",
"extend"... | Extends with a list and a z-order | [
"Extends",
"with",
"a",
"list",
"and",
"a",
"z",
"-",
"order"
ac5daca6f38b834d757f6c6ae6cc34121956f46b | https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/save.py#L322-L327 | train | 228,023

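For context, a sketch of the class this method belongs to, assuming `_content` is a plain dict keyed by z-order; the `flatten` helper is hypothetical and only illustrates how the keys would be consumed.

```python
class _ContentManager:
    """Collects output snippets keyed by z-order (sketch)."""

    def __init__(self):
        self._content = {}

    def extend(self, content, zorder):
        if zorder not in self._content:
            self._content[zorder] = []
        self._content[zorder].extend(content)

    def flatten(self):
        # Hypothetical: emit snippets from back (low z-order) to front (high z-order).
        return [s for z in sorted(self._content) for s in self._content[z]]
```
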
nschloe/matplotlib2tikz | matplotlib2tikz/line2d.py | draw_line2d | python

```python
def draw_line2d(data, obj):
    """Returns the PGFPlots code for a Line2D environment.
    """
    content = []
    addplot_options = []

    # If line is of length 0, do nothing. Otherwise, an empty \addplot table will be
    # created, which will be interpreted as an external data source in either the file
    # '' or '.tex'. Instead, render nothing.
    if len(obj.get_xdata()) == 0:
        return data, []

    # get the linewidth (in pt)
    line_width = mypath.mpl_linewidth2pgfp_linewidth(data, obj.get_linewidth())
    if line_width:
        addplot_options.append(line_width)

    # get line color
    color = obj.get_color()
    data, line_xcolor, _ = mycol.mpl_color2xcolor(data, color)
    addplot_options.append(line_xcolor)

    alpha = obj.get_alpha()
    if alpha is not None:
        addplot_options.append("opacity={}".format(alpha))

    linestyle = mypath.mpl_linestyle2pgfplots_linestyle(obj.get_linestyle(), line=obj)
    if linestyle is not None and linestyle != "solid":
        addplot_options.append(linestyle)

    marker_face_color = obj.get_markerfacecolor()
    marker_edge_color = obj.get_markeredgecolor()
    data, marker, extra_mark_options = _mpl_marker2pgfp_marker(
        data, obj.get_marker(), marker_face_color
    )
    if marker:
        _marker(
            obj,
            data,
            marker,
            addplot_options,
            extra_mark_options,
            marker_face_color,
            marker_edge_color,
            line_xcolor,
        )

    if marker and linestyle is None:
        addplot_options.append("only marks")

    # Check if a line is in a legend and forget it if not.
    # Fixes <https://github.com/nschloe/matplotlib2tikz/issues/167>.
    legend_text = get_legend_text(obj)
    if legend_text is None and has_legend(obj.axes):
        addplot_options.append("forget plot")

    # process options
    content.append("\\addplot ")
    if addplot_options:
        content.append("[{}]\n".format(", ".join(addplot_options)))

    c, axis_options = _table(obj, data)
    content += c

    if legend_text is not None:
        content.append("\\addlegendentry{{{}}}\n".format(legend_text))

    return data, content
```
"def",
"draw_line2d",
"(",
"data",
",",
"obj",
")",
":",
"content",
"=",
"[",
"]",
"addplot_options",
"=",
"[",
"]",
"# If line is of length 0, do nothing. Otherwise, an empty \\addplot table will be",
"# created, which will be interpreted as an external data source in either the ... | Returns the PGFPlots code for an Line2D environment. | [
"Returns",
"the",
"PGFPlots",
"code",
"for",
"an",
"Line2D",
"environment",
"."
] | ac5daca6f38b834d757f6c6ae6cc34121956f46b | https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/line2d.py#L18-L85 | train | 228,024 |
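Usage sketch: draw_line2d is not called directly in normal use; it runs when a figure containing Line2D artists is converted through the package's top-level save entry point. A minimal example of that path (the output filename is illustrative, not from the record):

    import matplotlib.pyplot as plt
    import matplotlib2tikz

    fig, ax = plt.subplots()
    # A Line2D with markers and a dashed linestyle; draw_line2d turns it
    # into a single \addplot with matching PGFPlots options.
    ax.plot([0, 1, 2], [0, 1, 4], "o--", label="data")
    ax.legend()
    matplotlib2tikz.save("line_example.tex")  # illustrative output path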
nschloe/matplotlib2tikz | matplotlib2tikz/line2d.py | draw_linecollection | def draw_linecollection(data, obj):
"""Returns Pgfplots code for a number of patch objects.
"""
content = []
edgecolors = obj.get_edgecolors()
linestyles = obj.get_linestyles()
linewidths = obj.get_linewidths()
paths = obj.get_paths()
for i, path in enumerate(paths):
color = edgecolors[i] if i < len(edgecolors) else edgecolors[0]
style = linestyles[i] if i < len(linestyles) else linestyles[0]
width = linewidths[i] if i < len(linewidths) else linewidths[0]
data, options = mypath.get_draw_options(data, obj, color, None, style, width)
# TODO what about masks?
data, cont, _, _ = mypath.draw_path(
data, path, draw_options=options, simplify=False
)
content.append(cont + "\n")
return data, content | python | def draw_linecollection(data, obj):
"""Returns Pgfplots code for a number of patch objects.
"""
content = []
edgecolors = obj.get_edgecolors()
linestyles = obj.get_linestyles()
linewidths = obj.get_linewidths()
paths = obj.get_paths()
for i, path in enumerate(paths):
color = edgecolors[i] if i < len(edgecolors) else edgecolors[0]
style = linestyles[i] if i < len(linestyles) else linestyles[0]
width = linewidths[i] if i < len(linewidths) else linewidths[0]
data, options = mypath.get_draw_options(data, obj, color, None, style, width)
# TODO what about masks?
data, cont, _, _ = mypath.draw_path(
data, path, draw_options=options, simplify=False
)
content.append(cont + "\n")
return data, content | [
"def",
"draw_linecollection",
"(",
"data",
",",
"obj",
")",
":",
"content",
"=",
"[",
"]",
"edgecolors",
"=",
"obj",
".",
"get_edgecolors",
"(",
")",
"linestyles",
"=",
"obj",
".",
"get_linestyles",
"(",
")",
"linewidths",
"=",
"obj",
".",
"get_linewidths"... | Returns Pgfplots code for a number of patch objects. | [
"Returns",
"Pgfplots",
"code",
"for",
"a",
"number",
"of",
"patch",
"objects",
"."
] | ac5daca6f38b834d757f6c6ae6cc34121956f46b | https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/line2d.py#L88-L111 | train | 228,025 |
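Usage sketch: draw_linecollection is exercised when the figure holds a LineCollection; a minimal example routed through the top-level save (output path is illustrative):

    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.collections import LineCollection
    import matplotlib2tikz

    fig, ax = plt.subplots()
    segments = [np.array([[0, 0], [1, 1]]), np.array([[0, 1], [1, 0]])]
    # Per-segment colors map to one draw command per path.
    ax.add_collection(LineCollection(segments, colors=["red", "blue"]))
    ax.autoscale()
    matplotlib2tikz.save("collection_example.tex")  # illustrative output path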
nschloe/matplotlib2tikz | matplotlib2tikz/line2d.py | _mpl_marker2pgfp_marker | def _mpl_marker2pgfp_marker(data, mpl_marker, marker_face_color):
"""Translates a marker style of matplotlib to the corresponding style
in PGFPlots.
"""
# try default list
try:
pgfplots_marker = _MP_MARKER2PGF_MARKER[mpl_marker]
except KeyError:
pass
else:
if (marker_face_color is not None) and pgfplots_marker == "o":
pgfplots_marker = "*"
data["tikz libs"].add("plotmarks")
marker_options = None
return (data, pgfplots_marker, marker_options)
# try plotmarks list
try:
data["tikz libs"].add("plotmarks")
pgfplots_marker, marker_options = _MP_MARKER2PLOTMARKS[mpl_marker]
except KeyError:
# There's no equivalent for the pixel marker (,) in Pgfplots.
pass
else:
if (
marker_face_color is not None
and (
not isinstance(marker_face_color, str)
or marker_face_color.lower() != "none"
)
and pgfplots_marker not in ["|", "-", "asterisk", "star"]
):
pgfplots_marker += "*"
return (data, pgfplots_marker, marker_options)
return data, None, None | python | def _mpl_marker2pgfp_marker(data, mpl_marker, marker_face_color):
"""Translates a marker style of matplotlib to the corresponding style
in PGFPlots.
"""
# try default list
try:
pgfplots_marker = _MP_MARKER2PGF_MARKER[mpl_marker]
except KeyError:
pass
else:
if (marker_face_color is not None) and pgfplots_marker == "o":
pgfplots_marker = "*"
data["tikz libs"].add("plotmarks")
marker_options = None
return (data, pgfplots_marker, marker_options)
# try plotmarks list
try:
data["tikz libs"].add("plotmarks")
pgfplots_marker, marker_options = _MP_MARKER2PLOTMARKS[mpl_marker]
except KeyError:
# There's no equivalent for the pixel marker (,) in Pgfplots.
pass
else:
if (
marker_face_color is not None
and (
not isinstance(marker_face_color, str)
or marker_face_color.lower() != "none"
)
and pgfplots_marker not in ["|", "-", "asterisk", "star"]
):
pgfplots_marker += "*"
return (data, pgfplots_marker, marker_options)
return data, None, None | [
"def",
"_mpl_marker2pgfp_marker",
"(",
"data",
",",
"mpl_marker",
",",
"marker_face_color",
")",
":",
"# try default list",
"try",
":",
"pgfplots_marker",
"=",
"_MP_MARKER2PGF_MARKER",
"[",
"mpl_marker",
"]",
"except",
"KeyError",
":",
"pass",
"else",
":",
"if",
"... | Translates a marker style of matplotlib to the corresponding style
in PGFPlots. | [
"Translates",
"a",
"marker",
"style",
"of",
"matplotlib",
"to",
"the",
"corresponding",
"style",
"in",
"PGFPlots",
"."
] | ac5daca6f38b834d757f6c6ae6cc34121956f46b | https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/line2d.py#L147-L182 | train | 228,026 |
nschloe/matplotlib2tikz | matplotlib2tikz/text.py | draw_text | def draw_text(data, obj):
"""Paints text on the graph.
"""
content = []
properties = []
style = []
if isinstance(obj, mpl.text.Annotation):
_annotation(obj, data, content)
# 1: coordinates
# 2: properties (shapes, rotation, etc)
# 3: text style
# 4: the text
# -------1--------2---3--4--
pos = obj.get_position()
# from .util import transform_to_data_coordinates
# pos = transform_to_data_coordinates(obj, *pos)
text = obj.get_text()
if text in ["", data["current axis title"]]:
# Text nodes which are direct children of Axes are typically titles. They are
# already captured by the `title` property of pgfplots axes, so skip them here.
return data, content
size = obj.get_size()
bbox = obj.get_bbox_patch()
converter = mpl.colors.ColorConverter()
# without the factor 0.5, the fonts are too big most of the time.
# TODO fix this
scaling = 0.5 * size / data["font size"]
ff = data["float format"]
if scaling != 1.0:
properties.append(("scale=" + ff).format(scaling))
if bbox is not None:
_bbox(bbox, data, properties, scaling)
ha = obj.get_ha()
va = obj.get_va()
anchor = _transform_positioning(ha, va)
if anchor is not None:
properties.append(anchor)
data, col, _ = color.mpl_color2xcolor(data, converter.to_rgb(obj.get_color()))
properties.append("text={}".format(col))
properties.append("rotate={:.1f}".format(obj.get_rotation()))
if obj.get_style() == "italic":
style.append("\\itshape")
else:
assert obj.get_style() == "normal"
# From matplotlib/font_manager.py:
# weight_dict = {
# 'ultralight' : 100,
# 'light' : 200,
# 'normal' : 400,
# 'regular' : 400,
# 'book' : 400,
# 'medium' : 500,
# 'roman' : 500,
# 'semibold' : 600,
# 'demibold' : 600,
# 'demi' : 600,
# 'bold' : 700,
# 'heavy' : 800,
# 'extra bold' : 800,
# 'black' : 900}
#
# get_weights returns a numeric value in the range 0-1000 or one of
# ‘light’, ‘normal’, ‘regular’, ‘book’, ‘medium’, ‘roman’, ‘semibold’,
# ‘demibold’, ‘demi’, ‘bold’, ‘heavy’, ‘extra bold’, ‘black’
weight = obj.get_weight()
if weight in [
"semibold",
"demibold",
"demi",
"bold",
"heavy",
"extra bold",
"black",
] or (isinstance(weight, int) and weight > 550):
style.append("\\bfseries")
# \lfseries isn't that common yet
# elif weight == 'light' or (isinstance(weight, int) and weight < 300):
# style.append('\\lfseries')
if obj.axes:
# If the coordinates are relative to an axis, use `axis cs`.
tikz_pos = ("(axis cs:" + ff + "," + ff + ")").format(*pos)
else:
# relative to the entire figure, it gets a little harder. See
# <http://tex.stackexchange.com/a/274902/13262> for a solution to the
# problem:
tikz_pos = (
"({{$(current bounding box.south west)!" + ff + "!"
"(current bounding box.south east)$}}"
"|-"
"{{$(current bounding box.south west)!" + ff + "!"
"(current bounding box.north west)$}})"
).format(*pos)
if "\n" in text:
# http://tex.stackexchange.com/a/124114/13262
properties.append("align={}".format(ha))
# Manipulating the text here actually goes against mpl2tikz's policy of
# not altering text. On the other hand, newlines should translate into
# newlines.
# We might want to remove this here in the future.
text = text.replace("\n ", "\\\\")
content.append(
"\\node at {}[\n {}\n]{{{}}};\n".format(
tikz_pos, ",\n ".join(properties), " ".join(style + [text])
)
)
return data, content | python | def draw_text(data, obj):
"""Paints text on the graph.
"""
content = []
properties = []
style = []
if isinstance(obj, mpl.text.Annotation):
_annotation(obj, data, content)
# 1: coordinates
# 2: properties (shapes, rotation, etc)
# 3: text style
# 4: the text
# -------1--------2---3--4--
pos = obj.get_position()
# from .util import transform_to_data_coordinates
# pos = transform_to_data_coordinates(obj, *pos)
text = obj.get_text()
if text in ["", data["current axis title"]]:
# Text nodes which are direct children of Axes are typically titles. They are
# already captured by the `title` property of pgfplots axes, so skip them here.
return data, content
size = obj.get_size()
bbox = obj.get_bbox_patch()
converter = mpl.colors.ColorConverter()
# without the factor 0.5, the fonts are too big most of the time.
# TODO fix this
scaling = 0.5 * size / data["font size"]
ff = data["float format"]
if scaling != 1.0:
properties.append(("scale=" + ff).format(scaling))
if bbox is not None:
_bbox(bbox, data, properties, scaling)
ha = obj.get_ha()
va = obj.get_va()
anchor = _transform_positioning(ha, va)
if anchor is not None:
properties.append(anchor)
data, col, _ = color.mpl_color2xcolor(data, converter.to_rgb(obj.get_color()))
properties.append("text={}".format(col))
properties.append("rotate={:.1f}".format(obj.get_rotation()))
if obj.get_style() == "italic":
style.append("\\itshape")
else:
assert obj.get_style() == "normal"
# From matplotlib/font_manager.py:
# weight_dict = {
# 'ultralight' : 100,
# 'light' : 200,
# 'normal' : 400,
# 'regular' : 400,
# 'book' : 400,
# 'medium' : 500,
# 'roman' : 500,
# 'semibold' : 600,
# 'demibold' : 600,
# 'demi' : 600,
# 'bold' : 700,
# 'heavy' : 800,
# 'extra bold' : 800,
# 'black' : 900}
#
# get_weights returns a numeric value in the range 0-1000 or one of
# ‘light’, ‘normal’, ‘regular’, ‘book’, ‘medium’, ‘roman’, ‘semibold’,
# ‘demibold’, ‘demi’, ‘bold’, ‘heavy’, ‘extra bold’, ‘black’
weight = obj.get_weight()
if weight in [
"semibold",
"demibold",
"demi",
"bold",
"heavy",
"extra bold",
"black",
] or (isinstance(weight, int) and weight > 550):
style.append("\\bfseries")
# \lfseries isn't that common yet
# elif weight == 'light' or (isinstance(weight, int) and weight < 300):
# style.append('\\lfseries')
if obj.axes:
# If the coordinates are relative to an axis, use `axis cs`.
tikz_pos = ("(axis cs:" + ff + "," + ff + ")").format(*pos)
else:
# relative to the entire figure, it gets a little harder. See
# <http://tex.stackexchange.com/a/274902/13262> for a solution to the
# problem:
tikz_pos = (
"({{$(current bounding box.south west)!" + ff + "!"
"(current bounding box.south east)$}}"
"|-"
"{{$(current bounding box.south west)!" + ff + "!"
"(current bounding box.north west)$}})"
).format(*pos)
if "\n" in text:
# http://tex.stackexchange.com/a/124114/13262
properties.append("align={}".format(ha))
# Manipulating the text here actually goes against mpl2tikz's policy of
# not altering text. On the other hand, newlines should translate into
# newlines.
# We might want to remove this here in the future.
text = text.replace("\n ", "\\\\")
content.append(
"\\node at {}[\n {}\n]{{{}}};\n".format(
tikz_pos, ",\n ".join(properties), " ".join(style + [text])
)
)
return data, content | [
"def",
"draw_text",
"(",
"data",
",",
"obj",
")",
":",
"content",
"=",
"[",
"]",
"properties",
"=",
"[",
"]",
"style",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"obj",
",",
"mpl",
".",
"text",
".",
"Annotation",
")",
":",
"_annotation",
"(",
"obj",
... | Paints text on the graph. | [
"Paints",
"text",
"on",
"the",
"graph",
"."
] | ac5daca6f38b834d757f6c6ae6cc34121956f46b | https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/text.py#L8-L126 | train | 228,027 |
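Usage sketch: a text node with rotation, bold weight and explicit alignment travels through draw_text when the figure is converted (output path is illustrative):

    import matplotlib.pyplot as plt
    import matplotlib2tikz

    fig, ax = plt.subplots()
    ax.plot(range(5))
    # Rotation, weight and ha/va all end up as TikZ node options.
    ax.text(2, 2, "peak", rotation=45, weight="bold", ha="left", va="bottom")
    matplotlib2tikz.save("text_example.tex")  # illustrative output path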
nschloe/matplotlib2tikz | matplotlib2tikz/text.py | _transform_positioning | def _transform_positioning(ha, va):
"""Converts matplotlib positioning to pgf node positioning.
Not quite accurate but the results are equivalent more or less."""
if ha == "center" and va == "center":
return None
ha_mpl_to_tikz = {"right": "east", "left": "west", "center": ""}
va_mpl_to_tikz = {
"top": "north",
"bottom": "south",
"center": "",
"baseline": "base",
}
return "anchor={} {}".format(va_mpl_to_tikz[va], ha_mpl_to_tikz[ha]).strip() | python | def _transform_positioning(ha, va):
"""Converts matplotlib positioning to pgf node positioning.
Not quite accurate but the results are equivalent more or less."""
if ha == "center" and va == "center":
return None
ha_mpl_to_tikz = {"right": "east", "left": "west", "center": ""}
va_mpl_to_tikz = {
"top": "north",
"bottom": "south",
"center": "",
"baseline": "base",
}
return "anchor={} {}".format(va_mpl_to_tikz[va], ha_mpl_to_tikz[ha]).strip() | [
"def",
"_transform_positioning",
"(",
"ha",
",",
"va",
")",
":",
"if",
"ha",
"==",
"\"center\"",
"and",
"va",
"==",
"\"center\"",
":",
"return",
"None",
"ha_mpl_to_tikz",
"=",
"{",
"\"right\"",
":",
"\"east\"",
",",
"\"left\"",
":",
"\"west\"",
",",
"\"cen... | Converts matplotlib positioning to pgf node positioning.
Not quite accurate but the results are equivalent more or less. | [
"Converts",
"matplotlib",
"positioning",
"to",
"pgf",
"node",
"positioning",
".",
"Not",
"quite",
"accurate",
"but",
"the",
"results",
"are",
"equivalent",
"more",
"or",
"less",
"."
] | ac5daca6f38b834d757f6c6ae6cc34121956f46b | https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/text.py#L129-L142 | train | 228,028 |
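The mapping is small enough to check by hand; expected outputs derived from the two tables in the function (a sketch, not an official test suite):

    from matplotlib2tikz.text import _transform_positioning

    assert _transform_positioning("left", "top") == "anchor=north west"
    assert _transform_positioning("center", "bottom") == "anchor=south"
    # center/center is matplotlib's default and maps to no anchor option.
    assert _transform_positioning("center", "center") is None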
turicas/rows | rows/plugins/plugin_json.py | import_from_json | def import_from_json(filename_or_fobj, encoding="utf-8", *args, **kwargs):
"""Import a JSON file or file-like object into a `rows.Table`.
If a file-like object is provided it MUST be open in text (non-binary) mode
on Python 3 and could be open in either binary or text mode on Python 2.
"""
source = Source.from_file(filename_or_fobj, mode="rb", plugin_name="json", encoding=encoding)
json_obj = json.load(source.fobj, encoding=source.encoding)
field_names = list(json_obj[0].keys())
table_rows = [[item[key] for key in field_names] for item in json_obj]
meta = {"imported_from": "json", "source": source}
return create_table([field_names] + table_rows, meta=meta, *args, **kwargs) | python | def import_from_json(filename_or_fobj, encoding="utf-8", *args, **kwargs):
"""Import a JSON file or file-like object into a `rows.Table`.
If a file-like object is provided it MUST be open in text (non-binary) mode
on Python 3 and could be open in either binary or text mode on Python 2.
"""
source = Source.from_file(filename_or_fobj, mode="rb", plugin_name="json", encoding=encoding)
json_obj = json.load(source.fobj, encoding=source.encoding)
field_names = list(json_obj[0].keys())
table_rows = [[item[key] for key in field_names] for item in json_obj]
meta = {"imported_from": "json", "source": source}
return create_table([field_names] + table_rows, meta=meta, *args, **kwargs) | [
"def",
"import_from_json",
"(",
"filename_or_fobj",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"source",
"=",
"Source",
".",
"from_file",
"(",
"filename_or_fobj",
",",
"mode",
"=",
"\"rb\"",
",",
"plugin_name",
... | Import a JSON file or file-like object into a `rows.Table`.
If a file-like object is provided it MUST be open in text (non-binary) mode
on Python 3 and could be open in either binary or text mode on Python 2. | [
"Import",
"a",
"JSON",
"file",
"or",
"file",
"-",
"like",
"object",
"into",
"a",
"rows",
".",
"Table",
"."
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_json.py#L33-L47 | train | 228,029 |
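Usage sketch, assuming data.json holds a list of flat objects such as [{"name": "Alice", "age": 30}] (the filename is illustrative):

    import rows

    table = rows.import_from_json("data.json")
    for row in table:
        print(row.name, row.age)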
turicas/rows | rows/plugins/plugin_json.py | export_to_json | def export_to_json(
table, filename_or_fobj=None, encoding="utf-8", indent=None, *args, **kwargs
):
"""Export a `rows.Table` to a JSON file or file-like object.
If a file-like object is provided it MUST be open in binary mode (like in
`open('myfile.json', mode='wb')`).
"""
# TODO: will work only if table.fields is OrderedDict
fields = table.fields
prepared_table = prepare_to_export(table, *args, **kwargs)
field_names = next(prepared_table)
data = [
{
field_name: _convert(value, fields[field_name], *args, **kwargs)
for field_name, value in zip(field_names, row)
}
for row in prepared_table
]
result = json.dumps(data, indent=indent)
if type(result) is six.text_type: # Python 3
result = result.encode(encoding)
if indent is not None:
# clean up empty spaces at the end of lines
result = b"\n".join(line.rstrip() for line in result.splitlines())
return export_data(filename_or_fobj, result, mode="wb") | python | def export_to_json(
table, filename_or_fobj=None, encoding="utf-8", indent=None, *args, **kwargs
):
"""Export a `rows.Table` to a JSON file or file-like object.
If a file-like object is provided it MUST be open in binary mode (like in
`open('myfile.json', mode='wb')`).
"""
# TODO: will work only if table.fields is OrderedDict
fields = table.fields
prepared_table = prepare_to_export(table, *args, **kwargs)
field_names = next(prepared_table)
data = [
{
field_name: _convert(value, fields[field_name], *args, **kwargs)
for field_name, value in zip(field_names, row)
}
for row in prepared_table
]
result = json.dumps(data, indent=indent)
if type(result) is six.text_type: # Python 3
result = result.encode(encoding)
if indent is not None:
# clean up empty spaces at the end of lines
result = b"\n".join(line.rstrip() for line in result.splitlines())
return export_data(filename_or_fobj, result, mode="wb") | [
"def",
"export_to_json",
"(",
"table",
",",
"filename_or_fobj",
"=",
"None",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"indent",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# TODO: will work only if table.fields is OrderedDict",
"fields",
"=... | Export a `rows.Table` to a JSON file or file-like object.
If a file-like object is provided it MUST be open in binary mode (like in
`open('myfile.json', mode='wb')`). | [
"Export",
"a",
"rows",
".",
"Table",
"to",
"a",
"JSON",
"file",
"or",
"file",
"-",
"like",
"object",
"."
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_json.py#L68-L97 | train | 228,030 |
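Usage sketch (the output filename is illustrative); indent=2 exercises the trailing-whitespace cleanup shown in the code:

    import rows

    table = rows.import_from_dicts([{"name": "Alice", "age": 30}])
    rows.export_to_json(table, "out.json", indent=2)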
turicas/rows | rows/utils.py | plugin_name_by_uri | def plugin_name_by_uri(uri):
"Return the plugin name based on the URI"
# TODO: parse URIs like 'sqlite://' also
parsed = urlparse(uri)
basename = os.path.basename(parsed.path)
if not basename.strip():
raise RuntimeError("Could not identify file format.")
plugin_name = basename.split(".")[-1].lower()
if plugin_name in FILE_EXTENSIONS:
plugin_name = MIME_TYPE_TO_PLUGIN_NAME[FILE_EXTENSIONS[plugin_name]]
return plugin_name | python | def plugin_name_by_uri(uri):
"Return the plugin name based on the URI"
# TODO: parse URIs like 'sqlite://' also
parsed = urlparse(uri)
basename = os.path.basename(parsed.path)
if not basename.strip():
raise RuntimeError("Could not identify file format.")
plugin_name = basename.split(".")[-1].lower()
if plugin_name in FILE_EXTENSIONS:
plugin_name = MIME_TYPE_TO_PLUGIN_NAME[FILE_EXTENSIONS[plugin_name]]
return plugin_name | [
"def",
"plugin_name_by_uri",
"(",
"uri",
")",
":",
"# TODO: parse URIs like 'sqlite://' also",
"parsed",
"=",
"urlparse",
"(",
"uri",
")",
"basename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"parsed",
".",
"path",
")",
"if",
"not",
"basename",
".",
"st... | Return the plugin name based on the URI | [
"Return",
"the",
"plugin",
"name",
"based",
"on",
"the",
"URI"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L249-L263 | train | 228,031 |
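Expected behaviour as a sketch, assuming the usual csv and json entries are present in FILE_EXTENSIONS/MIME_TYPE_TO_PLUGIN_NAME:

    from rows.utils import plugin_name_by_uri

    assert plugin_name_by_uri("https://example.com/path/data.csv") == "csv"
    assert plugin_name_by_uri("/tmp/report.json") == "json"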
turicas/rows | rows/utils.py | extension_by_source | def extension_by_source(source, mime_type):
"Return the file extension used by this plugin"
# TODO: should get this information from the plugin
extension = source.plugin_name
if extension:
return extension
if mime_type:
return mime_type.split("/")[-1] | python | def extension_by_source(source, mime_type):
"Return the file extension used by this plugin"
# TODO: should get this information from the plugin
extension = source.plugin_name
if extension:
return extension
if mime_type:
return mime_type.split("/")[-1] | [
"def",
"extension_by_source",
"(",
"source",
",",
"mime_type",
")",
":",
"# TODO: should get this information from the plugin",
"extension",
"=",
"source",
".",
"plugin_name",
"if",
"extension",
":",
"return",
"extension",
"if",
"mime_type",
":",
"return",
"mime_type",
... | Return the file extension used by this plugin | [
"Return",
"the",
"file",
"extension",
"used",
"by",
"this",
"plugin"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L266-L275 | train | 228,032 |
turicas/rows | rows/utils.py | plugin_name_by_mime_type | def plugin_name_by_mime_type(mime_type, mime_name, file_extension):
"Return the plugin name based on the MIME type"
return MIME_TYPE_TO_PLUGIN_NAME.get(
normalize_mime_type(mime_type, mime_name, file_extension), None
) | python | def plugin_name_by_mime_type(mime_type, mime_name, file_extension):
"Return the plugin name based on the MIME type"
return MIME_TYPE_TO_PLUGIN_NAME.get(
normalize_mime_type(mime_type, mime_name, file_extension), None
) | [
"def",
"plugin_name_by_mime_type",
"(",
"mime_type",
",",
"mime_name",
",",
"file_extension",
")",
":",
"return",
"MIME_TYPE_TO_PLUGIN_NAME",
".",
"get",
"(",
"normalize_mime_type",
"(",
"mime_type",
",",
"mime_name",
",",
"file_extension",
")",
",",
"None",
")"
] | Return the plugin name based on the MIME type | [
"Return",
"the",
"plugin",
"name",
"based",
"on",
"the",
"MIME",
"type"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L297-L302 | train | 228,033 |
turicas/rows | rows/utils.py | detect_source | def detect_source(uri, verify_ssl, progress, timeout=5):
"""Return a `rows.Source` with information for a given URI
If URI starts with "http" or "https" the file will be downloaded.
This function should only be used if the URI already exists because it's
going to download/open the file to detect its encoding and MIME type.
"""
# TODO: should also support other schemes, like file://, sqlite:// etc.
if uri.lower().startswith("http://") or uri.lower().startswith("https://"):
return download_file(
uri, verify_ssl=verify_ssl, timeout=timeout, progress=progress, detect=True
)
elif uri.startswith("postgres://"):
return Source(
should_delete=False,
encoding=None,
plugin_name="postgresql",
uri=uri,
is_file=False,
local=None,
)
else:
return local_file(uri) | python | def detect_source(uri, verify_ssl, progress, timeout=5):
"""Return a `rows.Source` with information for a given URI
If URI starts with "http" or "https" the file will be downloaded.
This function should only be used if the URI already exists because it's
going to download/open the file to detect its encoding and MIME type.
"""
# TODO: should also support other schemes, like file://, sqlite:// etc.
if uri.lower().startswith("http://") or uri.lower().startswith("https://"):
return download_file(
uri, verify_ssl=verify_ssl, timeout=timeout, progress=progress, detect=True
)
elif uri.startswith("postgres://"):
return Source(
should_delete=False,
encoding=None,
plugin_name="postgresql",
uri=uri,
is_file=False,
local=None,
)
else:
return local_file(uri) | [
"def",
"detect_source",
"(",
"uri",
",",
"verify_ssl",
",",
"progress",
",",
"timeout",
"=",
"5",
")",
":",
"# TODO: should also supporte other schemes, like file://, sqlite:// etc.",
"if",
"uri",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"\"http://\"",
")",
... | Return a `rows.Source` with information for a given URI
If URI starts with "http" or "https" the file will be downloaded.
This function should only be used if the URI already exists because it's
going to download/open the file to detect its encoding and MIME type. | [
"Return",
"a",
"rows",
".",
"Source",
"with",
"information",
"for",
"a",
"given",
"URI"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L439-L465 | train | 228,034 |
turicas/rows | rows/utils.py | import_from_source | def import_from_source(source, default_encoding, *args, **kwargs):
"Import data described in a `rows.Source` into a `rows.Table`"
# TODO: test open_compressed
plugin_name = source.plugin_name
kwargs["encoding"] = (
kwargs.get("encoding", None) or source.encoding or default_encoding
)
try:
import_function = getattr(rows, "import_from_{}".format(plugin_name))
except AttributeError:
raise ValueError('Plugin (import) "{}" not found'.format(plugin_name))
table = import_function(source.uri, *args, **kwargs)
return table | python | def import_from_source(source, default_encoding, *args, **kwargs):
"Import data described in a `rows.Source` into a `rows.Table`"
# TODO: test open_compressed
plugin_name = source.plugin_name
kwargs["encoding"] = (
kwargs.get("encoding", None) or source.encoding or default_encoding
)
try:
import_function = getattr(rows, "import_from_{}".format(plugin_name))
except AttributeError:
raise ValueError('Plugin (import) "{}" not found'.format(plugin_name))
table = import_function(source.uri, *args, **kwargs)
return table | [
"def",
"import_from_source",
"(",
"source",
",",
"default_encoding",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# TODO: test open_compressed",
"plugin_name",
"=",
"source",
".",
"plugin_name",
"kwargs",
"[",
"\"encoding\"",
"]",
"=",
"(",
"kwargs",
... | Import data described in a `rows.Source` into a `rows.Table` | [
"Import",
"data",
"described",
"in",
"a",
"rows",
".",
"Source",
"into",
"a",
"rows",
".",
"Table"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L468-L484 | train | 228,035 |
turicas/rows | rows/utils.py | import_from_uri | def import_from_uri(
uri, default_encoding="utf-8", verify_ssl=True, progress=False, *args, **kwargs
):
"Given an URI, detects plugin and encoding and imports into a `rows.Table`"
# TODO: support '-' also
# TODO: (optimization) if `kwargs.get('encoding', None) is not None` we can
# skip encoding detection.
source = detect_source(uri, verify_ssl=verify_ssl, progress=progress)
return import_from_source(source, default_encoding, *args, **kwargs) | python | def import_from_uri(
uri, default_encoding="utf-8", verify_ssl=True, progress=False, *args, **kwargs
):
"Given an URI, detects plugin and encoding and imports into a `rows.Table`"
# TODO: support '-' also
# TODO: (optimization) if `kwargs.get('encoding', None) is not None` we can
# skip encoding detection.
source = detect_source(uri, verify_ssl=verify_ssl, progress=progress)
return import_from_source(source, default_encoding, *args, **kwargs) | [
"def",
"import_from_uri",
"(",
"uri",
",",
"default_encoding",
"=",
"\"utf-8\"",
",",
"verify_ssl",
"=",
"True",
",",
"progress",
"=",
"False",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# TODO: support '-' also",
"# TODO: (optimization) if `kwargs.get(... | Given an URI, detects plugin and encoding and imports into a `rows.Table` | [
"Given",
"an",
"URI",
"detects",
"plugin",
"and",
"encoding",
"and",
"imports",
"into",
"a",
"rows",
".",
"Table"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L487-L496 | train | 228,036 |
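Usage sketch for a local path (the file is assumed to exist; encoding and plugin detection happen inside detect_source):

    import rows.utils

    table = rows.utils.import_from_uri("data.csv")  # illustrative filename
    print(table.fields)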
turicas/rows | rows/utils.py | open_compressed | def open_compressed(filename, mode="r", encoding=None):
"Return a text-based file object from a filename, even if compressed"
# TODO: integrate this function in the library itself, using
# get_filename_and_fobj
binary_mode = "b" in mode
extension = str(filename).split(".")[-1].lower()
if binary_mode and encoding:
raise ValueError("encoding should not be specified in binary mode")
if extension == "xz":
if lzma is None:
raise RuntimeError("lzma support is not installed")
fobj = lzma.open(filename, mode=mode)
if binary_mode:
return fobj
else:
return io.TextIOWrapper(fobj, encoding=encoding)
elif extension == "gz":
fobj = gzip.GzipFile(filename, mode=mode)
if binary_mode:
return fobj
else:
return io.TextIOWrapper(fobj, encoding=encoding)
elif extension == "bz2":
if bz2 is None:
raise RuntimeError("bzip2 support is not installed")
if binary_mode: # ignore encoding
return bz2.open(filename, mode=mode)
else:
if "t" not in mode:
# For some reason, passing only mode='r' to bzip2 is equivalent
# to 'rb', not 'rt', so we force it here.
mode += "t"
return bz2.open(filename, mode=mode, encoding=encoding)
else:
if binary_mode:
return open(filename, mode=mode)
else:
return open(filename, mode=mode, encoding=encoding) | python | def open_compressed(filename, mode="r", encoding=None):
"Return a text-based file object from a filename, even if compressed"
# TODO: integrate this function in the library itself, using
# get_filename_and_fobj
binary_mode = "b" in mode
extension = str(filename).split(".")[-1].lower()
if binary_mode and encoding:
raise ValueError("encoding should not be specified in binary mode")
if extension == "xz":
if lzma is None:
raise RuntimeError("lzma support is not installed")
fobj = lzma.open(filename, mode=mode)
if binary_mode:
return fobj
else:
return io.TextIOWrapper(fobj, encoding=encoding)
elif extension == "gz":
fobj = gzip.GzipFile(filename, mode=mode)
if binary_mode:
return fobj
else:
return io.TextIOWrapper(fobj, encoding=encoding)
elif extension == "bz2":
if bz2 is None:
raise RuntimeError("bzip2 support is not installed")
if binary_mode: # ignore encoding
return bz2.open(filename, mode=mode)
else:
if "t" not in mode:
# For some reason, passing only mode='r' to bzip2 is equivalent
# to 'rb', not 'rt', so we force it here.
mode += "t"
return bz2.open(filename, mode=mode, encoding=encoding)
else:
if binary_mode:
return open(filename, mode=mode)
else:
return open(filename, mode=mode, encoding=encoding) | [
"def",
"open_compressed",
"(",
"filename",
",",
"mode",
"=",
"\"r\"",
",",
"encoding",
"=",
"None",
")",
":",
"# TODO: integrate this function in the library itself, using",
"# get_filename_and_fobj",
"binary_mode",
"=",
"\"b\"",
"in",
"mode",
"extension",
"=",
"str",
... | Return a text-based file object from a filename, even if compressed | [
"Return",
"a",
"text",
"-",
"based",
"file",
"object",
"from",
"a",
"filename",
"even",
"if",
"compressed"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L513-L557 | train | 228,037 |
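A gzip round trip as a usage sketch; the same call works unchanged for .xz, .bz2 and plain files:

    from rows.utils import open_compressed

    with open_compressed("sample.txt.gz", mode="w", encoding="utf-8") as fobj:
        fobj.write("héllo\n")
    with open_compressed("sample.txt.gz", encoding="utf-8") as fobj:
        print(fobj.read())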
turicas/rows | rows/utils.py | csv_to_sqlite | def csv_to_sqlite(
input_filename,
output_filename,
samples=None,
dialect=None,
batch_size=10000,
encoding="utf-8",
callback=None,
force_types=None,
chunk_size=8388608,
table_name="table1",
schema=None,
):
"Export a CSV file to SQLite, based on field type detection from samples"
# TODO: automatically detect encoding if encoding == `None`
# TODO: should be able to specify fields
# TODO: if table_name is "2019" the final name will be "field_2019" - must
# be "table_2019"
# TODO: if schema is provided and the names are in uppercase, this function
# will fail
if dialect is None: # Get a sample to detect dialect
fobj = open_compressed(input_filename, mode="rb")
sample = fobj.read(chunk_size)
dialect = rows.plugins.csv.discover_dialect(sample, encoding=encoding)
elif isinstance(dialect, six.text_type):
dialect = csv.get_dialect(dialect)
if schema is None: # Identify data types
fobj = open_compressed(input_filename, encoding=encoding)
data = list(islice(csv.DictReader(fobj, dialect=dialect), samples))
schema = rows.import_from_dicts(data).fields
if force_types is not None:
schema.update(force_types)
# Create lazy table object to be converted
# TODO: this laziness feature will be incorporated into the library soon so
# we can call here `rows.import_from_csv` instead of `csv.reader`.
reader = csv.reader(
open_compressed(input_filename, encoding=encoding), dialect=dialect
)
header = make_header(next(reader)) # skip header
table = rows.Table(fields=OrderedDict([(field, schema[field]) for field in header]))
table._rows = reader
# Export to SQLite
return rows.export_to_sqlite(
table,
output_filename,
table_name=table_name,
batch_size=batch_size,
callback=callback,
) | python | def csv_to_sqlite(
input_filename,
output_filename,
samples=None,
dialect=None,
batch_size=10000,
encoding="utf-8",
callback=None,
force_types=None,
chunk_size=8388608,
table_name="table1",
schema=None,
):
"Export a CSV file to SQLite, based on field type detection from samples"
# TODO: automatically detect encoding if encoding == `None`
# TODO: should be able to specify fields
# TODO: if table_name is "2019" the final name will be "field_2019" - must
# be "table_2019"
# TODO: if schema is provided and the names are in uppercase, this function
# will fail
if dialect is None: # Get a sample to detect dialect
fobj = open_compressed(input_filename, mode="rb")
sample = fobj.read(chunk_size)
dialect = rows.plugins.csv.discover_dialect(sample, encoding=encoding)
elif isinstance(dialect, six.text_type):
dialect = csv.get_dialect(dialect)
if schema is None: # Identify data types
fobj = open_compressed(input_filename, encoding=encoding)
data = list(islice(csv.DictReader(fobj, dialect=dialect), samples))
schema = rows.import_from_dicts(data).fields
if force_types is not None:
schema.update(force_types)
# Create lazy table object to be converted
# TODO: this laziness feature will be incorporated into the library soon so
# we can call here `rows.import_from_csv` instead of `csv.reader`.
reader = csv.reader(
open_compressed(input_filename, encoding=encoding), dialect=dialect
)
header = make_header(next(reader)) # skip header
table = rows.Table(fields=OrderedDict([(field, schema[field]) for field in header]))
table._rows = reader
# Export to SQLite
return rows.export_to_sqlite(
table,
output_filename,
table_name=table_name,
batch_size=batch_size,
callback=callback,
) | [
"def",
"csv_to_sqlite",
"(",
"input_filename",
",",
"output_filename",
",",
"samples",
"=",
"None",
",",
"dialect",
"=",
"None",
",",
"batch_size",
"=",
"10000",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"callback",
"=",
"None",
",",
"force_types",
"=",
"None"... | Export a CSV file to SQLite, based on field type detection from samples | [
"Export",
"a",
"CSV",
"file",
"to",
"SQLite",
"based",
"on",
"field",
"type",
"detection",
"from",
"samples"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L560-L613 | train | 228,038 |
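Usage sketch (filenames are illustrative); field types are inferred from the first `samples` rows unless a schema or force_types is given:

    from rows.utils import csv_to_sqlite

    csv_to_sqlite(
        "data.csv.gz",      # compressed input is handled by open_compressed
        "data.sqlite",
        table_name="data",
        samples=5000,
    )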
turicas/rows | rows/utils.py | sqlite_to_csv | def sqlite_to_csv(
input_filename,
table_name,
output_filename,
dialect=csv.excel,
batch_size=10000,
encoding="utf-8",
callback=None,
query=None,
):
"""Export a table inside a SQLite database to CSV"""
# TODO: should be able to specify fields
# TODO: should be able to specify custom query
if isinstance(dialect, six.text_type):
dialect = csv.get_dialect(dialect)
if query is None:
query = "SELECT * FROM {}".format(table_name)
connection = sqlite3.Connection(input_filename)
cursor = connection.cursor()
result = cursor.execute(query)
header = [item[0] for item in cursor.description]
fobj = open_compressed(output_filename, mode="w", encoding=encoding)
writer = csv.writer(fobj, dialect=dialect)
writer.writerow(header)
total_written = 0
for batch in rows.plugins.utils.ipartition(result, batch_size):
writer.writerows(batch)
written = len(batch)
total_written += written
if callback:
callback(written, total_written)
fobj.close() | python | def sqlite_to_csv(
input_filename,
table_name,
output_filename,
dialect=csv.excel,
batch_size=10000,
encoding="utf-8",
callback=None,
query=None,
):
"""Export a table inside a SQLite database to CSV"""
# TODO: should be able to specify fields
# TODO: should be able to specify custom query
if isinstance(dialect, six.text_type):
dialect = csv.get_dialect(dialect)
if query is None:
query = "SELECT * FROM {}".format(table_name)
connection = sqlite3.Connection(input_filename)
cursor = connection.cursor()
result = cursor.execute(query)
header = [item[0] for item in cursor.description]
fobj = open_compressed(output_filename, mode="w", encoding=encoding)
writer = csv.writer(fobj, dialect=dialect)
writer.writerow(header)
total_written = 0
for batch in rows.plugins.utils.ipartition(result, batch_size):
writer.writerows(batch)
written = len(batch)
total_written += written
if callback:
callback(written, total_written)
fobj.close() | [
"def",
"sqlite_to_csv",
"(",
"input_filename",
",",
"table_name",
",",
"output_filename",
",",
"dialect",
"=",
"csv",
".",
"excel",
",",
"batch_size",
"=",
"10000",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"callback",
"=",
"None",
",",
"query",
"=",
"None",
... | Export a table inside a SQLite database to CSV | [
"Export",
"a",
"table",
"inside",
"a",
"SQLite",
"database",
"to",
"CSV"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L616-L650 | train | 228,039 |
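Usage sketch for the inverse direction (filenames are illustrative); a custom `query` replaces the whole-table default:

    from rows.utils import sqlite_to_csv

    sqlite_to_csv(
        "data.sqlite",
        "data",
        "subset.csv.gz",
        query="SELECT * FROM data WHERE age > 30",
    )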
turicas/rows | rows/utils.py | execute_command | def execute_command(command):
"""Execute a command and return its output"""
command = shlex.split(command)
try:
process = subprocess.Popen(
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
except FileNotFoundError:
raise RuntimeError("Command not found: {}".format(repr(command)))
process.wait()
# TODO: may use another codec to decode
if process.returncode > 0:
stderr = process.stderr.read().decode("utf-8")
raise ValueError("Error executing command: {}".format(repr(stderr)))
return process.stdout.read().decode("utf-8") | python | def execute_command(command):
"""Execute a command and return its output"""
command = shlex.split(command)
try:
process = subprocess.Popen(
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
except FileNotFoundError:
raise RuntimeError("Command not found: {}".format(repr(command)))
process.wait()
# TODO: may use another codec to decode
if process.returncode > 0:
stderr = process.stderr.read().decode("utf-8")
raise ValueError("Error executing command: {}".format(repr(stderr)))
return process.stdout.read().decode("utf-8") | [
"def",
"execute_command",
"(",
"command",
")",
":",
"command",
"=",
"shlex",
".",
"split",
"(",
"command",
")",
"try",
":",
"process",
"=",
"subprocess",
".",
"Popen",
"(",
"command",
",",
"stdin",
"=",
"subprocess",
".",
"PIPE",
",",
"stdout",
"=",
"s... | Execute a command and return its output | [
"Execute",
"a",
"command",
"and",
"return",
"its",
"output"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L706-L724 | train | 228,040 |
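Usage sketch, assuming a POSIX system with echo on the PATH:

    from rows.utils import execute_command

    assert execute_command("echo hello") == "hello\n"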
turicas/rows | rows/utils.py | uncompressed_size | def uncompressed_size(filename):
"""Return the uncompressed size for a file by executing commands
Note: due to a limitation in gzip format, uncompressed files greater than
4GiB will have a wrong value.
"""
quoted_filename = shlex.quote(filename)
# TODO: get filetype from file-magic, if available
if str(filename).lower().endswith(".xz"):
output = execute_command('xz --list "{}"'.format(quoted_filename))
compressed, uncompressed = regexp_sizes.findall(output)
value, unit = uncompressed.split()
value = float(value.replace(",", ""))
return int(value * MULTIPLIERS[unit])
elif str(filename).lower().endswith(".gz"):
# XXX: gzip only uses 32 bits to store uncompressed size, so if the
# uncompressed size is greater than 4GiB, the value returned will be
# incorrect.
output = execute_command('gzip --list "{}"'.format(quoted_filename))
lines = [line.split() for line in output.splitlines()]
header, data = lines[0], lines[1]
gzip_data = dict(zip(header, data))
return int(gzip_data["uncompressed"])
else:
raise ValueError('Unrecognized file type for "{}".'.format(filename)) | python | def uncompressed_size(filename):
"""Return the uncompressed size for a file by executing commands
Note: due to a limitation in gzip format, uncompressed files greater than
4GiB will have a wrong value.
"""
quoted_filename = shlex.quote(filename)
# TODO: get filetype from file-magic, if available
if str(filename).lower().endswith(".xz"):
output = execute_command('xz --list "{}"'.format(quoted_filename))
compressed, uncompressed = regexp_sizes.findall(output)
value, unit = uncompressed.split()
value = float(value.replace(",", ""))
return int(value * MULTIPLIERS[unit])
elif str(filename).lower().endswith(".gz"):
# XXX: gzip only uses 32 bits to store uncompressed size, so if the
# uncompressed size is greater than 4GiB, the value returned will be
# incorrect.
output = execute_command('gzip --list "{}"'.format(quoted_filename))
lines = [line.split() for line in output.splitlines()]
header, data = lines[0], lines[1]
gzip_data = dict(zip(header, data))
return int(gzip_data["uncompressed"])
else:
raise ValueError('Unrecognized file type for "{}".'.format(filename)) | [
"def",
"uncompressed_size",
"(",
"filename",
")",
":",
"quoted_filename",
"=",
"shlex",
".",
"quote",
"(",
"filename",
")",
"# TODO: get filetype from file-magic, if available",
"if",
"str",
"(",
"filename",
")",
".",
"lower",
"(",
")",
".",
"endswith",
"(",
"\"... | Return the uncompressed size for a file by executing commands
Note: due to a limitation in gzip format, uncompressed files greater than
4GiB will have a wrong value. | [
"Return",
"the",
"uncompressed",
"size",
"for",
"a",
"file",
"by",
"executing",
"commands"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L727-L755 | train | 228,041 |
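Usage sketch; requires the gzip command-line tool, and per the note above the value is only exact below 4 GiB:

    import gzip
    from rows.utils import uncompressed_size

    with gzip.open("numbers.txt.gz", "wt") as fobj:
        fobj.write("0123456789")  # 10 bytes uncompressed
    assert uncompressed_size("numbers.txt.gz") == 10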
turicas/rows | rows/utils.py | pgimport | def pgimport(
filename,
database_uri,
table_name,
encoding="utf-8",
dialect=None,
create_table=True,
schema=None,
callback=None,
timeout=0.1,
chunk_size=8388608,
max_samples=10000,
):
"""Import data from CSV into PostgreSQL using the fastest method
Required: psql command
"""
fobj = open_compressed(filename, mode="r", encoding=encoding)
sample = fobj.read(chunk_size)
if dialect is None: # Detect dialect
dialect = rows.plugins.csv.discover_dialect(
sample.encode(encoding), encoding=encoding
)
elif isinstance(dialect, six.text_type):
dialect = csv.get_dialect(dialect)
if schema is None:
# Detect field names
reader = csv.reader(io.StringIO(sample), dialect=dialect)
field_names = [slug(field_name) for field_name in next(reader)]
else:
field_names = list(schema.keys())
if create_table:
if schema is None:
data = [
dict(zip(field_names, row))
for row in itertools.islice(reader, max_samples)
]
table = rows.import_from_dicts(data)
field_types = [table.fields[field_name] for field_name in field_names]
else:
field_types = list(schema.values())
columns = [
"{} {}".format(name, POSTGRESQL_TYPES.get(type_, DEFAULT_POSTGRESQL_TYPE))
for name, type_ in zip(field_names, field_types)
]
create_table = SQL_CREATE_TABLE.format(
table_name=table_name, field_types=", ".join(columns)
)
execute_command(get_psql_command(create_table, database_uri=database_uri))
# Prepare the `psql` command to be executed based on collected metadata
command = get_psql_copy_command(
database_uri=database_uri,
dialect=dialect,
direction="FROM",
encoding=encoding,
header=field_names,
table_name=table_name,
)
rows_imported, error = 0, None
fobj = open_compressed(filename, mode="rb")
try:
process = subprocess.Popen(
shlex.split(command),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
data = fobj.read(chunk_size)
total_written = 0
while data != b"":
written = process.stdin.write(data)
total_written += written
if callback:
callback(written, total_written)
data = fobj.read(chunk_size)
stdout, stderr = process.communicate()
if stderr != b"":
raise RuntimeError(stderr.decode("utf-8"))
rows_imported = int(stdout.replace(b"COPY ", b"").strip())
except FileNotFoundError:
raise RuntimeError("Command `psql` not found")
except BrokenPipeError:
raise RuntimeError(process.stderr.read().decode("utf-8"))
return {"bytes_written": total_written, "rows_imported": rows_imported} | python | def pgimport(
filename,
database_uri,
table_name,
encoding="utf-8",
dialect=None,
create_table=True,
schema=None,
callback=None,
timeout=0.1,
chunk_size=8388608,
max_samples=10000,
):
"""Import data from CSV into PostgreSQL using the fastest method
Required: psql command
"""
fobj = open_compressed(filename, mode="r", encoding=encoding)
sample = fobj.read(chunk_size)
if dialect is None: # Detect dialect
dialect = rows.plugins.csv.discover_dialect(
sample.encode(encoding), encoding=encoding
)
elif isinstance(dialect, six.text_type):
dialect = csv.get_dialect(dialect)
if schema is None:
# Detect field names
reader = csv.reader(io.StringIO(sample), dialect=dialect)
field_names = [slug(field_name) for field_name in next(reader)]
else:
field_names = list(schema.keys())
if create_table:
if schema is None:
data = [
dict(zip(field_names, row))
for row in itertools.islice(reader, max_samples)
]
table = rows.import_from_dicts(data)
field_types = [table.fields[field_name] for field_name in field_names]
else:
field_types = list(schema.values())
columns = [
"{} {}".format(name, POSTGRESQL_TYPES.get(type_, DEFAULT_POSTGRESQL_TYPE))
for name, type_ in zip(field_names, field_types)
]
create_table = SQL_CREATE_TABLE.format(
table_name=table_name, field_types=", ".join(columns)
)
execute_command(get_psql_command(create_table, database_uri=database_uri))
# Prepare the `psql` command to be executed based on collected metadata
command = get_psql_copy_command(
database_uri=database_uri,
dialect=dialect,
direction="FROM",
encoding=encoding,
header=field_names,
table_name=table_name,
)
rows_imported, error = 0, None
fobj = open_compressed(filename, mode="rb")
try:
process = subprocess.Popen(
shlex.split(command),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
data = fobj.read(chunk_size)
total_written = 0
while data != b"":
written = process.stdin.write(data)
total_written += written
if callback:
callback(written, total_written)
data = fobj.read(chunk_size)
stdout, stderr = process.communicate()
if stderr != b"":
raise RuntimeError(stderr.decode("utf-8"))
rows_imported = int(stdout.replace(b"COPY ", b"").strip())
except FileNotFoundError:
raise RuntimeError("Command `psql` not found")
except BrokenPipeError:
raise RuntimeError(process.stderr.read().decode("utf-8"))
return {"bytes_written": total_written, "rows_imported": rows_imported} | [
"def",
"pgimport",
"(",
"filename",
",",
"database_uri",
",",
"table_name",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"dialect",
"=",
"None",
",",
"create_table",
"=",
"True",
",",
"schema",
"=",
"None",
",",
"callback",
"=",
"None",
",",
"timeout",
"=",
"... | Import data from CSV into PostgreSQL using the fastest method
Required: psql command | [
"Import",
"data",
"from",
"CSV",
"into",
"PostgreSQL",
"using",
"the",
"fastest",
"method"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L831-L924 | train | 228,042 |
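Usage sketch, assuming a reachable PostgreSQL server and psql on the PATH (the URI and filenames are illustrative):

    from rows.utils import pgimport

    result = pgimport(
        "data.csv.gz",
        "postgres://user:password@localhost/mydb",
        "mytable",
    )
    print(result["rows_imported"], result["bytes_written"])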
turicas/rows | rows/utils.py | pgexport | def pgexport(
database_uri,
table_name,
filename,
encoding="utf-8",
dialect=csv.excel,
callback=None,
timeout=0.1,
chunk_size=8388608,
):
"""Export data from PostgreSQL into a CSV file using the fastest method
Required: psql command
"""
if isinstance(dialect, six.text_type):
dialect = csv.get_dialect(dialect)
# Prepare the `psql` command to be executed to export data
command = get_psql_copy_command(
database_uri=database_uri,
direction="TO",
encoding=encoding,
header=None, # Needed when direction = 'TO'
table_name=table_name,
dialect=dialect,
)
fobj = open_compressed(filename, mode="wb")
try:
process = subprocess.Popen(
shlex.split(command),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
total_written = 0
data = process.stdout.read(chunk_size)
while data != b"":
written = fobj.write(data)
total_written += written
if callback:
callback(written, total_written)
data = process.stdout.read(chunk_size)
stdout, stderr = process.communicate()
if stderr != b"":
raise RuntimeError(stderr.decode("utf-8"))
except FileNotFoundError:
raise RuntimeError("Command `psql` not found")
except BrokenPipeError:
raise RuntimeError(process.stderr.read().decode("utf-8"))
return {"bytes_written": total_written} | python | def pgexport(
database_uri,
table_name,
filename,
encoding="utf-8",
dialect=csv.excel,
callback=None,
timeout=0.1,
chunk_size=8388608,
):
"""Export data from PostgreSQL into a CSV file using the fastest method
Required: psql command
"""
if isinstance(dialect, six.text_type):
dialect = csv.get_dialect(dialect)
# Prepare the `psql` command to be executed to export data
command = get_psql_copy_command(
database_uri=database_uri,
direction="TO",
encoding=encoding,
header=None, # Needed when direction = 'TO'
table_name=table_name,
dialect=dialect,
)
fobj = open_compressed(filename, mode="wb")
try:
process = subprocess.Popen(
shlex.split(command),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
total_written = 0
data = process.stdout.read(chunk_size)
while data != b"":
written = fobj.write(data)
total_written += written
if callback:
callback(written, total_written)
data = process.stdout.read(chunk_size)
stdout, stderr = process.communicate()
if stderr != b"":
raise RuntimeError(stderr.decode("utf-8"))
except FileNotFoundError:
raise RuntimeError("Command `psql` not found")
except BrokenPipeError:
raise RuntimeError(process.stderr.read().decode("utf-8"))
return {"bytes_written": total_written} | [
"def",
"pgexport",
"(",
"database_uri",
",",
"table_name",
",",
"filename",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"dialect",
"=",
"csv",
".",
"excel",
",",
"callback",
"=",
"None",
",",
"timeout",
"=",
"0.1",
",",
"chunk_size",
"=",
"8388608",
",",
")"... | Export data from PostgreSQL into a CSV file using the fastest method
Required: psql command | [
"Export",
"data",
"from",
"PostgreSQL",
"into",
"a",
"CSV",
"file",
"using",
"the",
"fastest",
"method"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L927-L980 | train | 228,043 |
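Usage sketch with the same requirements as pgimport; the COPY TO output is streamed into a compressed file:

    from rows.utils import pgexport

    pgexport(
        "postgres://user:password@localhost/mydb",
        "mytable",
        "mytable.csv.gz",
    )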
turicas/rows | rows/utils.py | load_schema | def load_schema(filename, context=None):
"""Load schema from file in any of the supported formats
The table must have at least the fields `field_name` and `field_type`.
`context` is a `dict` with field_type as key pointing to field class, like:
{"text": rows.fields.TextField, "value": MyCustomField}
"""
table = import_from_uri(filename)
field_names = table.field_names
assert "field_name" in field_names
assert "field_type" in field_names
context = context or {
key.replace("Field", "").lower(): getattr(rows.fields, key)
for key in dir(rows.fields)
if "Field" in key and key != "Field"
}
return OrderedDict(
[
(row.field_name, context[row.field_type])
for row in table
]
) | python | def load_schema(filename, context=None):
"""Load schema from file in any of the supported formats
The table must have at least the fields `field_name` and `field_type`.
`context` is a `dict` with field_type as key pointing to field class, like:
{"text": rows.fields.TextField, "value": MyCustomField}
"""
table = import_from_uri(filename)
field_names = table.field_names
assert "field_name" in field_names
assert "field_type" in field_names
context = context or {
key.replace("Field", "").lower(): getattr(rows.fields, key)
for key in dir(rows.fields)
if "Field" in key and key != "Field"
}
return OrderedDict(
[
(row.field_name, context[row.field_type])
for row in table
]
) | [
"def",
"load_schema",
"(",
"filename",
",",
"context",
"=",
"None",
")",
":",
"table",
"=",
"import_from_uri",
"(",
"filename",
")",
"field_names",
"=",
"table",
".",
"field_names",
"assert",
"\"field_name\"",
"in",
"field_names",
"assert",
"\"field_type\"",
"in... | Load schema from file in any of the supported formats
The table must have at least the fields `field_name` and `field_type`.
`context` is a `dict` with field_type as key pointing to field class, like:
{"text": rows.fields.TextField, "value": MyCustomField} | [
"Load",
"schema",
"from",
"file",
"in",
"any",
"of",
"the",
"supported",
"formats"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/utils.py#L1082-L1104 | train | 228,044 |
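Usage sketch, assuming schema.csv contains the two required columns, e.g. rows `name,text` and `age,integer` under a `field_name,field_type` header (the filename is illustrative):

    import rows
    from rows.utils import load_schema

    schema = load_schema("schema.csv")
    assert schema["age"] is rows.fields.IntegerField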
turicas/rows | rows/fields.py | slug | def slug(text, separator="_", permitted_chars=SLUG_CHARS):
"""Generate a slug for the `text`.
>>> slug(' ÁLVARO justen% ')
'alvaro_justen'
>>> slug(' ÁLVARO justen% ', separator='-')
'alvaro-justen'
"""
text = six.text_type(text or "")
# Strip non-ASCII characters
# Example: u' ÁLVARO justen% ' -> ' ALVARO justen% '
text = normalize("NFKD", text.strip()).encode("ascii", "ignore").decode("ascii")
# Replace word boundaries with separator
text = REGEXP_WORD_BOUNDARY.sub("\\1" + re.escape(separator), text)
# Remove non-permitted characters and put everything to lowercase
# Example: u'_ALVARO__justen%_' -> u'_alvaro__justen_'
allowed_chars = list(permitted_chars) + [separator]
text = "".join(char for char in text if char in allowed_chars).lower()
# Remove double occurrences of separator
# Example: u'_alvaro__justen_' -> u'_alvaro_justen_'
text = (
REGEXP_SEPARATOR
if separator == "_"
else re.compile("(" + re.escape(separator) + "+)")
).sub(separator, text)
# Strip separators
# Example: u'_alvaro_justen_' -> u'alvaro_justen'
return text.strip(separator) | python | def slug(text, separator="_", permitted_chars=SLUG_CHARS):
"""Generate a slug for the `text`.
>>> slug(' ÁLVARO justen% ')
'alvaro_justen'
>>> slug(' ÁLVARO justen% ', separator='-')
'alvaro-justen'
"""
text = six.text_type(text or "")
# Strip non-ASCII characters
# Example: u' ÁLVARO justen% ' -> ' ALVARO justen% '
text = normalize("NFKD", text.strip()).encode("ascii", "ignore").decode("ascii")
# Replace word boundaries with separator
text = REGEXP_WORD_BOUNDARY.sub("\\1" + re.escape(separator), text)
# Remove non-permitted characters and put everything to lowercase
# Example: u'_ALVARO__justen%_' -> u'_alvaro__justen_'
allowed_chars = list(permitted_chars) + [separator]
text = "".join(char for char in text if char in allowed_chars).lower()
# Remove double occurrences of separator
# Example: u'_alvaro__justen_' -> u'_alvaro_justen_'
text = (
REGEXP_SEPARATOR
if separator == "_"
else re.compile("(" + re.escape(separator) + "+)")
).sub(separator, text)
# Strip separators
# Example: u'_alvaro_justen_' -> u'alvaro_justen'
return text.strip(separator) | [
"def",
"slug",
"(",
"text",
",",
"separator",
"=",
"\"_\"",
",",
"permitted_chars",
"=",
"SLUG_CHARS",
")",
":",
"text",
"=",
"six",
".",
"text_type",
"(",
"text",
"or",
"\"\"",
")",
"# Strip non-ASCII characters",
"# Example: u' ÁLVARO justen% ' -> ' ALVARO juste... | Generate a slug for the `text`.
>>> slug(' ÁLVARO justen% ')
'alvaro_justen'
>>> slug(' ÁLVARO justen% ', separator='-')
'alvaro-justen' | [
"Generate",
"a",
"slug",
"for",
"the",
"text",
"."
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/fields.py#L520-L553 | train | 228,045 |
turicas/rows | rows/fields.py | make_unique_name | def make_unique_name(name, existing_names, name_format="{name}_{index}", start=2):
"""Return a unique name based on `name_format` and `name`."""
index = start
new_name = name
while new_name in existing_names:
new_name = name_format.format(name=name, index=index)
index += 1
return new_name | python | def make_unique_name(name, existing_names, name_format="{name}_{index}", start=2):
"""Return a unique name based on `name_format` and `name`."""
index = start
new_name = name
while new_name in existing_names:
new_name = name_format.format(name=name, index=index)
index += 1
return new_name | [
"def",
"make_unique_name",
"(",
"name",
",",
"existing_names",
",",
"name_format",
"=",
"\"{name}_{index}\"",
",",
"start",
"=",
"2",
")",
":",
"index",
"=",
"start",
"new_name",
"=",
"name",
"while",
"new_name",
"in",
"existing_names",
":",
"new_name",
"=",
... | Return a unique name based on `name_format` and `name`. | [
"Return",
"a",
"unique",
"name",
"based",
"on",
"name_format",
"and",
"name",
"."
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/fields.py#L556-L564 | train | 228,046 |
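A minimal usage sketch for `make_unique_name` (the calls and expected results below are illustrative, derived by hand from the record above; they are not part of the dataset):

from rows.fields import make_unique_name

# "title" and "title_2" are taken, so counting starts at `start`
# and the first free formatted name is returned
make_unique_name("title", ["title", "title_2"])  # -> 'title_3'
# No collision: the original name is returned unchanged
make_unique_name("year", ["title"])  # -> 'year'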
turicas/rows | rows/fields.py | make_header | def make_header(field_names, permit_not=False):
"""Return unique and slugged field names."""
slug_chars = SLUG_CHARS if not permit_not else SLUG_CHARS + "^"
header = [
slug(field_name, permitted_chars=slug_chars) for field_name in field_names
]
result = []
for index, field_name in enumerate(header):
if not field_name:
field_name = "field_{}".format(index)
elif field_name[0].isdigit():
field_name = "field_{}".format(field_name)
if field_name in result:
field_name = make_unique_name(
name=field_name, existing_names=result, start=2
)
result.append(field_name)
return result | python | def make_header(field_names, permit_not=False):
"""Return unique and slugged field names."""
slug_chars = SLUG_CHARS if not permit_not else SLUG_CHARS + "^"
header = [
slug(field_name, permitted_chars=slug_chars) for field_name in field_names
]
result = []
for index, field_name in enumerate(header):
if not field_name:
field_name = "field_{}".format(index)
elif field_name[0].isdigit():
field_name = "field_{}".format(field_name)
if field_name in result:
field_name = make_unique_name(
name=field_name, existing_names=result, start=2
)
result.append(field_name)
return result | [
"def",
"make_header",
"(",
"field_names",
",",
"permit_not",
"=",
"False",
")",
":",
"slug_chars",
"=",
"SLUG_CHARS",
"if",
"not",
"permit_not",
"else",
"SLUG_CHARS",
"+",
"\"^\"",
"header",
"=",
"[",
"slug",
"(",
"field_name",
",",
"permitted_chars",
"=",
"... | Return unique and slugged field names. | [
"Return",
"unique",
"and",
"slugged",
"field",
"names",
"."
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/fields.py#L567-L587 | train | 228,047 |
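A usage sketch showing how `make_header` combines `slug` and `make_unique_name` (expected output derived by hand from the record above; illustrative only):

from rows.fields import make_header

# Slugs every name, prefixes names starting with a digit, fills empty
# names with "field_<index>" and de-duplicates repeated names
make_header(["Nome", "2019 Total", "Nome", ""])
# -> ['nome', 'field_2019_total', 'nome_2', 'field_3']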
turicas/rows | rows/fields.py | Field.deserialize | def deserialize(cls, value, *args, **kwargs):
"""Deserialize a value just after importing it
`cls.deserialize` should always return a value of type `cls.TYPE` or
`None`.
"""
if isinstance(value, cls.TYPE):
return value
elif is_null(value):
return None
else:
return value | python | def deserialize(cls, value, *args, **kwargs):
"""Deserialize a value just after importing it
`cls.deserialize` should always return a value of type `cls.TYPE` or
`None`.
"""
if isinstance(value, cls.TYPE):
return value
elif is_null(value):
return None
else:
return value | [
"def",
"deserialize",
"(",
"cls",
",",
"value",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"cls",
".",
"TYPE",
")",
":",
"return",
"value",
"elif",
"is_null",
"(",
"value",
")",
":",
"return",
"None"... | Deserialize a value just after importing it
`cls.deserialize` should always return a value of type `cls.TYPE` or
`None`. | [
"Deserialize",
"a",
"value",
"just",
"after",
"importing",
"it"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/fields.py#L91-L103 | train | 228,048 |
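A sketch of the base `deserialize` contract; `PointField` below is a hypothetical subclass invented here for illustration and is not part of rows:

from rows.fields import Field

class PointField(Field):  # hypothetical subclass, illustration only
    TYPE = (tuple,)

PointField.deserialize((1, 2))  # -> (1, 2): already of cls.TYPE
PointField.deserialize("")      # -> None: is_null() catches empty values
PointField.deserialize("1,2")   # -> '1,2': left for the subclass to parse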
turicas/rows | rows/plugins/plugin_pdf.py | ExtractionAlgorithm.selected_objects | def selected_objects(self):
"""Filter out objects outside table boundaries"""
return [
obj
for obj in self.text_objects
if contains_or_overlap(self.table_bbox, obj.bbox)
] | python | def selected_objects(self):
"""Filter out objects outside table boundaries"""
return [
obj
for obj in self.text_objects
if contains_or_overlap(self.table_bbox, obj.bbox)
] | [
"def",
"selected_objects",
"(",
"self",
")",
":",
"return",
"[",
"obj",
"for",
"obj",
"in",
"self",
".",
"text_objects",
"if",
"contains_or_overlap",
"(",
"self",
".",
"table_bbox",
",",
"obj",
".",
"bbox",
")",
"]"
] | Filter out objects outside table boundaries | [
"Filter",
"out",
"objects",
"outside",
"table",
"boundaries"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_pdf.py#L446-L453 | train | 228,049 |
turicas/rows | examples/library/extract_links.py | transform | def transform(row, table):
'Extract links from "project" field and remove HTML from all'
data = row._asdict()
data["links"] = " ".join(extract_links(row.project))
for key, value in data.items():
if isinstance(value, six.text_type):
data[key] = extract_text(value)
return data | python | def transform(row, table):
'Extract links from "project" field and remove HTML from all'
data = row._asdict()
data["links"] = " ".join(extract_links(row.project))
for key, value in data.items():
if isinstance(value, six.text_type):
data[key] = extract_text(value)
return data | [
"def",
"transform",
"(",
"row",
",",
"table",
")",
":",
"data",
"=",
"row",
".",
"_asdict",
"(",
")",
"data",
"[",
"\"links\"",
"]",
"=",
"\" \"",
".",
"join",
"(",
"extract_links",
"(",
"row",
".",
"project",
")",
")",
"for",
"key",
",",
"value",
... | Extract links from "project" field and remove HTML from all | [
"Extract",
"links",
"from",
"project",
"field",
"and",
"remove",
"HTML",
"from",
"all"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/examples/library/extract_links.py#L24-L32 | train | 228,050 |
turicas/rows | examples/library/brazilian_cities_wikipedia.py | transform | def transform(row, table):
'Transform row "link" into full URL and add "state" based on "name"'
data = row._asdict()
data["link"] = urljoin("https://pt.wikipedia.org", data["link"])
data["name"], data["state"] = regexp_city_state.findall(data["name"])[0]
return data | python | def transform(row, table):
'Transform row "link" into full URL and add "state" based on "name"'
data = row._asdict()
data["link"] = urljoin("https://pt.wikipedia.org", data["link"])
data["name"], data["state"] = regexp_city_state.findall(data["name"])[0]
return data | [
"def",
"transform",
"(",
"row",
",",
"table",
")",
":",
"data",
"=",
"row",
".",
"_asdict",
"(",
")",
"data",
"[",
"\"link\"",
"]",
"=",
"urljoin",
"(",
"\"https://pt.wikipedia.org\"",
",",
"data",
"[",
"\"link\"",
"]",
")",
"data",
"[",
"\"name\"",
"]... | Transform row "link" into full URL and add "state" based on "name" | [
"Transform",
"row",
"link",
"into",
"full",
"URL",
"and",
"add",
"state",
"based",
"on",
"name"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/examples/library/brazilian_cities_wikipedia.py#L34-L40 | train | 228,051 |
turicas/rows | rows/plugins/plugin_parquet.py | import_from_parquet | def import_from_parquet(filename_or_fobj, *args, **kwargs):
"""Import data from a Parquet file and return with rows.Table."""
source = Source.from_file(filename_or_fobj, plugin_name="parquet", mode="rb")
# TODO: should look into `schema.converted_type` also
types = OrderedDict(
[
(schema.name, PARQUET_TO_ROWS[schema.type])
for schema in parquet._read_footer(source.fobj).schema
if schema.type is not None
]
)
header = list(types.keys())
table_rows = list(parquet.reader(source.fobj)) # TODO: be lazy
meta = {"imported_from": "parquet", "source": source}
return create_table(
[header] + table_rows, meta=meta, force_types=types, *args, **kwargs
) | python | def import_from_parquet(filename_or_fobj, *args, **kwargs):
"""Import data from a Parquet file and return with rows.Table."""
source = Source.from_file(filename_or_fobj, plugin_name="parquet", mode="rb")
# TODO: should look into `schema.converted_type` also
types = OrderedDict(
[
(schema.name, PARQUET_TO_ROWS[schema.type])
for schema in parquet._read_footer(source.fobj).schema
if schema.type is not None
]
)
header = list(types.keys())
table_rows = list(parquet.reader(source.fobj)) # TODO: be lazy
meta = {"imported_from": "parquet", "source": source}
return create_table(
[header] + table_rows, meta=meta, force_types=types, *args, **kwargs
) | [
"def",
"import_from_parquet",
"(",
"filename_or_fobj",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"source",
"=",
"Source",
".",
"from_file",
"(",
"filename_or_fobj",
",",
"plugin_name",
"=",
"\"parquet\"",
",",
"mode",
"=",
"\"rb\"",
")",
"# TODO: ... | Import data from a Parquet file and return with rows.Table. | [
"Import",
"data",
"from",
"a",
"Parquet",
"file",
"and",
"return",
"with",
"rows",
".",
"Table",
"."
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_parquet.py#L47-L65 | train | 228,052 |
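A minimal usage sketch ("dataset.parquet" is a hypothetical file with a readable footer schema):

import rows

table = rows.import_from_parquet("dataset.parquet")
for row in table:
    print(row)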
turicas/rows | rows/plugins/dicts.py | import_from_dicts | def import_from_dicts(data, samples=None, *args, **kwargs):
"""Import data from a iterable of dicts
The algorithm will use the `samples` first `dict`s to determine the field
names (if `samples` is `None` all `dict`s will be used).
"""
data = iter(data)
cached_rows, headers = [], []
for index, row in enumerate(data, start=1):
cached_rows.append(row)
for key in row.keys():
if key not in headers:
headers.append(key)
if samples and index == samples:
break
data_rows = (
[row.get(header, None) for header in headers]
for row in chain(cached_rows, data)
)
kwargs["samples"] = samples
meta = {"imported_from": "dicts"}
return create_table(chain([headers], data_rows), meta=meta, *args, **kwargs) | python | def import_from_dicts(data, samples=None, *args, **kwargs):
"""Import data from a iterable of dicts
The algorithm will use the `samples` first `dict`s to determine the field
names (if `samples` is `None` all `dict`s will be used).
"""
data = iter(data)
cached_rows, headers = [], []
for index, row in enumerate(data, start=1):
cached_rows.append(row)
for key in row.keys():
if key not in headers:
headers.append(key)
if samples and index == samples:
break
data_rows = (
[row.get(header, None) for header in headers]
for row in chain(cached_rows, data)
)
kwargs["samples"] = samples
meta = {"imported_from": "dicts"}
return create_table(chain([headers], data_rows), meta=meta, *args, **kwargs) | [
"def",
"import_from_dicts",
"(",
"data",
",",
"samples",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"data",
"=",
"iter",
"(",
"data",
")",
"cached_rows",
",",
"headers",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"index",
",",
"r... | Import data from a iterable of dicts
The algorithm will use the `samples` first `dict`s to determine the field
names (if `samples` is `None` all `dict`s will be used). | [
"Import",
"data",
"from",
"a",
"iterable",
"of",
"dicts"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/dicts.py#L25-L52 | train | 228,053 |
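A usage sketch of header detection across heterogeneous dicts (outputs derived from the code above; `export_to_dicts`, shown in the next record, fills missing keys with None):

import rows

data = [
    {"name": "Brazil", "population": 210000000},
    {"name": "Argentina", "area": 2780400},  # a new key appears later
]
# samples=None, so every dict is inspected and "area" is also detected
table = rows.import_from_dicts(data)
print(table.field_names)  # -> ['name', 'population', 'area']
print(rows.export_to_dicts(table)[1]["population"])  # -> None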
turicas/rows | rows/plugins/dicts.py | export_to_dicts | def export_to_dicts(table, *args, **kwargs):
"""Export a `rows.Table` to a list of dicts"""
field_names = table.field_names
return [{key: getattr(row, key) for key in field_names} for row in table] | python | def export_to_dicts(table, *args, **kwargs):
"""Export a `rows.Table` to a list of dicts"""
field_names = table.field_names
return [{key: getattr(row, key) for key in field_names} for row in table] | [
"def",
"export_to_dicts",
"(",
"table",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"field_names",
"=",
"table",
".",
"field_names",
"return",
"[",
"{",
"key",
":",
"getattr",
"(",
"row",
",",
"key",
")",
"for",
"key",
"in",
"field_names",
"... | Export a `rows.Table` to a list of dicts | [
"Export",
"a",
"rows",
".",
"Table",
"to",
"a",
"list",
"of",
"dicts"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/dicts.py#L55-L58 | train | 228,054 |
turicas/rows | rows/plugins/xls.py | cell_value | def cell_value(sheet, row, col):
"""Return the cell value of the table passed by argument, based in row and column."""
cell = sheet.cell(row, col)
field_type = CELL_TYPES[cell.ctype]
# TODO: this approach will not work if using locale
value = cell.value
if field_type is None:
return None
elif field_type is fields.TextField:
if cell.ctype != xlrd.XL_CELL_BLANK:
return value
else:
return ""
elif field_type is fields.DatetimeField:
if value == 0.0:
return None
try:
time_tuple = xlrd.xldate_as_tuple(value, sheet.book.datemode)
except xlrd.xldate.XLDateTooLarge:
return None
value = field_type.serialize(datetime.datetime(*time_tuple))
return value.split("T00:00:00")[0]
elif field_type is fields.BoolField:
if value == 0:
return False
elif value == 1:
return True
elif cell.xf_index is None:
return value # TODO: test
else:
book = sheet.book
xf = book.xf_list[cell.xf_index]
fmt = book.format_map[xf.format_key]
if fmt.format_str.endswith("%"):
# TODO: we may optimize this approach: we're converting to string
# and the library is detecting the type when we could just say to
# the library this value is PercentField
if value is not None:
try:
decimal_places = len(fmt.format_str[:-1].split(".")[-1])
except IndexError:
decimal_places = 2
return "{}%".format(str(round(value * 100, decimal_places)))
else:
return None
elif type(value) == float and int(value) == value:
return int(value)
else:
return value | python | def cell_value(sheet, row, col):
"""Return the cell value of the table passed by argument, based in row and column."""
cell = sheet.cell(row, col)
field_type = CELL_TYPES[cell.ctype]
# TODO: this approach will not work if using locale
value = cell.value
if field_type is None:
return None
elif field_type is fields.TextField:
if cell.ctype != xlrd.XL_CELL_BLANK:
return value
else:
return ""
elif field_type is fields.DatetimeField:
if value == 0.0:
return None
try:
time_tuple = xlrd.xldate_as_tuple(value, sheet.book.datemode)
except xlrd.xldate.XLDateTooLarge:
return None
value = field_type.serialize(datetime.datetime(*time_tuple))
return value.split("T00:00:00")[0]
elif field_type is fields.BoolField:
if value == 0:
return False
elif value == 1:
return True
elif cell.xf_index is None:
return value # TODO: test
else:
book = sheet.book
xf = book.xf_list[cell.xf_index]
fmt = book.format_map[xf.format_key]
if fmt.format_str.endswith("%"):
# TODO: we may optimize this approach: we're converting to string
# and the library is detecting the type when we could just say to
# the library this value is PercentField
if value is not None:
try:
decimal_places = len(fmt.format_str[:-1].split(".")[-1])
except IndexError:
decimal_places = 2
return "{}%".format(str(round(value * 100, decimal_places)))
else:
return None
elif type(value) == float and int(value) == value:
return int(value)
else:
return value | [
"def",
"cell_value",
"(",
"sheet",
",",
"row",
",",
"col",
")",
":",
"cell",
"=",
"sheet",
".",
"cell",
"(",
"row",
",",
"col",
")",
"field_type",
"=",
"CELL_TYPES",
"[",
"cell",
".",
"ctype",
"]",
"# TODO: this approach will not work if using locale",
"valu... | Return the cell value of the table passed by argument, based in row and column. | [
"Return",
"the",
"cell",
"value",
"of",
"the",
"table",
"passed",
"by",
"argument",
"based",
"in",
"row",
"and",
"column",
"."
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/xls.py#L83-L143 | train | 228,055 |
turicas/rows | rows/plugins/xls.py | import_from_xls | def import_from_xls(
filename_or_fobj,
sheet_name=None,
sheet_index=0,
start_row=None,
start_column=None,
end_row=None,
end_column=None,
*args,
**kwargs
):
"""Return a rows.Table created from imported XLS file."""
source = Source.from_file(filename_or_fobj, mode="rb", plugin_name="xls")
source.fobj.close()
book = xlrd.open_workbook(
source.uri, formatting_info=True, logfile=open(os.devnull, mode="w")
)
if sheet_name is not None:
sheet = book.sheet_by_name(sheet_name)
else:
sheet = book.sheet_by_index(sheet_index)
# TODO: may re-use Excel data types
# Get header and rows
# The xlrd library reads rows and columns starting from 0 and ending on
# sheet.nrows/ncols - 1; rows also uses 0-based indexes, so no
# transformation is needed
min_row, min_column = get_table_start(sheet)
max_row, max_column = sheet.nrows - 1, sheet.ncols - 1
# TODO: consider adding a parameter `ignore_padding=True` and when it's
# True, consider `start_row` starting from `min_row` and `start_column`
# starting from `min_col`.
start_row = max(start_row if start_row is not None else min_row, min_row)
end_row = min(end_row if end_row is not None else max_row, max_row)
start_column = max(
start_column if start_column is not None else min_column, min_column
)
end_column = min(end_column if end_column is not None else max_column, max_column)
table_rows = [
[
cell_value(sheet, row_index, column_index)
for column_index in range(start_column, end_column + 1)
]
for row_index in range(start_row, end_row + 1)
]
meta = {"imported_from": "xls", "source": source, "name": sheet.name}
return create_table(table_rows, meta=meta, *args, **kwargs) | python | def import_from_xls(
filename_or_fobj,
sheet_name=None,
sheet_index=0,
start_row=None,
start_column=None,
end_row=None,
end_column=None,
*args,
**kwargs
):
"""Return a rows.Table created from imported XLS file."""
source = Source.from_file(filename_or_fobj, mode="rb", plugin_name="xls")
source.fobj.close()
book = xlrd.open_workbook(
source.uri, formatting_info=True, logfile=open(os.devnull, mode="w")
)
if sheet_name is not None:
sheet = book.sheet_by_name(sheet_name)
else:
sheet = book.sheet_by_index(sheet_index)
# TODO: may re-use Excel data types
# Get header and rows
# The xlrd library reads rows and columns starting from 0 and ending on
# sheet.nrows/ncols - 1; rows also uses 0-based indexes, so no
# transformation is needed
min_row, min_column = get_table_start(sheet)
max_row, max_column = sheet.nrows - 1, sheet.ncols - 1
# TODO: consider adding a parameter `ignore_padding=True` and when it's
# True, consider `start_row` starting from `min_row` and `start_column`
# starting from `min_col`.
start_row = max(start_row if start_row is not None else min_row, min_row)
end_row = min(end_row if end_row is not None else max_row, max_row)
start_column = max(
start_column if start_column is not None else min_column, min_column
)
end_column = min(end_column if end_column is not None else max_column, max_column)
table_rows = [
[
cell_value(sheet, row_index, column_index)
for column_index in range(start_column, end_column + 1)
]
for row_index in range(start_row, end_row + 1)
]
meta = {"imported_from": "xls", "source": source, "name": sheet.name}
return create_table(table_rows, meta=meta, *args, **kwargs) | [
"def",
"import_from_xls",
"(",
"filename_or_fobj",
",",
"sheet_name",
"=",
"None",
",",
"sheet_index",
"=",
"0",
",",
"start_row",
"=",
"None",
",",
"start_column",
"=",
"None",
",",
"end_row",
"=",
"None",
",",
"end_column",
"=",
"None",
",",
"*",
"args",... | Return a rows.Table created from imported XLS file. | [
"Return",
"a",
"rows",
".",
"Table",
"created",
"from",
"imported",
"XLS",
"file",
"."
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/xls.py#L160-L212 | train | 228,056 |
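A usage sketch ("report.xls" and its layout are assumptions; row/column indexes are 0-based, as the comment in the record explains):

import rows

# Read the second sheet, skipping the first spreadsheet row
table = rows.import_from_xls("report.xls", sheet_index=1, start_row=1)
print(table.field_names)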
turicas/rows | rows/plugins/xls.py | export_to_xls | def export_to_xls(table, filename_or_fobj=None, sheet_name="Sheet1", *args, **kwargs):
"""Export the rows.Table to XLS file and return the saved file."""
workbook = xlwt.Workbook()
sheet = workbook.add_sheet(sheet_name)
prepared_table = prepare_to_export(table, *args, **kwargs)
field_names = next(prepared_table)
for column_index, field_name in enumerate(field_names):
sheet.write(0, column_index, field_name)
_convert_row = _python_to_xls([table.fields.get(field) for field in field_names])
for row_index, row in enumerate(prepared_table, start=1):
for column_index, (value, data) in enumerate(_convert_row(row)):
sheet.write(row_index, column_index, value, **data)
return_result = False
if filename_or_fobj is None:
filename_or_fobj = BytesIO()
return_result = True
source = Source.from_file(filename_or_fobj, mode="wb", plugin_name="xls")
workbook.save(source.fobj)
source.fobj.flush()
if return_result:
source.fobj.seek(0)
result = source.fobj.read()
else:
result = source.fobj
if source.should_close:
source.fobj.close()
return result | python | def export_to_xls(table, filename_or_fobj=None, sheet_name="Sheet1", *args, **kwargs):
"""Export the rows.Table to XLS file and return the saved file."""
workbook = xlwt.Workbook()
sheet = workbook.add_sheet(sheet_name)
prepared_table = prepare_to_export(table, *args, **kwargs)
field_names = next(prepared_table)
for column_index, field_name in enumerate(field_names):
sheet.write(0, column_index, field_name)
_convert_row = _python_to_xls([table.fields.get(field) for field in field_names])
for row_index, row in enumerate(prepared_table, start=1):
for column_index, (value, data) in enumerate(_convert_row(row)):
sheet.write(row_index, column_index, value, **data)
return_result = False
if filename_or_fobj is None:
filename_or_fobj = BytesIO()
return_result = True
source = Source.from_file(filename_or_fobj, mode="wb", plugin_name="xls")
workbook.save(source.fobj)
source.fobj.flush()
if return_result:
source.fobj.seek(0)
result = source.fobj.read()
else:
result = source.fobj
if source.should_close:
source.fobj.close()
return result | [
"def",
"export_to_xls",
"(",
"table",
",",
"filename_or_fobj",
"=",
"None",
",",
"sheet_name",
"=",
"\"Sheet1\"",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"workbook",
"=",
"xlwt",
".",
"Workbook",
"(",
")",
"sheet",
"=",
"workbook",
".",
"a... | Export the rows.Table to XLS file and return the saved file. | [
"Export",
"the",
"rows",
".",
"Table",
"to",
"XLS",
"file",
"and",
"return",
"the",
"saved",
"file",
"."
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/xls.py#L215-L250 | train | 228,057 |
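A usage sketch of the two output modes (per the code above, the XLS content is returned as bytes when no filename/fobj is given):

import rows

table = rows.import_from_dicts([{"a": 1}, {"a": 2}])
data = rows.export_to_xls(table)  # returns the XLS content as bytes
with open("out.xls", "wb") as fobj:  # hypothetical output path
    fobj.write(data)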
turicas/rows | rows/plugins/postgresql.py | _valid_table_name | def _valid_table_name(name):
"""Verify if a given table name is valid for `rows`
Rules:
- Should start with a letter or '_'
- Letters can be capitalized or not
- Accepts letters, numbers and _
"""
if name[0] not in "_" + string.ascii_letters or not set(name).issubset(
"_" + string.ascii_letters + string.digits
):
return False
else:
return True | python | def _valid_table_name(name):
"""Verify if a given table name is valid for `rows`
Rules:
- Should start with a letter or '_'
- Letters can be capitalized or not
- Accepts letters, numbers and _
"""
if name[0] not in "_" + string.ascii_letters or not set(name).issubset(
"_" + string.ascii_letters + string.digits
):
return False
else:
return True | [
"def",
"_valid_table_name",
"(",
"name",
")",
":",
"if",
"name",
"[",
"0",
"]",
"not",
"in",
"\"_\"",
"+",
"string",
".",
"ascii_letters",
"or",
"not",
"set",
"(",
"name",
")",
".",
"issubset",
"(",
"\"_\"",
"+",
"string",
".",
"ascii_letters",
"+",
... | Verify if a given table name is valid for `rows`
Rules:
- Should start with a letter or '_'
- Letters can be capitalized or not
- Accepts letters, numbers and _ | [
"Verify",
"if",
"a",
"given",
"table",
"name",
"is",
"valid",
"for",
"rows"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/postgresql.py#L104-L119 | train | 228,058 |
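A sketch of the validation rules (results derived from the code above):

from rows.plugins.postgresql import _valid_table_name

_valid_table_name("_my_table1")  # -> True
_valid_table_name("1table")      # -> False: starts with a digit
_valid_table_name("my-table")    # -> False: '-' is not a permitted char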
turicas/rows | rows/plugins/txt.py | _parse_col_positions | def _parse_col_positions(frame_style, header_line):
"""Find the position for each column separator in the given line
If frame_style is 'None', this won't work
for column names that _start_ with whitespace
(which includes non-lefthand aligned column titles)
"""
separator = re.escape(FRAMES[frame_style.lower()]["VERTICAL"])
if frame_style == "None":
separator = r"[\s]{2}[^\s]"
# Matches two whitespaces followed by a non-whitespace.
# Our column headers are separated by 3 spaces by default.
col_positions = []
# Abuse the regexp engine to annotate vertical-separator positions
# (the repl lambda ends with `or ""` since re.sub expects a string back):
re.sub(separator, lambda group: col_positions.append(group.start()) or "", header_line)
if frame_style == "None":
col_positions.append(len(header_line) - 1)
return col_positions | python | def _parse_col_positions(frame_style, header_line):
"""Find the position for each column separator in the given line
If frame_style is 'None', this won't work
for column names that _start_ with whitespace
(which includes non-lefthand aligned column titles)
"""
separator = re.escape(FRAMES[frame_style.lower()]["VERTICAL"])
if frame_style == "None":
separator = r"[\s]{2}[^\s]"
# Matches two whitespaces followed by a non-whitespace.
# Our column headers are separated by 3 spaces by default.
col_positions = []
# Abuse the regexp engine to annotate vertical-separator positions
# (the repl lambda ends with `or ""` since re.sub expects a string back):
re.sub(separator, lambda group: col_positions.append(group.start()) or "", header_line)
if frame_style == "None":
col_positions.append(len(header_line) - 1)
return col_positions | [
"def",
"_parse_col_positions",
"(",
"frame_style",
",",
"header_line",
")",
":",
"separator",
"=",
"re",
".",
"escape",
"(",
"FRAMES",
"[",
"frame_style",
".",
"lower",
"(",
")",
"]",
"[",
"\"VERTICAL\"",
"]",
")",
"if",
"frame_style",
"==",
"\"None\"",
":... | Find the position for each column separator in the given line
If frame_style is 'None', this won't work
for column names that _start_ with whitespace
(which includes non-lefthand aligned column titles) | [
"Find",
"the",
"position",
"for",
"each",
"column",
"separator",
"in",
"the",
"given",
"line"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/txt.py#L99-L119 | train | 228,059 |
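A hand-checked sketch of the position collection for an ASCII-framed header line (illustrative; the returned values are the offsets of each '|' in the given line):

from rows.plugins.txt import _parse_col_positions

header = "| id | name |"
_parse_col_positions("ASCII", header)  # -> [0, 5, 12]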
turicas/rows | rows/plugins/txt.py | import_from_txt | def import_from_txt(
filename_or_fobj, encoding="utf-8", frame_style=FRAME_SENTINEL, *args, **kwargs
):
"""Return a rows.Table created from imported TXT file."""
# TODO: (maybe)
# enable parsing of non-fixed-width-columns
# with old algorithm - that would just split columns
# at the vertical separator character for the frame.
# (if doing so, include an optional parameter)
# Also, this fixes an outstanding unreported issue:
# trying to parse tables whose field values
# included a pipe char - "|" - would silently
# yield bad results.
source = Source.from_file(filename_or_fobj, mode="rb", plugin_name="txt", encoding=encoding)
raw_contents = source.fobj.read().decode(encoding).rstrip("\n")
if frame_style is FRAME_SENTINEL:
frame_style = _guess_frame_style(raw_contents)
else:
frame_style = _parse_frame_style(frame_style)
contents = raw_contents.splitlines()
del raw_contents
if frame_style != "None":
contents = contents[1:-1]
del contents[1]
else:
# the table was possibly generated by another source;
# check if the line we reserve as a separator is really empty.
if not contents[1].strip():
del contents[1]
col_positions = _parse_col_positions(frame_style, contents[0])
table_rows = [
[
row[start + 1 : end].strip()
for start, end in zip(col_positions, col_positions[1:])
]
for row in contents
]
meta = {
"imported_from": "txt",
"source": source,
"frame_style": frame_style,
}
return create_table(table_rows, meta=meta, *args, **kwargs) | python | def import_from_txt(
filename_or_fobj, encoding="utf-8", frame_style=FRAME_SENTINEL, *args, **kwargs
):
"""Return a rows.Table created from imported TXT file."""
# TODO: (maybe)
# enable parsing of non-fixed-width-columns
# with old algorithm - that would just split columns
# at the vertical separator character for the frame.
# (if doing so, include an optional parameter)
# Also, this fixes an outstanding unreported issue:
# trying to parse tables whose field values
# included a pipe char - "|" - would silently
# yield bad results.
source = Source.from_file(filename_or_fobj, mode="rb", plugin_name="txt", encoding=encoding)
raw_contents = source.fobj.read().decode(encoding).rstrip("\n")
if frame_style is FRAME_SENTINEL:
frame_style = _guess_frame_style(raw_contents)
else:
frame_style = _parse_frame_style(frame_style)
contents = raw_contents.splitlines()
del raw_contents
if frame_style != "None":
contents = contents[1:-1]
del contents[1]
else:
# the table was possibly generated by another source;
# check if the line we reserve as a separator is really empty.
if not contents[1].strip():
del contents[1]
col_positions = _parse_col_positions(frame_style, contents[0])
table_rows = [
[
row[start + 1 : end].strip()
for start, end in zip(col_positions, col_positions[1:])
]
for row in contents
]
meta = {
"imported_from": "txt",
"source": source,
"frame_style": frame_style,
}
return create_table(table_rows, meta=meta, *args, **kwargs) | [
"def",
"import_from_txt",
"(",
"filename_or_fobj",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"frame_style",
"=",
"FRAME_SENTINEL",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# TODO: (maybe)",
"# enable parsing of non-fixed-width-columns",
"# with old algorithm ... | Return a rows.Table created from imported TXT file. | [
"Return",
"a",
"rows",
".",
"Table",
"created",
"from",
"imported",
"TXT",
"file",
"."
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/txt.py#L130-L179 | train | 228,060 |
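A minimal usage sketch ("table.txt" is a hypothetical file; the frame style is guessed from the content when not passed):

import rows

table = rows.import_from_txt("table.txt")
print(len(table), table.field_names)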
turicas/rows | rows/plugins/txt.py | export_to_txt | def export_to_txt(
table,
filename_or_fobj=None,
encoding=None,
frame_style="ASCII",
safe_none_frame=True,
*args,
**kwargs
):
"""Export a `rows.Table` to text.
This function can return the result as a string or save into a file (via
filename or file-like object).
`encoding` could be `None` if no filename/file-like object is specified,
then the return type will be `six.text_type`.
`frame_style`: will select the frame style to be printed around data.
Valid values are: ('None', 'ASCII', 'single', 'double') - ASCII is default.
Warning: no checks are made to ensure the desired encoding allows the
characters needed by single and double frame styles.
`safe_none_frame`: bool, defaults to True. Affects only output with
frame_style == "None":
column titles are left-aligned and have
whitespace replaced for "_". This enables
the output to be parseable. Otherwise, the generated table will look
prettier but can not be imported back.
"""
# TODO: will work only if table.fields is OrderedDict
frame_style = _parse_frame_style(frame_style)
frame = FRAMES[frame_style.lower()]
serialized_table = serialize(table, *args, **kwargs)
field_names = next(serialized_table)
table_rows = list(serialized_table)
max_sizes = _max_column_sizes(field_names, table_rows)
dashes = [frame["HORIZONTAL"] * (max_sizes[field] + 2) for field in field_names]
if frame_style != "None" or not safe_none_frame:
header = [field.center(max_sizes[field]) for field in field_names]
else:
header = [
field.replace(" ", "_").ljust(max_sizes[field]) for field in field_names
]
header = "{0} {1} {0}".format(
frame["VERTICAL"], " {} ".format(frame["VERTICAL"]).join(header)
)
top_split_line = (
frame["DOWN AND RIGHT"]
+ frame["DOWN AND HORIZONTAL"].join(dashes)
+ frame["DOWN AND LEFT"]
)
body_split_line = (
frame["VERTICAL AND RIGHT"]
+ frame["VERTICAL AND HORIZONTAL"].join(dashes)
+ frame["VERTICAL AND LEFT"]
)
bottom_split_line = (
frame["UP AND RIGHT"]
+ frame["UP AND HORIZONTAL"].join(dashes)
+ frame["UP AND LEFT"]
)
result = []
if frame_style != "None":
result += [top_split_line]
result += [header, body_split_line]
for row in table_rows:
values = [
value.rjust(max_sizes[field_name])
for field_name, value in zip(field_names, row)
]
row_data = " {} ".format(frame["VERTICAL"]).join(values)
result.append("{0} {1} {0}".format(frame["VERTICAL"], row_data))
if frame_style != "None":
result.append(bottom_split_line)
result.append("")
data = "\n".join(result)
if encoding is not None:
data = data.encode(encoding)
return export_data(filename_or_fobj, data, mode="wb") | python | def export_to_txt(
table,
filename_or_fobj=None,
encoding=None,
frame_style="ASCII",
safe_none_frame=True,
*args,
**kwargs
):
"""Export a `rows.Table` to text.
This function can return the result as a string or save into a file (via
filename or file-like object).
`encoding` could be `None` if no filename/file-like object is specified,
then the return type will be `six.text_type`.
`frame_style`: will select the frame style to be printed around data.
Valid values are: ('None', 'ASCII', 'single', 'double') - ASCII is default.
Warning: no checks are made to ensure the desired encoding allows the
characters needed by single and double frame styles.
`safe_none_frame`: bool, defaults to True. Affects only output with
frame_style == "None":
column titles are left-aligned and have
whitespace replaced for "_". This enables
the output to be parseable. Otherwise, the generated table will look
prettier but can not be imported back.
"""
# TODO: will work only if table.fields is OrderedDict
frame_style = _parse_frame_style(frame_style)
frame = FRAMES[frame_style.lower()]
serialized_table = serialize(table, *args, **kwargs)
field_names = next(serialized_table)
table_rows = list(serialized_table)
max_sizes = _max_column_sizes(field_names, table_rows)
dashes = [frame["HORIZONTAL"] * (max_sizes[field] + 2) for field in field_names]
if frame_style != "None" or not safe_none_frame:
header = [field.center(max_sizes[field]) for field in field_names]
else:
header = [
field.replace(" ", "_").ljust(max_sizes[field]) for field in field_names
]
header = "{0} {1} {0}".format(
frame["VERTICAL"], " {} ".format(frame["VERTICAL"]).join(header)
)
top_split_line = (
frame["DOWN AND RIGHT"]
+ frame["DOWN AND HORIZONTAL"].join(dashes)
+ frame["DOWN AND LEFT"]
)
body_split_line = (
frame["VERTICAL AND RIGHT"]
+ frame["VERTICAL AND HORIZONTAL"].join(dashes)
+ frame["VERTICAL AND LEFT"]
)
bottom_split_line = (
frame["UP AND RIGHT"]
+ frame["UP AND HORIZONTAL"].join(dashes)
+ frame["UP AND LEFT"]
)
result = []
if frame_style != "None":
result += [top_split_line]
result += [header, body_split_line]
for row in table_rows:
values = [
value.rjust(max_sizes[field_name])
for field_name, value in zip(field_names, row)
]
row_data = " {} ".format(frame["VERTICAL"]).join(values)
result.append("{0} {1} {0}".format(frame["VERTICAL"], row_data))
if frame_style != "None":
result.append(bottom_split_line)
result.append("")
data = "\n".join(result)
if encoding is not None:
data = data.encode(encoding)
return export_data(filename_or_fobj, data, mode="wb") | [
"def",
"export_to_txt",
"(",
"table",
",",
"filename_or_fobj",
"=",
"None",
",",
"encoding",
"=",
"None",
",",
"frame_style",
"=",
"\"ASCII\"",
",",
"safe_none_frame",
"=",
"True",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# TODO: will work only ... | Export a `rows.Table` to text.
This function can return the result as a string or save into a file (via
filename or file-like object).
`encoding` could be `None` if no filename/file-like object is specified,
then the return type will be `six.text_type`.
`frame_style`: will select the frame style to be printed around data.
Valid values are: ('None', 'ASCII', 'single', 'double') - ASCII is default.
Warning: no checks are made to ensure the desired encoding allows the
characters needed by single and double frame styles.
`safe_none_frame`: bool, defaults to True. Affects only output with
frame_style == "None":
column titles are left-aligned and have
whitespace replaced for "_". This enables
the output to be parseable. Otherwise, the generated table will look
prettier but can not be imported back. | [
"Export",
"a",
"rows",
".",
"Table",
"to",
"text",
"."
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/txt.py#L182-L270 | train | 228,061 |
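A usage sketch of the frame styles (text is returned since no filename or encoding is given):

import rows

table = rows.import_from_dicts([{"name": "rows", "stars": 700}])
print(rows.export_to_txt(table, frame_style="single"))
# frame_style="None" with the default safe_none_frame=True keeps the
# output parseable by import_from_txt
print(rows.export_to_txt(table, frame_style="None"))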
turicas/rows | rows/plugins/sqlite.py | import_from_sqlite | def import_from_sqlite(
filename_or_connection,
table_name="table1",
query=None,
query_args=None,
*args,
**kwargs
):
"""Return a rows.Table with data from SQLite database."""
source = get_source(filename_or_connection)
connection = source.fobj
cursor = connection.cursor()
if query is None:
if not _valid_table_name(table_name):
raise ValueError("Invalid table name: {}".format(table_name))
query = SQL_SELECT_ALL.format(table_name=table_name)
if query_args is None:
query_args = tuple()
table_rows = list(cursor.execute(query, query_args)) # TODO: may be lazy
header = [six.text_type(info[0]) for info in cursor.description]
cursor.close()
# TODO: should close connection also?
meta = {"imported_from": "sqlite", "source": source}
return create_table([header] + table_rows, meta=meta, *args, **kwargs) | python | def import_from_sqlite(
filename_or_connection,
table_name="table1",
query=None,
query_args=None,
*args,
**kwargs
):
"""Return a rows.Table with data from SQLite database."""
source = get_source(filename_or_connection)
connection = source.fobj
cursor = connection.cursor()
if query is None:
if not _valid_table_name(table_name):
raise ValueError("Invalid table name: {}".format(table_name))
query = SQL_SELECT_ALL.format(table_name=table_name)
if query_args is None:
query_args = tuple()
table_rows = list(cursor.execute(query, query_args)) # TODO: may be lazy
header = [six.text_type(info[0]) for info in cursor.description]
cursor.close()
# TODO: should close connection also?
meta = {"imported_from": "sqlite", "source": source}
return create_table([header] + table_rows, meta=meta, *args, **kwargs) | [
"def",
"import_from_sqlite",
"(",
"filename_or_connection",
",",
"table_name",
"=",
"\"table1\"",
",",
"query",
"=",
"None",
",",
"query_args",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"source",
"=",
"get_source",
"(",
"filename_or_c... | Return a rows.Table with data from SQLite database. | [
"Return",
"a",
"rows",
".",
"Table",
"with",
"data",
"from",
"SQLite",
"database",
"."
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/sqlite.py#L140-L168 | train | 228,062 |
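A usage sketch of both access modes ("data.sqlite" and the "cities" table are assumptions):

import rows

whole_table = rows.import_from_sqlite("data.sqlite", table_name="cities")
filtered = rows.import_from_sqlite(
    "data.sqlite",
    query="SELECT * FROM cities WHERE state = ?",
    query_args=("SP",),
)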
turicas/rows | rows/plugins/xlsx.py | _cell_to_python | def _cell_to_python(cell):
"""Convert a PyOpenXL's `Cell` object to the corresponding Python object."""
data_type, value = cell.data_type, cell.value
if type(cell) is EmptyCell:
return None
elif data_type == "f" and value == "=TRUE()":
return True
elif data_type == "f" and value == "=FALSE()":
return False
elif cell.number_format.lower() == "yyyy-mm-dd":
return str(value).split(" 00:00:00")[0]
elif cell.number_format.lower() == "yyyy-mm-dd hh:mm:ss":
return str(value).split(".")[0]
elif cell.number_format.endswith("%") and isinstance(value, Number):
value = Decimal(str(value))
return "{:%}".format(value)
elif value is None:
return ""
else:
return value | python | def _cell_to_python(cell):
"""Convert a PyOpenXL's `Cell` object to the corresponding Python object."""
data_type, value = cell.data_type, cell.value
if type(cell) is EmptyCell:
return None
elif data_type == "f" and value == "=TRUE()":
return True
elif data_type == "f" and value == "=FALSE()":
return False
elif cell.number_format.lower() == "yyyy-mm-dd":
return str(value).split(" 00:00:00")[0]
elif cell.number_format.lower() == "yyyy-mm-dd hh:mm:ss":
return str(value).split(".")[0]
elif cell.number_format.endswith("%") and isinstance(value, Number):
value = Decimal(str(value))
return "{:%}".format(value)
elif value is None:
return ""
else:
return value | [
"def",
"_cell_to_python",
"(",
"cell",
")",
":",
"data_type",
",",
"value",
"=",
"cell",
".",
"data_type",
",",
"cell",
".",
"value",
"if",
"type",
"(",
"cell",
")",
"is",
"EmptyCell",
":",
"return",
"None",
"elif",
"data_type",
"==",
"\"f\"",
"and",
"... | Convert a PyOpenXL's `Cell` object to the corresponding Python object. | [
"Convert",
"a",
"PyOpenXL",
"s",
"Cell",
"object",
"to",
"the",
"corresponding",
"Python",
"object",
"."
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/xlsx.py#L32-L55 | train | 228,063 |
turicas/rows | rows/plugins/xlsx.py | import_from_xlsx | def import_from_xlsx(
filename_or_fobj,
sheet_name=None,
sheet_index=0,
start_row=None,
start_column=None,
end_row=None,
end_column=None,
workbook_kwargs=None,
*args,
**kwargs
):
"""Return a rows.Table created from imported XLSX file.
workbook_kwargs will be passed to openpyxl.load_workbook
"""
workbook_kwargs = workbook_kwargs or {}
if "read_only" not in workbook_kwargs:
workbook_kwargs["read_only"] = True
workbook = load_workbook(filename_or_fobj, **workbook_kwargs)
if sheet_name is None:
sheet_name = workbook.sheetnames[sheet_index]
sheet = workbook[sheet_name]
# The openpyxl library reads rows and columns starting from 1 and ending on
# sheet.max_row/max_col. rows uses 0-based indexes (from 0 to N - 1), so we
# need to adjust the ranges accordingly.
min_row, min_column = sheet.min_row - 1, sheet.min_column - 1
max_row, max_column = sheet.max_row - 1, sheet.max_column - 1
# TODO: consider adding a parameter `ignore_padding=True` and when it's
# True, consider `start_row` starting from `sheet.min_row` and
# `start_column` starting from `sheet.min_col`.
start_row = start_row if start_row is not None else min_row
end_row = end_row if end_row is not None else max_row
start_column = start_column if start_column is not None else min_column
end_column = end_column if end_column is not None else max_column
table_rows = []
is_empty = lambda row: all(cell is None for cell in row)
selected_rows = sheet.iter_rows(
min_row=start_row + 1,
max_row=end_row + 1,
min_col=start_column + 1,
max_col=end_column + 1,
)
for row in selected_rows:
row = [_cell_to_python(cell) for cell in row]
if not is_empty(row):
table_rows.append(row)
source = Source.from_file(filename_or_fobj, plugin_name="xlsx")
source.fobj.close()
# TODO: pass a parameter to Source.from_file so it won't open the file
metadata = {"imported_from": "xlsx", "source": source, "name": sheet_name}
return create_table(table_rows, meta=metadata, *args, **kwargs) | python | def import_from_xlsx(
filename_or_fobj,
sheet_name=None,
sheet_index=0,
start_row=None,
start_column=None,
end_row=None,
end_column=None,
workbook_kwargs=None,
*args,
**kwargs
):
"""Return a rows.Table created from imported XLSX file.
workbook_kwargs will be passed to openpyxl.load_workbook
"""
workbook_kwargs = workbook_kwargs or {}
if "read_only" not in workbook_kwargs:
workbook_kwargs["read_only"] = True
workbook = load_workbook(filename_or_fobj, **workbook_kwargs)
if sheet_name is None:
sheet_name = workbook.sheetnames[sheet_index]
sheet = workbook[sheet_name]
# The openpyxl library reads rows and columns starting from 1 and ending on
# sheet.max_row/max_col. rows uses 0-based indexes (from 0 to N - 1), so we
# need to adjust the ranges accordingly.
min_row, min_column = sheet.min_row - 1, sheet.min_column - 1
max_row, max_column = sheet.max_row - 1, sheet.max_column - 1
# TODO: consider adding a parameter `ignore_padding=True` and when it's
# True, consider `start_row` starting from `sheet.min_row` and
# `start_column` starting from `sheet.min_col`.
start_row = start_row if start_row is not None else min_row
end_row = end_row if end_row is not None else max_row
start_column = start_column if start_column is not None else min_column
end_column = end_column if end_column is not None else max_column
table_rows = []
is_empty = lambda row: all(cell is None for cell in row)
selected_rows = sheet.iter_rows(
min_row=start_row + 1,
max_row=end_row + 1,
min_col=start_column + 1,
max_col=end_column + 1,
)
for row in selected_rows:
row = [_cell_to_python(cell) for cell in row]
if not is_empty(row):
table_rows.append(row)
source = Source.from_file(filename_or_fobj, plugin_name="xlsx")
source.fobj.close()
# TODO: pass a parameter to Source.from_file so it won't open the file
metadata = {"imported_from": "xlsx", "source": source, "name": sheet_name}
return create_table(table_rows, meta=metadata, *args, **kwargs) | [
"def",
"import_from_xlsx",
"(",
"filename_or_fobj",
",",
"sheet_name",
"=",
"None",
",",
"sheet_index",
"=",
"0",
",",
"start_row",
"=",
"None",
",",
"start_column",
"=",
"None",
",",
"end_row",
"=",
"None",
",",
"end_column",
"=",
"None",
",",
"workbook_kwa... | Return a rows.Table created from imported XLSX file.
workbook_kwargs will be passed to openpyxl.load_workbook | [
"Return",
"a",
"rows",
".",
"Table",
"created",
"from",
"imported",
"XLSX",
"file",
"."
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/xlsx.py#L58-L113 | train | 228,064 |
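A usage sketch ("report.xlsx" and its sheet are assumptions; all range parameters are 0-based, as noted in the record):

import rows

table = rows.import_from_xlsx(
    "report.xlsx",
    sheet_name="Sheet1",
    start_row=0,
    end_column=3,  # read only the first four columns
)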
turicas/rows | rows/plugins/xlsx.py | export_to_xlsx | def export_to_xlsx(table, filename_or_fobj=None, sheet_name="Sheet1", *args, **kwargs):
"""Export the rows.Table to XLSX file and return the saved file."""
workbook = Workbook()
sheet = workbook.active
sheet.title = sheet_name
prepared_table = prepare_to_export(table, *args, **kwargs)
# Write header
field_names = next(prepared_table)
for col_index, field_name in enumerate(field_names):
cell = sheet.cell(row=1, column=col_index + 1)
cell.value = field_name
# Write sheet rows
_convert_row = _python_to_cell(list(map(table.fields.get, field_names)))
for row_index, row in enumerate(prepared_table, start=1):
for col_index, (value, number_format) in enumerate(_convert_row(row)):
cell = sheet.cell(row=row_index + 1, column=col_index + 1)
cell.value = value
if number_format is not None:
cell.number_format = number_format
return_result = False
if filename_or_fobj is None:
filename_or_fobj = BytesIO()
return_result = True
source = Source.from_file(filename_or_fobj, mode="wb", plugin_name="xlsx")
workbook.save(source.fobj)
source.fobj.flush()
if return_result:
source.fobj.seek(0)
result = source.fobj.read()
else:
result = source.fobj
if source.should_close:
source.fobj.close()
return result | python | def export_to_xlsx(table, filename_or_fobj=None, sheet_name="Sheet1", *args, **kwargs):
"""Export the rows.Table to XLSX file and return the saved file."""
workbook = Workbook()
sheet = workbook.active
sheet.title = sheet_name
prepared_table = prepare_to_export(table, *args, **kwargs)
# Write header
field_names = next(prepared_table)
for col_index, field_name in enumerate(field_names):
cell = sheet.cell(row=1, column=col_index + 1)
cell.value = field_name
# Write sheet rows
_convert_row = _python_to_cell(list(map(table.fields.get, field_names)))
for row_index, row in enumerate(prepared_table, start=1):
for col_index, (value, number_format) in enumerate(_convert_row(row)):
cell = sheet.cell(row=row_index + 1, column=col_index + 1)
cell.value = value
if number_format is not None:
cell.number_format = number_format
return_result = False
if filename_or_fobj is None:
filename_or_fobj = BytesIO()
return_result = True
source = Source.from_file(filename_or_fobj, mode="wb", plugin_name="xlsx")
workbook.save(source.fobj)
source.fobj.flush()
if return_result:
source.fobj.seek(0)
result = source.fobj.read()
else:
result = source.fobj
if source.should_close:
source.fobj.close()
return result | [
"def",
"export_to_xlsx",
"(",
"table",
",",
"filename_or_fobj",
"=",
"None",
",",
"sheet_name",
"=",
"\"Sheet1\"",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"workbook",
"=",
"Workbook",
"(",
")",
"sheet",
"=",
"workbook",
".",
"active",
"sheet... | Export the rows.Table to XLSX file and return the saved file. | [
"Export",
"the",
"rows",
".",
"Table",
"to",
"XLSX",
"file",
"and",
"return",
"the",
"saved",
"file",
"."
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/xlsx.py#L152-L193 | train | 228,065 |
turicas/rows | examples/library/organizaciones.py | download_organizations | def download_organizations():
"Download organizations JSON and extract its properties"
response = requests.get(URL)
data = response.json()
organizations = [organization["properties"] for organization in data["features"]]
return rows.import_from_dicts(organizations) | python | def download_organizations():
"Download organizations JSON and extract its properties"
response = requests.get(URL)
data = response.json()
organizations = [organization["properties"] for organization in data["features"]]
return rows.import_from_dicts(organizations) | [
"def",
"download_organizations",
"(",
")",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"URL",
")",
"data",
"=",
"response",
".",
"json",
"(",
")",
"organizations",
"=",
"[",
"organization",
"[",
"\"properties\"",
"]",
"for",
"organization",
"in",
"d... | Download organizations JSON and extract its properties | [
"Download",
"organizations",
"JSON",
"and",
"extract",
"its",
"properties"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/examples/library/organizaciones.py#L15-L21 | train | 228,066 |
turicas/rows | rows/plugins/plugin_html.py | import_from_html | def import_from_html(
filename_or_fobj,
encoding="utf-8",
index=0,
ignore_colspan=True,
preserve_html=False,
properties=False,
table_tag="table",
row_tag="tr",
column_tag="td|th",
*args,
**kwargs
):
"""Return rows.Table from HTML file."""
source = Source.from_file(
filename_or_fobj, plugin_name="html", mode="rb", encoding=encoding
)
html = source.fobj.read().decode(source.encoding)
html_tree = document_fromstring(html)
tables = html_tree.xpath("//{}".format(table_tag))
table = tables[index]
# TODO: set meta's "name" from @id or @name (if available)
strip_tags(table, "thead")
strip_tags(table, "tbody")
row_elements = table.xpath(row_tag)
table_rows = [
_get_row(
row,
column_tag=column_tag,
preserve_html=preserve_html,
properties=properties,
)
for row in row_elements
]
if properties:
table_rows[0][-1] = "properties"
if preserve_html and kwargs.get("fields", None) is None:
# The field names will be the first table row, so we need to strip HTML
# from it even if `preserve_html` is `True` (it's `True` only for rows,
# not for the header).
table_rows[0] = list(map(_extract_node_text, row_elements[0]))
if ignore_colspan:
max_columns = max(map(len, table_rows))
table_rows = [row for row in table_rows if len(row) == max_columns]
meta = {"imported_from": "html", "source": source}
return create_table(table_rows, meta=meta, *args, **kwargs) | python | def import_from_html(
filename_or_fobj,
encoding="utf-8",
index=0,
ignore_colspan=True,
preserve_html=False,
properties=False,
table_tag="table",
row_tag="tr",
column_tag="td|th",
*args,
**kwargs
):
"""Return rows.Table from HTML file."""
source = Source.from_file(
filename_or_fobj, plugin_name="html", mode="rb", encoding=encoding
)
html = source.fobj.read().decode(source.encoding)
html_tree = document_fromstring(html)
tables = html_tree.xpath("//{}".format(table_tag))
table = tables[index]
# TODO: set meta's "name" from @id or @name (if available)
strip_tags(table, "thead")
strip_tags(table, "tbody")
row_elements = table.xpath(row_tag)
table_rows = [
_get_row(
row,
column_tag=column_tag,
preserve_html=preserve_html,
properties=properties,
)
for row in row_elements
]
if properties:
table_rows[0][-1] = "properties"
if preserve_html and kwargs.get("fields", None) is None:
# The field names will be the first table row, so we need to strip HTML
# from it even if `preserve_html` is `True` (it's `True` only for rows,
# not for the header).
table_rows[0] = list(map(_extract_node_text, row_elements[0]))
if ignore_colspan:
max_columns = max(map(len, table_rows))
table_rows = [row for row in table_rows if len(row) == max_columns]
meta = {"imported_from": "html", "source": source}
return create_table(table_rows, meta=meta, *args, **kwargs) | [
"def",
"import_from_html",
"(",
"filename_or_fobj",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"index",
"=",
"0",
",",
"ignore_colspan",
"=",
"True",
",",
"preserve_html",
"=",
"False",
",",
"properties",
"=",
"False",
",",
"table_tag",
"=",
"\"table\"",
",",
"... | Return rows.Table from HTML file. | [
"Return",
"rows",
".",
"Table",
"from",
"HTML",
"file",
"."
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_html.py#L68-L121 | train | 228,067 |
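A usage sketch ("page.html" and the table index are assumptions):

import rows

# index picks which <table> of the page to parse; preserve_html keeps
# raw markup inside data cells (but not in the header row)
table = rows.import_from_html("page.html", index=1, preserve_html=True)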
turicas/rows | rows/plugins/plugin_html.py | export_to_html | def export_to_html(
table, filename_or_fobj=None, encoding="utf-8", caption=False, *args, **kwargs
):
"""Export and return rows.Table data to HTML file."""
serialized_table = serialize(table, *args, **kwargs)
fields = next(serialized_table)
result = ["<table>\n\n"]
if caption and table.name:
result.extend([" <caption>", table.name, "</caption>\n\n"])
result.extend([" <thead>\n", " <tr>\n"])
# TODO: set @name/@id if self.meta["name"] is set
header = [" <th> {} </th>\n".format(field) for field in fields]
result.extend(header)
result.extend([" </tr>\n", " </thead>\n", "\n", " <tbody>\n", "\n"])
for index, row in enumerate(serialized_table, start=1):
css_class = "odd" if index % 2 == 1 else "even"
result.append(' <tr class="{}">\n'.format(css_class))
for value in row:
result.extend([" <td> ", escape(value), " </td>\n"])
result.append(" </tr>\n\n")
result.append(" </tbody>\n\n</table>\n")
html = "".join(result).encode(encoding)
return export_data(filename_or_fobj, html, mode="wb") | python | def export_to_html(
table, filename_or_fobj=None, encoding="utf-8", caption=False, *args, **kwargs
):
"""Export and return rows.Table data to HTML file."""
serialized_table = serialize(table, *args, **kwargs)
fields = next(serialized_table)
result = ["<table>\n\n"]
if caption and table.name:
result.extend([" <caption>", table.name, "</caption>\n\n"])
result.extend([" <thead>\n", " <tr>\n"])
# TODO: set @name/@id if self.meta["name"] is set
header = [" <th> {} </th>\n".format(field) for field in fields]
result.extend(header)
result.extend([" </tr>\n", " </thead>\n", "\n", " <tbody>\n", "\n"])
for index, row in enumerate(serialized_table, start=1):
css_class = "odd" if index % 2 == 1 else "even"
result.append(' <tr class="{}">\n'.format(css_class))
for value in row:
result.extend([" <td> ", escape(value), " </td>\n"])
result.append(" </tr>\n\n")
result.append(" </tbody>\n\n</table>\n")
html = "".join(result).encode(encoding)
return export_data(filename_or_fobj, html, mode="wb") | [
"def",
"export_to_html",
"(",
"table",
",",
"filename_or_fobj",
"=",
"None",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"caption",
"=",
"False",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"serialized_table",
"=",
"serialize",
"(",
"table",
",",
"*... | Export and return rows.Table data to HTML file. | [
"Export",
"and",
"return",
"rows",
".",
"Table",
"data",
"to",
"HTML",
"file",
"."
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_html.py#L124-L148 | train | 228,068 |
turicas/rows | rows/plugins/plugin_html.py | _extract_node_text | def _extract_node_text(node):
"""Extract text from a given lxml node."""
texts = map(
six.text_type.strip, map(six.text_type, map(unescape, node.xpath(".//text()")))
)
return " ".join(text for text in texts if text) | python | def _extract_node_text(node):
"""Extract text from a given lxml node."""
texts = map(
six.text_type.strip, map(six.text_type, map(unescape, node.xpath(".//text()")))
)
return " ".join(text for text in texts if text) | [
"def",
"_extract_node_text",
"(",
"node",
")",
":",
"texts",
"=",
"map",
"(",
"six",
".",
"text_type",
".",
"strip",
",",
"map",
"(",
"six",
".",
"text_type",
",",
"map",
"(",
"unescape",
",",
"node",
".",
"xpath",
"(",
"\".//text()\"",
")",
")",
")"... | Extract text from a given lxml node. | [
"Extract",
"text",
"from",
"a",
"given",
"lxml",
"node",
"."
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_html.py#L151-L157 | train | 228,069 |
turicas/rows | rows/plugins/plugin_html.py | count_tables | def count_tables(filename_or_fobj, encoding="utf-8", table_tag="table"):
"""Read a file passed by arg and return your table HTML tag count."""
source = Source.from_file(
filename_or_fobj, plugin_name="html", mode="rb", encoding=encoding
)
html = source.fobj.read().decode(source.encoding)
html_tree = document_fromstring(html)
tables = html_tree.xpath("//{}".format(table_tag))
result = len(tables)
if source.should_close:
source.fobj.close()
return result | python | def count_tables(filename_or_fobj, encoding="utf-8", table_tag="table"):
"""Read a file passed by arg and return your table HTML tag count."""
source = Source.from_file(
filename_or_fobj, plugin_name="html", mode="rb", encoding=encoding
)
html = source.fobj.read().decode(source.encoding)
html_tree = document_fromstring(html)
tables = html_tree.xpath("//{}".format(table_tag))
result = len(tables)
if source.should_close:
source.fobj.close()
return result | [
"def",
"count_tables",
"(",
"filename_or_fobj",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"table_tag",
"=",
"\"table\"",
")",
":",
"source",
"=",
"Source",
".",
"from_file",
"(",
"filename_or_fobj",
",",
"plugin_name",
"=",
"\"html\"",
",",
"mode",
"=",
"\"rb\""... | Read a file passed by arg and return your table HTML tag count. | [
"Read",
"a",
"file",
"passed",
"by",
"arg",
"and",
"return",
"your",
"table",
"HTML",
"tag",
"count",
"."
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_html.py#L160-L174 | train | 228,070 |
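An example sketch for count_tables on an in-memory document (assumes
Source.from_file accepts file-like objects, as the fobj handling in the
entries above suggests):

from io import BytesIO

html = b"<html><body><table></table><table></table></body></html>"
assert count_tables(BytesIO(html)) == 2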
turicas/rows | rows/plugins/plugin_html.py | tag_to_dict | def tag_to_dict(html):
"""Extract tag's attributes into a `dict`."""
element = document_fromstring(html).xpath("//html/body/child::*")[0]
attributes = dict(element.attrib)
attributes["text"] = element.text_content()
return attributes | python | def tag_to_dict(html):
"""Extract tag's attributes into a `dict`."""
element = document_fromstring(html).xpath("//html/body/child::*")[0]
attributes = dict(element.attrib)
attributes["text"] = element.text_content()
return attributes | [
"def",
"tag_to_dict",
"(",
"html",
")",
":",
"element",
"=",
"document_fromstring",
"(",
"html",
")",
".",
"xpath",
"(",
"\"//html/body/child::*\"",
")",
"[",
"0",
"]",
"attributes",
"=",
"dict",
"(",
"element",
".",
"attrib",
")",
"attributes",
"[",
"\"te... | Extract tag's attributes into a `dict`. | [
"Extract",
"tag",
"s",
"attributes",
"into",
"a",
"dict",
"."
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_html.py#L177-L183 | train | 228,071 |
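tag_to_dict flattens one tag into its attributes plus a "text" key, e.g.:

attrs = tag_to_dict('<a href="https://example.com" class="x">Example</a>')
# attrs == {'href': 'https://example.com', 'class': 'x', 'text': 'Example'}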
turicas/rows | rows/plugins/utils.py | create_table | def create_table(
data,
meta=None,
fields=None,
skip_header=True,
import_fields=None,
samples=None,
force_types=None,
max_rows=None,
*args,
**kwargs
):
"""Create a rows.Table object based on data rows and some configurations
- `skip_header` is only used if `fields` is set
    - `samples` is only used if `fields` is `None`. If samples=None, all data
      is loaded into memory - use with caution.
    - `force_types` is only used if `fields` is `None`
    - `import_fields` can be used whether `fields` is set or not; the
      resulting fields will follow its order
- `fields` must always be in the same order as the data
"""
table_rows = iter(data)
force_types = force_types or {}
if import_fields is not None:
import_fields = make_header(import_fields)
# TODO: test max_rows
if fields is None: # autodetect field types
# TODO: may add `type_hints` parameter so autodetection can be easier
# (plugins may specify some possible field types).
header = make_header(next(table_rows))
if samples is not None:
sample_rows = list(islice(table_rows, 0, samples))
table_rows = chain(sample_rows, table_rows)
else:
if max_rows is not None and max_rows > 0:
sample_rows = table_rows = list(islice(table_rows, max_rows))
else:
sample_rows = table_rows = list(table_rows)
# Detect field types using only the desired columns
detected_fields = detect_types(
header,
sample_rows,
skip_indexes=[
index
for index, field in enumerate(header)
if field in force_types or field not in (import_fields or header)
],
*args,
**kwargs
)
# Check if any field was added during detecting process
new_fields = [
field_name
for field_name in detected_fields.keys()
if field_name not in header
]
# Finally create the `fields` with both header and new field names,
        # based on detected fields and `force_types`
fields = OrderedDict(
[
(field_name, detected_fields.get(field_name, TextField))
for field_name in header + new_fields
]
)
fields.update(force_types)
# Update `header` and `import_fields` based on new `fields`
header = list(fields.keys())
if import_fields is None:
import_fields = header
else: # using provided field types
if not isinstance(fields, OrderedDict):
raise ValueError("`fields` must be an `OrderedDict`")
if skip_header:
# If we're skipping the header probably this row is not trustable
# (can be data or garbage).
next(table_rows)
header = make_header(list(fields.keys()))
if import_fields is None:
import_fields = header
fields = OrderedDict(
[(field_name, fields[key]) for field_name, key in zip(header, fields)]
)
diff = set(import_fields) - set(header)
if diff:
field_names = ", ".join('"{}"'.format(field) for field in diff)
raise ValueError("Invalid field names: {}".format(field_names))
fields = OrderedDict(
[(field_name, fields[field_name]) for field_name in import_fields]
)
get_row = get_items(*map(header.index, import_fields))
table = Table(fields=fields, meta=meta)
if max_rows is not None and max_rows > 0:
table_rows = islice(table_rows, max_rows)
table.extend(dict(zip(import_fields, get_row(row))) for row in table_rows)
source = table.meta.get("source", None)
if source is not None:
if source.should_close:
source.fobj.close()
if source.should_delete and Path(source.uri).exists():
unlink(source.uri)
return table | python | def create_table(
data,
meta=None,
fields=None,
skip_header=True,
import_fields=None,
samples=None,
force_types=None,
max_rows=None,
*args,
**kwargs
):
"""Create a rows.Table object based on data rows and some configurations
- `skip_header` is only used if `fields` is set
    - `samples` is only used if `fields` is `None`. If samples=None, all data
      is loaded into memory - use with caution.
    - `force_types` is only used if `fields` is `None`
    - `import_fields` can be used whether `fields` is set or not; the
      resulting fields will follow its order
- `fields` must always be in the same order as the data
"""
table_rows = iter(data)
force_types = force_types or {}
if import_fields is not None:
import_fields = make_header(import_fields)
# TODO: test max_rows
if fields is None: # autodetect field types
# TODO: may add `type_hints` parameter so autodetection can be easier
# (plugins may specify some possible field types).
header = make_header(next(table_rows))
if samples is not None:
sample_rows = list(islice(table_rows, 0, samples))
table_rows = chain(sample_rows, table_rows)
else:
if max_rows is not None and max_rows > 0:
sample_rows = table_rows = list(islice(table_rows, max_rows))
else:
sample_rows = table_rows = list(table_rows)
# Detect field types using only the desired columns
detected_fields = detect_types(
header,
sample_rows,
skip_indexes=[
index
for index, field in enumerate(header)
if field in force_types or field not in (import_fields or header)
],
*args,
**kwargs
)
# Check if any field was added during detecting process
new_fields = [
field_name
for field_name in detected_fields.keys()
if field_name not in header
]
# Finally create the `fields` with both header and new field names,
        # based on detected fields and `force_types`
fields = OrderedDict(
[
(field_name, detected_fields.get(field_name, TextField))
for field_name in header + new_fields
]
)
fields.update(force_types)
# Update `header` and `import_fields` based on new `fields`
header = list(fields.keys())
if import_fields is None:
import_fields = header
else: # using provided field types
if not isinstance(fields, OrderedDict):
raise ValueError("`fields` must be an `OrderedDict`")
if skip_header:
# If we're skipping the header probably this row is not trustable
# (can be data or garbage).
next(table_rows)
header = make_header(list(fields.keys()))
if import_fields is None:
import_fields = header
fields = OrderedDict(
[(field_name, fields[key]) for field_name, key in zip(header, fields)]
)
diff = set(import_fields) - set(header)
if diff:
field_names = ", ".join('"{}"'.format(field) for field in diff)
raise ValueError("Invalid field names: {}".format(field_names))
fields = OrderedDict(
[(field_name, fields[field_name]) for field_name in import_fields]
)
get_row = get_items(*map(header.index, import_fields))
table = Table(fields=fields, meta=meta)
if max_rows is not None and max_rows > 0:
table_rows = islice(table_rows, max_rows)
table.extend(dict(zip(import_fields, get_row(row))) for row in table_rows)
source = table.meta.get("source", None)
if source is not None:
if source.should_close:
source.fobj.close()
if source.should_delete and Path(source.uri).exists():
unlink(source.uri)
return table | [
"def",
"create_table",
"(",
"data",
",",
"meta",
"=",
"None",
",",
"fields",
"=",
"None",
",",
"skip_header",
"=",
"True",
",",
"import_fields",
"=",
"None",
",",
"samples",
"=",
"None",
",",
"force_types",
"=",
"None",
",",
"max_rows",
"=",
"None",
",... | Create a rows.Table object based on data rows and some configurations
- `skip_header` is only used if `fields` is set
    - `samples` is only used if `fields` is `None`. If samples=None, all data
      is loaded into memory - use with caution.
    - `force_types` is only used if `fields` is `None`
    - `import_fields` can be used whether `fields` is set or not; the
      resulting fields will follow its order
- `fields` must always be in the same order as the data | [
"Create",
"a",
"rows",
".",
"Table",
"object",
"based",
"on",
"data",
"rows",
"and",
"some",
"configurations"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/utils.py#L75-L189 | train | 228,072 |
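A usage sketch for create_table with autodetection and force_types (assumes
the rows.fields classes; the data is illustrative):

from rows import fields

data = [["name", "age"], ["Alice", "30"], ["Bob", "25"]]
table = create_table(data)  # "age" is expected to be autodetected as an integer
table = create_table(data, force_types={"age": fields.TextField})
assert table.fields["age"] is fields.TextField  # forced types win over detection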
turicas/rows | rows/plugins/utils.py | export_data | def export_data(filename_or_fobj, data, mode="w"):
"""Return the object ready to be exported or only data if filename_or_fobj is not passed."""
if filename_or_fobj is None:
return data
_, fobj = get_filename_and_fobj(filename_or_fobj, mode=mode)
source = Source.from_file(filename_or_fobj, mode=mode, plugin_name=None)
source.fobj.write(data)
source.fobj.flush()
return source.fobj | python | def export_data(filename_or_fobj, data, mode="w"):
"""Return the object ready to be exported or only data if filename_or_fobj is not passed."""
if filename_or_fobj is None:
return data
_, fobj = get_filename_and_fobj(filename_or_fobj, mode=mode)
source = Source.from_file(filename_or_fobj, mode=mode, plugin_name=None)
source.fobj.write(data)
source.fobj.flush()
return source.fobj | [
"def",
"export_data",
"(",
"filename_or_fobj",
",",
"data",
",",
"mode",
"=",
"\"w\"",
")",
":",
"if",
"filename_or_fobj",
"is",
"None",
":",
"return",
"data",
"_",
",",
"fobj",
"=",
"get_filename_and_fobj",
"(",
"filename_or_fobj",
",",
"mode",
"=",
"mode",... | Return the object ready to be exported or only data if filename_or_fobj is not passed. | [
"Return",
"the",
"object",
"ready",
"to",
"be",
"exported",
"or",
"only",
"data",
"if",
"filename_or_fobj",
"is",
"not",
"passed",
"."
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/utils.py#L236-L246 | train | 228,073 |
turicas/rows | rows/plugins/plugin_csv.py | read_sample | def read_sample(fobj, sample):
"""Read `sample` bytes from `fobj` and return the cursor to where it was."""
cursor = fobj.tell()
data = fobj.read(sample)
fobj.seek(cursor)
return data | python | def read_sample(fobj, sample):
"""Read `sample` bytes from `fobj` and return the cursor to where it was."""
cursor = fobj.tell()
data = fobj.read(sample)
fobj.seek(cursor)
return data | [
"def",
"read_sample",
"(",
"fobj",
",",
"sample",
")",
":",
"cursor",
"=",
"fobj",
".",
"tell",
"(",
")",
"data",
"=",
"fobj",
".",
"read",
"(",
"sample",
")",
"fobj",
".",
"seek",
"(",
"cursor",
")",
"return",
"data"
] | Read `sample` bytes from `fobj` and return the cursor to where it was. | [
"Read",
"sample",
"bytes",
"from",
"fobj",
"and",
"return",
"the",
"cursor",
"to",
"where",
"it",
"was",
"."
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_csv.py#L103-L108 | train | 228,074 |
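read_sample peeks without consuming, e.g.:

from io import BytesIO

fobj = BytesIO(b"abcdef")
assert read_sample(fobj, 3) == b"abc"
assert fobj.tell() == 0  # cursor restored to where it was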
turicas/rows | rows/plugins/plugin_csv.py | export_to_csv | def export_to_csv(
table,
filename_or_fobj=None,
encoding="utf-8",
dialect=unicodecsv.excel,
batch_size=100,
callback=None,
*args,
**kwargs
):
"""Export a `rows.Table` to a CSV file.
If a file-like object is provided it MUST be in binary mode, like in
`open(filename, mode='wb')`.
    If no filename/fobj is provided, the function returns the CSV
    contents as bytes.
"""
# TODO: will work only if table.fields is OrderedDict
# TODO: should use fobj? What about creating a method like json.dumps?
return_data, should_close = False, None
if filename_or_fobj is None:
filename_or_fobj = BytesIO()
return_data = should_close = True
source = Source.from_file(
filename_or_fobj,
plugin_name="csv",
mode="wb",
encoding=encoding,
should_close=should_close,
)
# TODO: may use `io.BufferedWriter` instead of `ipartition` so user can
    # choose the real size (in Bytes) at which to flush to the file system,
    # instead of the number of rows
writer = unicodecsv.writer(source.fobj, encoding=encoding, dialect=dialect)
if callback is None:
for batch in ipartition(serialize(table, *args, **kwargs), batch_size):
writer.writerows(batch)
else:
serialized = serialize(table, *args, **kwargs)
writer.writerow(next(serialized)) # First, write the header
total = 0
for batch in ipartition(serialized, batch_size):
writer.writerows(batch)
total += len(batch)
callback(total)
if return_data:
source.fobj.seek(0)
result = source.fobj.read()
else:
source.fobj.flush()
result = source.fobj
if source.should_close:
source.fobj.close()
return result | python | def export_to_csv(
table,
filename_or_fobj=None,
encoding="utf-8",
dialect=unicodecsv.excel,
batch_size=100,
callback=None,
*args,
**kwargs
):
"""Export a `rows.Table` to a CSV file.
If a file-like object is provided it MUST be in binary mode, like in
`open(filename, mode='wb')`.
    If no filename/fobj is provided, the function returns the CSV
    contents as bytes.
"""
# TODO: will work only if table.fields is OrderedDict
# TODO: should use fobj? What about creating a method like json.dumps?
return_data, should_close = False, None
if filename_or_fobj is None:
filename_or_fobj = BytesIO()
return_data = should_close = True
source = Source.from_file(
filename_or_fobj,
plugin_name="csv",
mode="wb",
encoding=encoding,
should_close=should_close,
)
# TODO: may use `io.BufferedWriter` instead of `ipartition` so user can
    # choose the real size (in Bytes) at which to flush to the file system,
    # instead of the number of rows
writer = unicodecsv.writer(source.fobj, encoding=encoding, dialect=dialect)
if callback is None:
for batch in ipartition(serialize(table, *args, **kwargs), batch_size):
writer.writerows(batch)
else:
serialized = serialize(table, *args, **kwargs)
writer.writerow(next(serialized)) # First, write the header
total = 0
for batch in ipartition(serialized, batch_size):
writer.writerows(batch)
total += len(batch)
callback(total)
if return_data:
source.fobj.seek(0)
result = source.fobj.read()
else:
source.fobj.flush()
result = source.fobj
if source.should_close:
source.fobj.close()
return result | [
"def",
"export_to_csv",
"(",
"table",
",",
"filename_or_fobj",
"=",
"None",
",",
"encoding",
"=",
"\"utf-8\"",
",",
"dialect",
"=",
"unicodecsv",
".",
"excel",
",",
"batch_size",
"=",
"100",
",",
"callback",
"=",
"None",
",",
"*",
"args",
",",
"*",
"*",
... | Export a `rows.Table` to a CSV file.
If a file-like object is provided it MUST be in binary mode, like in
`open(filename, mode='wb')`.
    If no filename/fobj is provided, the function returns the CSV
    contents as bytes. | [
"Export",
"a",
"rows",
".",
"Table",
"to",
"a",
"CSV",
"file",
"."
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_csv.py#L139-L201 | train | 228,075 |
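A sketch of the export_to_csv callback hook (`table` stands for any
rows.Table, e.g. one built with create_table above):

def report(total):  # called after each batch of `batch_size` rows
    print("exported {} rows so far".format(total))

csv_bytes = export_to_csv(table, batch_size=50, callback=report)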
turicas/rows | rows/operations.py | join | def join(keys, tables):
"""Merge a list of `Table` objects using `keys` to group rows"""
# Make new (merged) Table fields
fields = OrderedDict()
for table in tables:
fields.update(table.fields)
# TODO: may raise an error if a same field is different in some tables
# Check if all keys are inside merged Table's fields
fields_keys = set(fields.keys())
for key in keys:
if key not in fields_keys:
raise ValueError('Invalid key: "{}"'.format(key))
    # Group rows by key, preserving ordering
none_fields = lambda: OrderedDict({field: None for field in fields.keys()})
data = OrderedDict()
for table in tables:
for row in table:
row_key = tuple([getattr(row, key) for key in keys])
if row_key not in data:
data[row_key] = none_fields()
data[row_key].update(row._asdict())
merged = Table(fields=fields)
merged.extend(data.values())
return merged | python | def join(keys, tables):
"""Merge a list of `Table` objects using `keys` to group rows"""
# Make new (merged) Table fields
fields = OrderedDict()
for table in tables:
fields.update(table.fields)
# TODO: may raise an error if a same field is different in some tables
# Check if all keys are inside merged Table's fields
fields_keys = set(fields.keys())
for key in keys:
if key not in fields_keys:
raise ValueError('Invalid key: "{}"'.format(key))
    # Group rows by key, preserving ordering
none_fields = lambda: OrderedDict({field: None for field in fields.keys()})
data = OrderedDict()
for table in tables:
for row in table:
row_key = tuple([getattr(row, key) for key in keys])
if row_key not in data:
data[row_key] = none_fields()
data[row_key].update(row._asdict())
merged = Table(fields=fields)
merged.extend(data.values())
return merged | [
"def",
"join",
"(",
"keys",
",",
"tables",
")",
":",
"# Make new (merged) Table fields",
"fields",
"=",
"OrderedDict",
"(",
")",
"for",
"table",
"in",
"tables",
":",
"fields",
".",
"update",
"(",
"table",
".",
"fields",
")",
"# TODO: may raise an error if a same... | Merge a list of `Table` objects using `keys` to group rows | [
"Merge",
"a",
"list",
"of",
"Table",
"objects",
"using",
"keys",
"to",
"group",
"rows"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/operations.py#L26-L53 | train | 228,076 |
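A join sketch (import paths assumed from the rows package layout used above):

from collections import OrderedDict
from rows import fields
from rows.table import Table

people = Table(fields=OrderedDict([("id", fields.IntegerField),
                                   ("name", fields.TextField)]))
people.extend([{"id": 1, "name": "Alice"}])
ages = Table(fields=OrderedDict([("id", fields.IntegerField),
                                 ("age", fields.IntegerField)]))
ages.extend([{"id": 1, "age": 30}])
merged = join(keys=["id"], tables=[people, ages])
# merged.fields has id, name and age; both rows were merged under id=1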
turicas/rows | rows/operations.py | transform | def transform(fields, function, *tables):
"Return a new table based on other tables and a transformation function"
new_table = Table(fields=fields)
for table in tables:
for row in filter(bool, map(lambda row: function(row, table), table)):
new_table.append(row)
return new_table | python | def transform(fields, function, *tables):
"Return a new table based on other tables and a transformation function"
new_table = Table(fields=fields)
for table in tables:
for row in filter(bool, map(lambda row: function(row, table), table)):
new_table.append(row)
return new_table | [
"def",
"transform",
"(",
"fields",
",",
"function",
",",
"*",
"tables",
")",
":",
"new_table",
"=",
"Table",
"(",
"fields",
"=",
"fields",
")",
"for",
"table",
"in",
"tables",
":",
"for",
"row",
"in",
"filter",
"(",
"bool",
",",
"map",
"(",
"lambda",... | Return a new table based on other tables and a transformation function | [
"Return",
"a",
"new",
"table",
"based",
"on",
"other",
"tables",
"and",
"a",
"transformation",
"function"
] | c74da41ae9ed091356b803a64f8a30c641c5fc45 | https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/operations.py#L56-L65 | train | 228,077 |
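A transform sketch that filters rows (Table/fields as in the join sketch):

from collections import OrderedDict
from rows import fields
from rows.table import Table

people = Table(fields=OrderedDict([("name", fields.TextField),
                                   ("age", fields.IntegerField)]))
people.extend([{"name": "Alice", "age": 30}, {"name": "Bob", "age": 12}])

def adults_only(row, table):  # falsy return values drop the row
    return row._asdict() if row.age >= 18 else None

adults = transform(people.fields, adults_only, people)  # keeps only Alice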
jupyterhub/kubespawner | kubespawner/objects.py | make_pvc | def make_pvc(
name,
storage_class,
access_modes,
storage,
labels=None,
annotations=None,
):
"""
Make a k8s pvc specification for running a user notebook.
Parameters
----------
name:
Name of persistent volume claim. Must be unique within the namespace the object is
going to be created in. Must be a valid DNS label.
storage_class:
String of the name of the k8s Storage Class to use.
access_modes:
        A list specifying what access modes the pod should have towards the pvc
storage:
        The amount of storage needed for the pvc
"""
pvc = V1PersistentVolumeClaim()
pvc.kind = "PersistentVolumeClaim"
pvc.api_version = "v1"
pvc.metadata = V1ObjectMeta()
pvc.metadata.name = name
pvc.metadata.annotations = (annotations or {}).copy()
pvc.metadata.labels = (labels or {}).copy()
pvc.spec = V1PersistentVolumeClaimSpec()
pvc.spec.access_modes = access_modes
pvc.spec.resources = V1ResourceRequirements()
pvc.spec.resources.requests = {"storage": storage}
if storage_class:
pvc.metadata.annotations.update({"volume.beta.kubernetes.io/storage-class": storage_class})
pvc.spec.storage_class_name = storage_class
return pvc | python | def make_pvc(
name,
storage_class,
access_modes,
storage,
labels=None,
annotations=None,
):
"""
Make a k8s pvc specification for running a user notebook.
Parameters
----------
name:
Name of persistent volume claim. Must be unique within the namespace the object is
going to be created in. Must be a valid DNS label.
storage_class:
String of the name of the k8s Storage Class to use.
access_modes:
        A list specifying what access modes the pod should have towards the pvc
storage:
        The amount of storage needed for the pvc
"""
pvc = V1PersistentVolumeClaim()
pvc.kind = "PersistentVolumeClaim"
pvc.api_version = "v1"
pvc.metadata = V1ObjectMeta()
pvc.metadata.name = name
pvc.metadata.annotations = (annotations or {}).copy()
pvc.metadata.labels = (labels or {}).copy()
pvc.spec = V1PersistentVolumeClaimSpec()
pvc.spec.access_modes = access_modes
pvc.spec.resources = V1ResourceRequirements()
pvc.spec.resources.requests = {"storage": storage}
if storage_class:
pvc.metadata.annotations.update({"volume.beta.kubernetes.io/storage-class": storage_class})
pvc.spec.storage_class_name = storage_class
return pvc | [
"def",
"make_pvc",
"(",
"name",
",",
"storage_class",
",",
"access_modes",
",",
"storage",
",",
"labels",
"=",
"None",
",",
"annotations",
"=",
"None",
",",
")",
":",
"pvc",
"=",
"V1PersistentVolumeClaim",
"(",
")",
"pvc",
".",
"kind",
"=",
"\"PersistentVo... | Make a k8s pvc specification for running a user notebook.
Parameters
----------
name:
Name of persistent volume claim. Must be unique within the namespace the object is
going to be created in. Must be a valid DNS label.
storage_class:
String of the name of the k8s Storage Class to use.
access_modes:
        A list specifying what access modes the pod should have towards the pvc
storage:
        The amount of storage needed for the pvc | [
"Make",
"a",
"k8s",
"pvc",
"specification",
"for",
"running",
"a",
"user",
"notebook",
"."
] | 46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13 | https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/objects.py#L393-L433 | train | 228,078 |
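A make_pvc sketch (names and sizes are illustrative):

pvc = make_pvc(name="claim-alice", storage_class="standard",
               access_modes=["ReadWriteOnce"], storage="10Gi")
# pvc.spec.resources.requests == {"storage": "10Gi"}; both the storage-class
# annotation and spec.storage_class_name are set to "standard"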
jupyterhub/kubespawner | kubespawner/objects.py | make_ingress | def make_ingress(
name,
routespec,
target,
data
):
"""
Returns an ingress, service, endpoint object that'll work for this service
"""
# move beta imports here,
# which are more sensitive to kubernetes version
# and will change when they move out of beta
from kubernetes.client.models import (
V1beta1Ingress, V1beta1IngressSpec, V1beta1IngressRule,
V1beta1HTTPIngressRuleValue, V1beta1HTTPIngressPath,
V1beta1IngressBackend,
)
meta = V1ObjectMeta(
name=name,
annotations={
'hub.jupyter.org/proxy-data': json.dumps(data),
'hub.jupyter.org/proxy-routespec': routespec,
'hub.jupyter.org/proxy-target': target
},
labels={
'heritage': 'jupyterhub',
'component': 'singleuser-server',
'hub.jupyter.org/proxy-route': 'true'
}
)
if routespec.startswith('/'):
host = None
path = routespec
else:
host, path = routespec.split('/', 1)
target_parts = urlparse(target)
target_ip = target_parts.hostname
target_port = target_parts.port
    target_is_ip = re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', target_ip) is not None
# Make endpoint object
if target_is_ip:
endpoint = V1Endpoints(
kind='Endpoints',
metadata=meta,
subsets=[
V1EndpointSubset(
addresses=[V1EndpointAddress(ip=target_ip)],
ports=[V1EndpointPort(port=target_port)]
)
]
)
else:
endpoint = None
# Make service object
if target_is_ip:
service = V1Service(
kind='Service',
metadata=meta,
spec=V1ServiceSpec(
type='ClusterIP',
external_name='',
ports=[V1ServicePort(port=target_port, target_port=target_port)]
)
)
else:
service = V1Service(
kind='Service',
metadata=meta,
spec=V1ServiceSpec(
type='ExternalName',
external_name=target_ip,
cluster_ip='',
ports=[V1ServicePort(port=target_port, target_port=target_port)],
),
)
# Make Ingress object
ingress = V1beta1Ingress(
kind='Ingress',
metadata=meta,
spec=V1beta1IngressSpec(
rules=[V1beta1IngressRule(
host=host,
http=V1beta1HTTPIngressRuleValue(
paths=[
V1beta1HTTPIngressPath(
path=path,
backend=V1beta1IngressBackend(
service_name=name,
service_port=target_port
)
)
]
)
)]
)
)
return endpoint, service, ingress | python | def make_ingress(
name,
routespec,
target,
data
):
"""
Returns an ingress, service, endpoint object that'll work for this service
"""
# move beta imports here,
# which are more sensitive to kubernetes version
# and will change when they move out of beta
from kubernetes.client.models import (
V1beta1Ingress, V1beta1IngressSpec, V1beta1IngressRule,
V1beta1HTTPIngressRuleValue, V1beta1HTTPIngressPath,
V1beta1IngressBackend,
)
meta = V1ObjectMeta(
name=name,
annotations={
'hub.jupyter.org/proxy-data': json.dumps(data),
'hub.jupyter.org/proxy-routespec': routespec,
'hub.jupyter.org/proxy-target': target
},
labels={
'heritage': 'jupyterhub',
'component': 'singleuser-server',
'hub.jupyter.org/proxy-route': 'true'
}
)
if routespec.startswith('/'):
host = None
path = routespec
else:
host, path = routespec.split('/', 1)
target_parts = urlparse(target)
target_ip = target_parts.hostname
target_port = target_parts.port
    target_is_ip = re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', target_ip) is not None
# Make endpoint object
if target_is_ip:
endpoint = V1Endpoints(
kind='Endpoints',
metadata=meta,
subsets=[
V1EndpointSubset(
addresses=[V1EndpointAddress(ip=target_ip)],
ports=[V1EndpointPort(port=target_port)]
)
]
)
else:
endpoint = None
# Make service object
if target_is_ip:
service = V1Service(
kind='Service',
metadata=meta,
spec=V1ServiceSpec(
type='ClusterIP',
external_name='',
ports=[V1ServicePort(port=target_port, target_port=target_port)]
)
)
else:
service = V1Service(
kind='Service',
metadata=meta,
spec=V1ServiceSpec(
type='ExternalName',
external_name=target_ip,
cluster_ip='',
ports=[V1ServicePort(port=target_port, target_port=target_port)],
),
)
# Make Ingress object
ingress = V1beta1Ingress(
kind='Ingress',
metadata=meta,
spec=V1beta1IngressSpec(
rules=[V1beta1IngressRule(
host=host,
http=V1beta1HTTPIngressRuleValue(
paths=[
V1beta1HTTPIngressPath(
path=path,
backend=V1beta1IngressBackend(
service_name=name,
service_port=target_port
)
)
]
)
)]
)
)
return endpoint, service, ingress | [
"def",
"make_ingress",
"(",
"name",
",",
"routespec",
",",
"target",
",",
"data",
")",
":",
"# move beta imports here,",
"# which are more sensitive to kubernetes version",
"# and will change when they move out of beta",
"from",
"kubernetes",
".",
"client",
".",
"models",
"... | Returns an ingress, service, endpoint object that'll work for this service | [
"Returns",
"an",
"ingress",
"service",
"endpoint",
"object",
"that",
"ll",
"work",
"for",
"this",
"service"
] | 46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13 | https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/objects.py#L435-L541 | train | 228,079 |
jupyterhub/kubespawner | kubespawner/clients.py | shared_client | def shared_client(ClientType, *args, **kwargs):
"""Return a single shared kubernetes client instance
A weak reference to the instance is cached,
so that concurrent calls to shared_client
will all return the same instance until
all references to the client are cleared.
"""
kwarg_key = tuple((key, kwargs[key]) for key in sorted(kwargs))
cache_key = (ClientType, args, kwarg_key)
client = None
if cache_key in _client_cache:
# resolve cached weakref
# client can still be None after this!
client = _client_cache[cache_key]()
if client is None:
Client = getattr(kubernetes.client, ClientType)
client = Client(*args, **kwargs)
# cache weakref so that clients can be garbage collected
_client_cache[cache_key] = weakref.ref(client)
return client | python | def shared_client(ClientType, *args, **kwargs):
"""Return a single shared kubernetes client instance
A weak reference to the instance is cached,
so that concurrent calls to shared_client
will all return the same instance until
all references to the client are cleared.
"""
kwarg_key = tuple((key, kwargs[key]) for key in sorted(kwargs))
cache_key = (ClientType, args, kwarg_key)
client = None
if cache_key in _client_cache:
# resolve cached weakref
# client can still be None after this!
client = _client_cache[cache_key]()
if client is None:
Client = getattr(kubernetes.client, ClientType)
client = Client(*args, **kwargs)
# cache weakref so that clients can be garbage collected
_client_cache[cache_key] = weakref.ref(client)
return client | [
"def",
"shared_client",
"(",
"ClientType",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"kwarg_key",
"=",
"tuple",
"(",
"(",
"key",
",",
"kwargs",
"[",
"key",
"]",
")",
"for",
"key",
"in",
"sorted",
"(",
"kwargs",
")",
")",
"cache_key",
"="... | Return a single shared kubernetes client instance
A weak reference to the instance is cached,
so that concurrent calls to shared_client
will all return the same instance until
all references to the client are cleared. | [
"Return",
"a",
"single",
"shared",
"kubernetes",
"client",
"instance"
] | 46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13 | https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/clients.py#L25-L46 | train | 228,080 |
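A shared_client sketch showing the weakref caching (CoreV1Api is a standard
kubernetes.client class):

c1 = shared_client("CoreV1Api")
c2 = shared_client("CoreV1Api")
assert c1 is c2          # same instance while a live reference exists
del c1, c2               # once garbage-collected, the cached weakref goes stale
c3 = shared_client("CoreV1Api")  # so a fresh client is created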
jupyterhub/kubespawner | kubespawner/utils.py | generate_hashed_slug | def generate_hashed_slug(slug, limit=63, hash_length=6):
"""
Generate a unique name that's within a certain length limit
    Most k8s objects have a 63 char name limit. We want to be able to compress
larger names down to that if required, while still maintaining some
amount of legibility about what the objects really are.
If the length of the slug is shorter than the limit - hash_length, we just
return slug directly. If not, we truncate the slug to (limit - hash_length)
characters, hash the slug and append hash_length characters from the hash
to the end of the truncated slug. This ensures that these names are always
unique no matter what.
"""
if len(slug) < (limit - hash_length):
return slug
slug_hash = hashlib.sha256(slug.encode('utf-8')).hexdigest()
return '{prefix}-{hash}'.format(
prefix=slug[:limit - hash_length - 1],
hash=slug_hash[:hash_length],
).lower() | python | def generate_hashed_slug(slug, limit=63, hash_length=6):
"""
Generate a unique name that's within a certain length limit
    Most k8s objects have a 63 char name limit. We want to be able to compress
larger names down to that if required, while still maintaining some
amount of legibility about what the objects really are.
If the length of the slug is shorter than the limit - hash_length, we just
return slug directly. If not, we truncate the slug to (limit - hash_length)
characters, hash the slug and append hash_length characters from the hash
to the end of the truncated slug. This ensures that these names are always
unique no matter what.
"""
if len(slug) < (limit - hash_length):
return slug
slug_hash = hashlib.sha256(slug.encode('utf-8')).hexdigest()
return '{prefix}-{hash}'.format(
prefix=slug[:limit - hash_length - 1],
hash=slug_hash[:hash_length],
).lower() | [
"def",
"generate_hashed_slug",
"(",
"slug",
",",
"limit",
"=",
"63",
",",
"hash_length",
"=",
"6",
")",
":",
"if",
"len",
"(",
"slug",
")",
"<",
"(",
"limit",
"-",
"hash_length",
")",
":",
"return",
"slug",
"slug_hash",
"=",
"hashlib",
".",
"sha256",
... | Generate a unique name that's within a certain length limit
    Most k8s objects have a 63 char name limit. We want to be able to compress
larger names down to that if required, while still maintaining some
amount of legibility about what the objects really are.
If the length of the slug is shorter than the limit - hash_length, we just
return slug directly. If not, we truncate the slug to (limit - hash_length)
characters, hash the slug and append hash_length characters from the hash
to the end of the truncated slug. This ensures that these names are always
unique no matter what. | [
"Generate",
"a",
"unique",
"name",
"that",
"s",
"within",
"a",
"certain",
"length",
"limit"
] | 46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13 | https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/utils.py#L7-L29 | train | 228,081 |
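generate_hashed_slug in action (values are illustrative):

assert generate_hashed_slug("jupyter-alice") == "jupyter-alice"  # short: unchanged
slug = generate_hashed_slug("jupyter-" + "x" * 100)
assert len(slug) == 63  # 56-char prefix + "-" + 6 hash chars, lowercased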
jupyterhub/kubespawner | kubespawner/utils.py | get_k8s_model | def get_k8s_model(model_type, model_dict):
"""
Returns an instance of type specified model_type from an model instance or
    representative dictionary.
"""
model_dict = copy.deepcopy(model_dict)
if isinstance(model_dict, model_type):
return model_dict
elif isinstance(model_dict, dict):
# convert the dictionaries camelCase keys to snake_case keys
model_dict = _map_dict_keys_to_model_attributes(model_type, model_dict)
# use the dictionary keys to initialize a model of given type
return model_type(**model_dict)
else:
raise AttributeError("Expected object of type 'dict' (or '{}') but got '{}'.".format(model_type.__name__, type(model_dict).__name__)) | python | def get_k8s_model(model_type, model_dict):
"""
Returns an instance of type specified model_type from an model instance or
    representative dictionary.
"""
model_dict = copy.deepcopy(model_dict)
if isinstance(model_dict, model_type):
return model_dict
elif isinstance(model_dict, dict):
# convert the dictionaries camelCase keys to snake_case keys
model_dict = _map_dict_keys_to_model_attributes(model_type, model_dict)
# use the dictionary keys to initialize a model of given type
return model_type(**model_dict)
else:
raise AttributeError("Expected object of type 'dict' (or '{}') but got '{}'.".format(model_type.__name__, type(model_dict).__name__)) | [
"def",
"get_k8s_model",
"(",
"model_type",
",",
"model_dict",
")",
":",
"model_dict",
"=",
"copy",
".",
"deepcopy",
"(",
"model_dict",
")",
"if",
"isinstance",
"(",
"model_dict",
",",
"model_type",
")",
":",
"return",
"model_dict",
"elif",
"isinstance",
"(",
... | Returns an instance of type specified model_type from an model instance or
    representative dictionary. | [
"Returns",
"an",
"instance",
"of",
"type",
"specified",
"model_type",
"from",
"an",
"model",
"instance",
"or",
"represantative",
"dictionary",
"."
] | 46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13 | https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/utils.py#L75-L90 | train | 228,082 |
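A get_k8s_model sketch showing the camelCase-to-snake_case mapping
(V1Toleration is a standard kubernetes.client model):

from kubernetes.client.models import V1Toleration

tol = get_k8s_model(V1Toleration, {"key": "dedicated", "effect": "NoSchedule",
                                   "tolerationSeconds": 3600})
assert tol.toleration_seconds == 3600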
jupyterhub/kubespawner | kubespawner/utils.py | _get_k8s_model_dict | def _get_k8s_model_dict(model_type, model):
"""
Returns a dictionary representation of a provided model type
"""
model = copy.deepcopy(model)
if isinstance(model, model_type):
return model.to_dict()
elif isinstance(model, dict):
return _map_dict_keys_to_model_attributes(model_type, model)
else:
raise AttributeError("Expected object of type '{}' (or 'dict') but got '{}'.".format(model_type.__name__, type(model).__name__)) | python | def _get_k8s_model_dict(model_type, model):
"""
Returns a dictionary representation of a provided model type
"""
model = copy.deepcopy(model)
if isinstance(model, model_type):
return model.to_dict()
elif isinstance(model, dict):
return _map_dict_keys_to_model_attributes(model_type, model)
else:
raise AttributeError("Expected object of type '{}' (or 'dict') but got '{}'.".format(model_type.__name__, type(model).__name__)) | [
"def",
"_get_k8s_model_dict",
"(",
"model_type",
",",
"model",
")",
":",
"model",
"=",
"copy",
".",
"deepcopy",
"(",
"model",
")",
"if",
"isinstance",
"(",
"model",
",",
"model_type",
")",
":",
"return",
"model",
".",
"to_dict",
"(",
")",
"elif",
"isinst... | Returns a dictionary representation of a provided model type | [
"Returns",
"a",
"dictionary",
"representation",
"of",
"a",
"provided",
"model",
"type"
] | 46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13 | https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/utils.py#L92-L103 | train | 228,083 |
jupyterhub/kubespawner | kubespawner/reflector.py | NamespacedResourceReflector._list_and_update | def _list_and_update(self):
"""
Update current list of resources by doing a full fetch.
Overwrites all current resource info.
"""
initial_resources = getattr(self.api, self.list_method_name)(
self.namespace,
label_selector=self.label_selector,
field_selector=self.field_selector,
_request_timeout=self.request_timeout,
)
# This is an atomic operation on the dictionary!
self.resources = {p.metadata.name: p for p in initial_resources.items}
# return the resource version so we can hook up a watch
return initial_resources.metadata.resource_version | python | def _list_and_update(self):
"""
Update current list of resources by doing a full fetch.
Overwrites all current resource info.
"""
initial_resources = getattr(self.api, self.list_method_name)(
self.namespace,
label_selector=self.label_selector,
field_selector=self.field_selector,
_request_timeout=self.request_timeout,
)
# This is an atomic operation on the dictionary!
self.resources = {p.metadata.name: p for p in initial_resources.items}
# return the resource version so we can hook up a watch
return initial_resources.metadata.resource_version | [
"def",
"_list_and_update",
"(",
"self",
")",
":",
"initial_resources",
"=",
"getattr",
"(",
"self",
".",
"api",
",",
"self",
".",
"list_method_name",
")",
"(",
"self",
".",
"namespace",
",",
"label_selector",
"=",
"self",
".",
"label_selector",
",",
"field_s... | Update current list of resources by doing a full fetch.
Overwrites all current resource info. | [
"Update",
"current",
"list",
"of",
"resources",
"by",
"doing",
"a",
"full",
"fetch",
"."
] | 46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13 | https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/reflector.py#L147-L162 | train | 228,084 |
jupyterhub/kubespawner | kubespawner/reflector.py | NamespacedResourceReflector._watch_and_update | def _watch_and_update(self):
"""
Keeps the current list of resources up-to-date
This method is to be run not on the main thread!
We first fetch the list of current resources, and store that. Then we
register to be notified of changes to those resources, and keep our
local store up-to-date based on these notifications.
    We also perform exponential backoff, giving up once the retry delay
    exceeds 30s. This should protect against network connections dropping
and intermittent unavailability of the api-server. Every time we
recover from an exception we also do a full fetch, to pick up
changes that might've been missed in the time we were not doing
a watch.
Note that we're playing a bit with fire here, by updating a dictionary
in this thread while it is probably being read in another thread
without using locks! However, dictionary access itself is atomic,
and as long as we don't try to mutate them (do a 'fetch / modify /
update' cycle on them), we should be ok!
"""
selectors = []
log_name = ""
if self.label_selector:
selectors.append("label selector=%r" % self.label_selector)
if self.field_selector:
selectors.append("field selector=%r" % self.field_selector)
log_selector = ', '.join(selectors)
cur_delay = 0.1
self.log.info(
"watching for %s with %s in namespace %s",
self.kind, log_selector, self.namespace,
)
while True:
self.log.debug("Connecting %s watcher", self.kind)
start = time.monotonic()
w = watch.Watch()
try:
resource_version = self._list_and_update()
if not self.first_load_future.done():
# signal that we've loaded our initial data
self.first_load_future.set_result(None)
watch_args = {
'namespace': self.namespace,
'label_selector': self.label_selector,
'field_selector': self.field_selector,
'resource_version': resource_version,
}
if self.request_timeout:
# set network receive timeout
watch_args['_request_timeout'] = self.request_timeout
if self.timeout_seconds:
# set watch timeout
watch_args['timeout_seconds'] = self.timeout_seconds
# in case of timeout_seconds, the w.stream just exits (no exception thrown)
# -> we stop the watcher and start a new one
for ev in w.stream(
getattr(self.api, self.list_method_name),
**watch_args
):
cur_delay = 0.1
resource = ev['object']
if ev['type'] == 'DELETED':
# This is an atomic delete operation on the dictionary!
self.resources.pop(resource.metadata.name, None)
else:
# This is an atomic operation on the dictionary!
self.resources[resource.metadata.name] = resource
if self._stop_event.is_set():
self.log.info("%s watcher stopped", self.kind)
break
watch_duration = time.monotonic() - start
if watch_duration >= self.restart_seconds:
self.log.debug(
"Restarting %s watcher after %i seconds",
self.kind, watch_duration,
)
break
except ReadTimeoutError:
# network read time out, just continue and restart the watch
# this could be due to a network problem or just low activity
self.log.warning("Read timeout watching %s, reconnecting", self.kind)
continue
except Exception:
cur_delay = cur_delay * 2
if cur_delay > 30:
self.log.exception("Watching resources never recovered, giving up")
if self.on_failure:
self.on_failure()
return
self.log.exception("Error when watching resources, retrying in %ss", cur_delay)
time.sleep(cur_delay)
continue
else:
# no events on watch, reconnect
self.log.debug("%s watcher timeout", self.kind)
finally:
w.stop()
if self._stop_event.is_set():
self.log.info("%s watcher stopped", self.kind)
break
self.log.warning("%s watcher finished", self.kind) | python | def _watch_and_update(self):
"""
Keeps the current list of resources up-to-date
This method is to be run not on the main thread!
We first fetch the list of current resources, and store that. Then we
register to be notified of changes to those resources, and keep our
local store up-to-date based on these notifications.
    We also perform exponential backoff, giving up once the retry delay
    exceeds 30s. This should protect against network connections dropping
and intermittent unavailability of the api-server. Every time we
recover from an exception we also do a full fetch, to pick up
changes that might've been missed in the time we were not doing
a watch.
Note that we're playing a bit with fire here, by updating a dictionary
in this thread while it is probably being read in another thread
without using locks! However, dictionary access itself is atomic,
and as long as we don't try to mutate them (do a 'fetch / modify /
update' cycle on them), we should be ok!
"""
selectors = []
log_name = ""
if self.label_selector:
selectors.append("label selector=%r" % self.label_selector)
if self.field_selector:
selectors.append("field selector=%r" % self.field_selector)
log_selector = ', '.join(selectors)
cur_delay = 0.1
self.log.info(
"watching for %s with %s in namespace %s",
self.kind, log_selector, self.namespace,
)
while True:
self.log.debug("Connecting %s watcher", self.kind)
start = time.monotonic()
w = watch.Watch()
try:
resource_version = self._list_and_update()
if not self.first_load_future.done():
# signal that we've loaded our initial data
self.first_load_future.set_result(None)
watch_args = {
'namespace': self.namespace,
'label_selector': self.label_selector,
'field_selector': self.field_selector,
'resource_version': resource_version,
}
if self.request_timeout:
# set network receive timeout
watch_args['_request_timeout'] = self.request_timeout
if self.timeout_seconds:
# set watch timeout
watch_args['timeout_seconds'] = self.timeout_seconds
# in case of timeout_seconds, the w.stream just exits (no exception thrown)
# -> we stop the watcher and start a new one
for ev in w.stream(
getattr(self.api, self.list_method_name),
**watch_args
):
cur_delay = 0.1
resource = ev['object']
if ev['type'] == 'DELETED':
# This is an atomic delete operation on the dictionary!
self.resources.pop(resource.metadata.name, None)
else:
# This is an atomic operation on the dictionary!
self.resources[resource.metadata.name] = resource
if self._stop_event.is_set():
self.log.info("%s watcher stopped", self.kind)
break
watch_duration = time.monotonic() - start
if watch_duration >= self.restart_seconds:
self.log.debug(
"Restarting %s watcher after %i seconds",
self.kind, watch_duration,
)
break
except ReadTimeoutError:
# network read time out, just continue and restart the watch
# this could be due to a network problem or just low activity
self.log.warning("Read timeout watching %s, reconnecting", self.kind)
continue
except Exception:
cur_delay = cur_delay * 2
if cur_delay > 30:
self.log.exception("Watching resources never recovered, giving up")
if self.on_failure:
self.on_failure()
return
self.log.exception("Error when watching resources, retrying in %ss", cur_delay)
time.sleep(cur_delay)
continue
else:
# no events on watch, reconnect
self.log.debug("%s watcher timeout", self.kind)
finally:
w.stop()
if self._stop_event.is_set():
self.log.info("%s watcher stopped", self.kind)
break
self.log.warning("%s watcher finished", self.kind) | [
"def",
"_watch_and_update",
"(",
"self",
")",
":",
"selectors",
"=",
"[",
"]",
"log_name",
"=",
"\"\"",
"if",
"self",
".",
"label_selector",
":",
"selectors",
".",
"append",
"(",
"\"label selector=%r\"",
"%",
"self",
".",
"label_selector",
")",
"if",
"self",... | Keeps the current list of resources up-to-date
This method is to be run not on the main thread!
We first fetch the list of current resources, and store that. Then we
register to be notified of changes to those resources, and keep our
local store up-to-date based on these notifications.
    We also perform exponential backoff, giving up once the retry delay
    exceeds 30s. This should protect against network connections dropping
and intermittent unavailability of the api-server. Every time we
recover from an exception we also do a full fetch, to pick up
changes that might've been missed in the time we were not doing
a watch.
Note that we're playing a bit with fire here, by updating a dictionary
in this thread while it is probably being read in another thread
without using locks! However, dictionary access itself is atomic,
and as long as we don't try to mutate them (do a 'fetch / modify /
update' cycle on them), we should be ok! | [
"Keeps",
"the",
"current",
"list",
"of",
"resources",
"up",
"-",
"to",
"-",
"date"
] | 46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13 | https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/reflector.py#L164-L269 | train | 228,085 |
jupyterhub/kubespawner | kubespawner/reflector.py | NamespacedResourceReflector.start | def start(self):
"""
Start the reflection process!
We'll do a blocking read of all resources first, so that we don't
race with any operations that are checking the state of the pod
store - such as polls. This should be called only once at the
start of program initialization (when the singleton is being created),
and not afterwards!
"""
if hasattr(self, 'watch_thread'):
raise ValueError('Thread watching for resources is already running')
self._list_and_update()
self.watch_thread = threading.Thread(target=self._watch_and_update)
    # If the watch_thread is the only thread left alive, exit app
self.watch_thread.daemon = True
self.watch_thread.start() | python | def start(self):
"""
Start the reflection process!
We'll do a blocking read of all resources first, so that we don't
race with any operations that are checking the state of the pod
store - such as polls. This should be called only once at the
start of program initialization (when the singleton is being created),
and not afterwards!
"""
if hasattr(self, 'watch_thread'):
raise ValueError('Thread watching for resources is already running')
self._list_and_update()
self.watch_thread = threading.Thread(target=self._watch_and_update)
    # If the watch_thread is the only thread left alive, exit app
self.watch_thread.daemon = True
self.watch_thread.start() | [
"def",
"start",
"(",
"self",
")",
":",
"if",
"hasattr",
"(",
"self",
",",
"'watch_thread'",
")",
":",
"raise",
"ValueError",
"(",
"'Thread watching for resources is already running'",
")",
"self",
".",
"_list_and_update",
"(",
")",
"self",
".",
"watch_thread",
"... | Start the reflection process!
We'll do a blocking read of all resources first, so that we don't
race with any operations that are checking the state of the pod
store - such as polls. This should be called only once at the
start of program initialization (when the singleton is being created),
and not afterwards! | [
"Start",
"the",
"reflection",
"process!"
] | 46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13 | https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/reflector.py#L271-L288 | train | 228,086 |
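A sketch of how NamespacedResourceReflector is typically subclassed (the
values for kind and list_method_name are illustrative, in the style the
spawner uses for pods):

class PodReflector(NamespacedResourceReflector):
    kind = 'pods'
    list_method_name = 'list_namespaced_pod'

    @property
    def pods(self):
        return self.resources  # the dict maintained by the watch thread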
jupyterhub/kubespawner | kubespawner/spawner.py | KubeSpawner.get_pod_manifest | def get_pod_manifest(self):
"""
Make a pod manifest that will spawn current user's notebook pod.
"""
if callable(self.uid):
uid = yield gen.maybe_future(self.uid(self))
else:
uid = self.uid
if callable(self.gid):
gid = yield gen.maybe_future(self.gid(self))
else:
gid = self.gid
if callable(self.fs_gid):
fs_gid = yield gen.maybe_future(self.fs_gid(self))
else:
fs_gid = self.fs_gid
if callable(self.supplemental_gids):
supplemental_gids = yield gen.maybe_future(self.supplemental_gids(self))
else:
supplemental_gids = self.supplemental_gids
if self.cmd:
real_cmd = self.cmd + self.get_args()
else:
real_cmd = None
labels = self._build_pod_labels(self._expand_all(self.extra_labels))
annotations = self._build_common_annotations(self._expand_all(self.extra_annotations))
return make_pod(
name=self.pod_name,
cmd=real_cmd,
port=self.port,
image=self.image,
image_pull_policy=self.image_pull_policy,
image_pull_secret=self.image_pull_secrets,
node_selector=self.node_selector,
run_as_uid=uid,
run_as_gid=gid,
fs_gid=fs_gid,
supplemental_gids=supplemental_gids,
run_privileged=self.privileged,
env=self.get_env(),
volumes=self._expand_all(self.volumes),
volume_mounts=self._expand_all(self.volume_mounts),
working_dir=self.working_dir,
labels=labels,
annotations=annotations,
cpu_limit=self.cpu_limit,
cpu_guarantee=self.cpu_guarantee,
mem_limit=self.mem_limit,
mem_guarantee=self.mem_guarantee,
extra_resource_limits=self.extra_resource_limits,
extra_resource_guarantees=self.extra_resource_guarantees,
lifecycle_hooks=self.lifecycle_hooks,
init_containers=self._expand_all(self.init_containers),
service_account=self.service_account,
extra_container_config=self.extra_container_config,
extra_pod_config=self.extra_pod_config,
extra_containers=self._expand_all(self.extra_containers),
scheduler_name=self.scheduler_name,
tolerations=self.tolerations,
node_affinity_preferred=self.node_affinity_preferred,
node_affinity_required=self.node_affinity_required,
pod_affinity_preferred=self.pod_affinity_preferred,
pod_affinity_required=self.pod_affinity_required,
pod_anti_affinity_preferred=self.pod_anti_affinity_preferred,
pod_anti_affinity_required=self.pod_anti_affinity_required,
priority_class_name=self.priority_class_name,
logger=self.log,
) | python | def get_pod_manifest(self):
"""
Make a pod manifest that will spawn current user's notebook pod.
"""
if callable(self.uid):
uid = yield gen.maybe_future(self.uid(self))
else:
uid = self.uid
if callable(self.gid):
gid = yield gen.maybe_future(self.gid(self))
else:
gid = self.gid
if callable(self.fs_gid):
fs_gid = yield gen.maybe_future(self.fs_gid(self))
else:
fs_gid = self.fs_gid
if callable(self.supplemental_gids):
supplemental_gids = yield gen.maybe_future(self.supplemental_gids(self))
else:
supplemental_gids = self.supplemental_gids
if self.cmd:
real_cmd = self.cmd + self.get_args()
else:
real_cmd = None
labels = self._build_pod_labels(self._expand_all(self.extra_labels))
annotations = self._build_common_annotations(self._expand_all(self.extra_annotations))
return make_pod(
name=self.pod_name,
cmd=real_cmd,
port=self.port,
image=self.image,
image_pull_policy=self.image_pull_policy,
image_pull_secret=self.image_pull_secrets,
node_selector=self.node_selector,
run_as_uid=uid,
run_as_gid=gid,
fs_gid=fs_gid,
supplemental_gids=supplemental_gids,
run_privileged=self.privileged,
env=self.get_env(),
volumes=self._expand_all(self.volumes),
volume_mounts=self._expand_all(self.volume_mounts),
working_dir=self.working_dir,
labels=labels,
annotations=annotations,
cpu_limit=self.cpu_limit,
cpu_guarantee=self.cpu_guarantee,
mem_limit=self.mem_limit,
mem_guarantee=self.mem_guarantee,
extra_resource_limits=self.extra_resource_limits,
extra_resource_guarantees=self.extra_resource_guarantees,
lifecycle_hooks=self.lifecycle_hooks,
init_containers=self._expand_all(self.init_containers),
service_account=self.service_account,
extra_container_config=self.extra_container_config,
extra_pod_config=self.extra_pod_config,
extra_containers=self._expand_all(self.extra_containers),
scheduler_name=self.scheduler_name,
tolerations=self.tolerations,
node_affinity_preferred=self.node_affinity_preferred,
node_affinity_required=self.node_affinity_required,
pod_affinity_preferred=self.pod_affinity_preferred,
pod_affinity_required=self.pod_affinity_required,
pod_anti_affinity_preferred=self.pod_anti_affinity_preferred,
pod_anti_affinity_required=self.pod_anti_affinity_required,
priority_class_name=self.priority_class_name,
logger=self.log,
) | [
"def",
"get_pod_manifest",
"(",
"self",
")",
":",
"if",
"callable",
"(",
"self",
".",
"uid",
")",
":",
"uid",
"=",
"yield",
"gen",
".",
"maybe_future",
"(",
"self",
".",
"uid",
"(",
"self",
")",
")",
"else",
":",
"uid",
"=",
"self",
".",
"uid",
"... | Make a pod manifest that will spawn current user's notebook pod. | [
"Make",
"a",
"pod",
"manifest",
"that",
"will",
"spawn",
"current",
"user",
"s",
"notebook",
"pod",
"."
] | 46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13 | https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1303-L1376 | train | 228,087 |
jupyterhub/kubespawner | kubespawner/spawner.py | KubeSpawner.get_pvc_manifest | def get_pvc_manifest(self):
"""
Make a pvc manifest that will spawn current user's pvc.
"""
labels = self._build_common_labels(self._expand_all(self.storage_extra_labels))
labels.update({
'component': 'singleuser-storage'
})
annotations = self._build_common_annotations({})
return make_pvc(
name=self.pvc_name,
storage_class=self.storage_class,
access_modes=self.storage_access_modes,
storage=self.storage_capacity,
labels=labels,
annotations=annotations
) | python | def get_pvc_manifest(self):
"""
Make a pvc manifest that will spawn current user's pvc.
"""
labels = self._build_common_labels(self._expand_all(self.storage_extra_labels))
labels.update({
'component': 'singleuser-storage'
})
annotations = self._build_common_annotations({})
return make_pvc(
name=self.pvc_name,
storage_class=self.storage_class,
access_modes=self.storage_access_modes,
storage=self.storage_capacity,
labels=labels,
annotations=annotations
) | [
"def",
"get_pvc_manifest",
"(",
"self",
")",
":",
"labels",
"=",
"self",
".",
"_build_common_labels",
"(",
"self",
".",
"_expand_all",
"(",
"self",
".",
"storage_extra_labels",
")",
")",
"labels",
".",
"update",
"(",
"{",
"'component'",
":",
"'singleuser-stora... | Make a pvc manifest that will spawn current user's pvc. | [
"Make",
"a",
"pvc",
"manifest",
"that",
"will",
"spawn",
"current",
"user",
"s",
"pvc",
"."
] | 46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13 | https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1378-L1396 | train | 228,088 |
jupyterhub/kubespawner | kubespawner/spawner.py | KubeSpawner.is_pod_running | def is_pod_running(self, pod):
"""
Check if the given pod is running
    pod must be a kubernetes API Pod object (a V1Pod, not a plain dict).
"""
# FIXME: Validate if this is really the best way
is_running = (
pod is not None and
pod.status.phase == 'Running' and
pod.status.pod_ip is not None and
pod.metadata.deletion_timestamp is None and
all([cs.ready for cs in pod.status.container_statuses])
)
return is_running | python | def is_pod_running(self, pod):
"""
Check if the given pod is running
    pod must be a kubernetes API Pod object (a V1Pod, not a plain dict).
"""
# FIXME: Validate if this is really the best way
is_running = (
pod is not None and
pod.status.phase == 'Running' and
pod.status.pod_ip is not None and
pod.metadata.deletion_timestamp is None and
all([cs.ready for cs in pod.status.container_statuses])
)
return is_running | [
"def",
"is_pod_running",
"(",
"self",
",",
"pod",
")",
":",
"# FIXME: Validate if this is really the best way",
"is_running",
"=",
"(",
"pod",
"is",
"not",
"None",
"and",
"pod",
".",
"status",
".",
"phase",
"==",
"'Running'",
"and",
"pod",
".",
"status",
".",
... | Check if the given pod is running
pod must be a dictionary representing a Pod kubernetes API object. | [
"Check",
"if",
"the",
"given",
"pod",
"is",
"running"
] | 46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13 | https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1398-L1412 | train | 228,089 |
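Note the docstring says "dictionary" while the code uses attribute access on a kubernetes-client V1Pod. The same check as a standalone predicate, with an added `or []` guard for pods that have not reported container statuses yet:

def pod_is_running(pod):
    # pod: kubernetes.client.V1Pod (or None)
    return (
        pod is not None
        and pod.status.phase == 'Running'
        and pod.status.pod_ip is not None
        and pod.metadata.deletion_timestamp is None
        and all(cs.ready for cs in (pod.status.container_statuses or []))
    )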
jupyterhub/kubespawner | kubespawner/spawner.py | KubeSpawner.get_env | def get_env(self):
"""Return the environment dict to use for the Spawner.
See also: jupyterhub.Spawner.get_env
"""
env = super(KubeSpawner, self).get_env()
# deprecate image
env['JUPYTER_IMAGE_SPEC'] = self.image
env['JUPYTER_IMAGE'] = self.image
return env | python | def get_env(self):
"""Return the environment dict to use for the Spawner.
See also: jupyterhub.Spawner.get_env
"""
env = super(KubeSpawner, self).get_env()
# deprecate image
env['JUPYTER_IMAGE_SPEC'] = self.image
env['JUPYTER_IMAGE'] = self.image
return env | [
"def",
"get_env",
"(",
"self",
")",
":",
"env",
"=",
"super",
"(",
"KubeSpawner",
",",
"self",
")",
".",
"get_env",
"(",
")",
"# deprecate image",
"env",
"[",
"'JUPYTER_IMAGE_SPEC'",
"]",
"=",
"self",
".",
"image",
"env",
"[",
"'JUPYTER_IMAGE'",
"]",
"="... | Return the environment dict to use for the Spawner.
See also: jupyterhub.Spawner.get_env | [
"Return",
"the",
"environment",
"dict",
"to",
"use",
"for",
"the",
"Spawner",
"."
] | 46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13 | https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1429-L1440 | train | 228,090 |
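Inside the spawned container these variables are visible to the notebook process; a short sketch:

import os

image = os.environ.get('JUPYTER_IMAGE')        # current name
legacy = os.environ.get('JUPYTER_IMAGE_SPEC')  # deprecated alias kept for compatibility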
jupyterhub/kubespawner | kubespawner/spawner.py | KubeSpawner.poll | def poll(self):
"""
Check if the pod is still running.
Uses the same interface as subprocess.Popen.poll(): if the pod is
still running, returns None. If the pod has exited, return the
exit code if we can determine it, or 1 if it has exited but we
don't know how. These are the return values JupyterHub expects.
Note that a clean exit will have an exit code of zero, so it is
necessary to check that the returned value is None, rather than
just Falsy, to determine that the pod is still running.
"""
# have to wait for first load of data before we have a valid answer
if not self.pod_reflector.first_load_future.done():
yield self.pod_reflector.first_load_future
data = self.pod_reflector.pods.get(self.pod_name, None)
if data is not None:
if data.status.phase == 'Pending':
return None
ctr_stat = data.status.container_statuses
if ctr_stat is None: # No status, no container (we hope)
# This seems to happen when a pod is idle-culled.
return 1
for c in ctr_stat:
# return exit code if notebook container has terminated
if c.name == 'notebook':
if c.state.terminated:
# call self.stop to delete the pod
if self.delete_stopped_pods:
yield self.stop(now=True)
return c.state.terminated.exit_code
break
# None means pod is running or starting up
return None
# pod doesn't exist or has been deleted
return 1 | python | def poll(self):
"""
Check if the pod is still running.
Uses the same interface as subprocess.Popen.poll(): if the pod is
still running, returns None. If the pod has exited, return the
exit code if we can determine it, or 1 if it has exited but we
don't know how. These are the return values JupyterHub expects.
Note that a clean exit will have an exit code of zero, so it is
necessary to check that the returned value is None, rather than
just Falsy, to determine that the pod is still running.
"""
# have to wait for first load of data before we have a valid answer
if not self.pod_reflector.first_load_future.done():
yield self.pod_reflector.first_load_future
data = self.pod_reflector.pods.get(self.pod_name, None)
if data is not None:
if data.status.phase == 'Pending':
return None
ctr_stat = data.status.container_statuses
if ctr_stat is None: # No status, no container (we hope)
# This seems to happen when a pod is idle-culled.
return 1
for c in ctr_stat:
# return exit code if notebook container has terminated
if c.name == 'notebook':
if c.state.terminated:
# call self.stop to delete the pod
if self.delete_stopped_pods:
yield self.stop(now=True)
return c.state.terminated.exit_code
break
# None means pod is running or starting up
return None
# pod doesn't exist or has been deleted
return 1 | [
"def",
"poll",
"(",
"self",
")",
":",
"# have to wait for first load of data before we have a valid answer",
"if",
"not",
"self",
".",
"pod_reflector",
".",
"first_load_future",
".",
"done",
"(",
")",
":",
"yield",
"self",
".",
"pod_reflector",
".",
"first_load_future... | Check if the pod is still running.
Uses the same interface as subprocess.Popen.poll(): if the pod is
still running, returns None. If the pod has exited, return the
exit code if we can determine it, or 1 if it has exited but we
don't know how. These are the return values JupyterHub expects.
Note that a clean exit will have an exit code of zero, so it is
necessary to check that the returned value is None, rather than
just Falsy, to determine that the pod is still running. | [
"Check",
"if",
"the",
"pod",
"is",
"still",
"running",
"."
] | 46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13 | https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1456-L1492 | train | 228,091 |
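Because a clean exit returns 0, callers must compare against None by identity rather than by truthiness. A caller sketch in the same tornado-coroutine style:

from tornado import gen

@gen.coroutine
def report_status(spawner):
    status = yield spawner.poll()
    if status is None:  # 'is None', not 'not status': 0 means a clean exit
        print('pod is still running')
    else:
        print('pod exited with code', status)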
jupyterhub/kubespawner | kubespawner/spawner.py | KubeSpawner.events | def events(self):
"""Filter event-reflector to just our events
Returns list of all events that match our pod_name
since our ._last_event (if defined).
._last_event is set at the beginning of .start().
"""
if not self.event_reflector:
return []
events = []
for event in self.event_reflector.events:
if event.involved_object.name != self.pod_name:
# only consider events for my pod name
continue
if self._last_event and event.metadata.uid == self._last_event:
# saw last_event marker, ignore any previous events
# and only consider future events
# only include events *after* our _last_event marker
events = []
else:
events.append(event)
return events | python | def events(self):
"""Filter event-reflector to just our events
Returns list of all events that match our pod_name
since our ._last_event (if defined).
._last_event is set at the beginning of .start().
"""
if not self.event_reflector:
return []
events = []
for event in self.event_reflector.events:
if event.involved_object.name != self.pod_name:
# only consider events for my pod name
continue
if self._last_event and event.metadata.uid == self._last_event:
# saw last_event marker, ignore any previous events
# and only consider future events
# only include events *after* our _last_event marker
events = []
else:
events.append(event)
return events | [
"def",
"events",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"event_reflector",
":",
"return",
"[",
"]",
"events",
"=",
"[",
"]",
"for",
"event",
"in",
"self",
".",
"event_reflector",
".",
"events",
":",
"if",
"event",
".",
"involved_object",
".",... | Filter event-reflector to just our events
Returns list of all events that match our pod_name
since our ._last_event (if defined).
._last_event is set at the beginning of .start(). | [
"Filter",
"event",
"-",
"reflector",
"to",
"just",
"our",
"events"
] | 46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13 | https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1499-L1522 | train | 228,092 |
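A sketch of consuming the filtered events for progress output; `last_timestamp`, `type`, and `message` are standard kubernetes-client V1Event attributes but are assumptions relative to the code above:

def event_lines(spawner):
    # one human-readable line per event newer than the _last_event marker
    for event in spawner.events:
        yield '%s [%s] %s' % (event.last_timestamp, event.type, event.message)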
jupyterhub/kubespawner | kubespawner/spawner.py | KubeSpawner._start_reflector | def _start_reflector(self, key, ReflectorClass, replace=False, **kwargs):
"""Start a shared reflector on the KubeSpawner class
key: key for the reflector (e.g. 'pod' or 'events')
Reflector: Reflector class to be instantiated
kwargs: extra keyword-args to be relayed to ReflectorClass
If replace=False and the pod reflector is already running,
do nothing.
If replace=True, a running pod reflector will be stopped
and a new one started (for recovering from possible errors).
"""
main_loop = IOLoop.current()
def on_reflector_failure():
self.log.critical(
"%s reflector failed, halting Hub.",
key.title(),
)
sys.exit(1)
previous_reflector = self.__class__.reflectors.get(key)
if replace or not previous_reflector:
self.__class__.reflectors[key] = ReflectorClass(
parent=self,
namespace=self.namespace,
on_failure=on_reflector_failure,
**kwargs,
)
if replace and previous_reflector:
# we replaced the reflector, stop the old one
previous_reflector.stop()
# return the current reflector
return self.__class__.reflectors[key] | python | def _start_reflector(self, key, ReflectorClass, replace=False, **kwargs):
"""Start a shared reflector on the KubeSpawner class
key: key for the reflector (e.g. 'pod' or 'events')
Reflector: Reflector class to be instantiated
kwargs: extra keyword-args to be relayed to ReflectorClass
If replace=False and the pod reflector is already running,
do nothing.
If replace=True, a running pod reflector will be stopped
and a new one started (for recovering from possible errors).
"""
main_loop = IOLoop.current()
def on_reflector_failure():
self.log.critical(
"%s reflector failed, halting Hub.",
key.title(),
)
sys.exit(1)
previous_reflector = self.__class__.reflectors.get(key)
if replace or not previous_reflector:
self.__class__.reflectors[key] = ReflectorClass(
parent=self,
namespace=self.namespace,
on_failure=on_reflector_failure,
**kwargs,
)
if replace and previous_reflector:
# we replaced the reflector, stop the old one
previous_reflector.stop()
# return the current reflector
return self.__class__.reflectors[key] | [
"def",
"_start_reflector",
"(",
"self",
",",
"key",
",",
"ReflectorClass",
",",
"replace",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"main_loop",
"=",
"IOLoop",
".",
"current",
"(",
")",
"def",
"on_reflector_failure",
"(",
")",
":",
"self",
".",
... | Start a shared reflector on the KubeSpawner class
key: key for the reflector (e.g. 'pod' or 'events')
Reflector: Reflector class to be instantiated
kwargs: extra keyword-args to be relayed to ReflectorClass
If replace=False and the pod reflector is already running,
do nothing.
If replace=True, a running pod reflector will be stopped
and a new one started (for recovering from possible errors). | [
"Start",
"a",
"shared",
"reflector",
"on",
"the",
"KubeSpawner",
"class"
] | 46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13 | https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1566-L1603 | train | 228,093 |
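The essential pattern is a class-level registry so that all spawner instances share one watch per resource kind. A stripped-down sketch with illustrative names:

class SharedWatches:
    reflectors = {}  # class attribute: shared by every instance

    def _get_or_start(self, key, factory, replace=False):
        previous = self.__class__.reflectors.get(key)
        if replace or not previous:
            self.__class__.reflectors[key] = factory()
        if replace and previous:
            previous.stop()  # shut down the watch we just replaced
        return self.__class__.reflectors[key]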
jupyterhub/kubespawner | kubespawner/spawner.py | KubeSpawner._start_watching_events | def _start_watching_events(self, replace=False):
"""Start the events reflector
If replace=False and the event reflector is already running,
do nothing.
If replace=True, a running pod reflector will be stopped
and a new one started (for recovering from possible errors).
"""
return self._start_reflector(
"events",
EventReflector,
fields={"involvedObject.kind": "Pod"},
replace=replace,
) | python | def _start_watching_events(self, replace=False):
"""Start the events reflector
If replace=False and the event reflector is already running,
do nothing.
If replace=True, a running pod reflector will be stopped
and a new one started (for recovering from possible errors).
"""
return self._start_reflector(
"events",
EventReflector,
fields={"involvedObject.kind": "Pod"},
replace=replace,
) | [
"def",
"_start_watching_events",
"(",
"self",
",",
"replace",
"=",
"False",
")",
":",
"return",
"self",
".",
"_start_reflector",
"(",
"\"events\"",
",",
"EventReflector",
",",
"fields",
"=",
"{",
"\"involvedObject.kind\"",
":",
"\"Pod\"",
"}",
",",
"replace",
... | Start the events reflector
If replace=False and the event reflector is already running,
do nothing.
If replace=True, a running pod reflector will be stopped
and a new one started (for recovering from possible errors). | [
"Start",
"the",
"events",
"reflector"
] | 46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13 | https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1606-L1620 | train | 228,094 |
jupyterhub/kubespawner | kubespawner/spawner.py | KubeSpawner._options_form_default | def _options_form_default(self):
'''
Build the form template according to the `profile_list` setting.
Returns:
'' when no `profile_list` has been defined
The rendered template (using jinja2) when `profile_list` is defined.
'''
if not self.profile_list:
return ''
if callable(self.profile_list):
return self._render_options_form_dynamically
else:
return self._render_options_form(self.profile_list) | python | def _options_form_default(self):
'''
Build the form template according to the `profile_list` setting.
Returns:
'' when no `profile_list` has been defined
The rendered template (using jinja2) when `profile_list` is defined.
'''
if not self.profile_list:
return ''
if callable(self.profile_list):
return self._render_options_form_dynamically
else:
return self._render_options_form(self.profile_list) | [
"def",
"_options_form_default",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"profile_list",
":",
"return",
"''",
"if",
"callable",
"(",
"self",
".",
"profile_list",
")",
":",
"return",
"self",
".",
"_render_options_form_dynamically",
"else",
":",
"return"... | Build the form template according to the `profile_list` setting.
Returns:
'' when no `profile_list` has been defined
The rendered template (using jinja2) when `profile_list` is defined. | [
"Build",
"the",
"form",
"template",
"according",
"to",
"the",
"profile_list",
"setting",
"."
] | 46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13 | https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1824-L1837 | train | 228,095 |
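The options form is only rendered when profile_list is non-empty. A static configuration sketch; the display names and resource values are assumptions, while the `display_name`/`default`/`kubespawner_override` keys are the ones _load_profile below consumes:

# jupyterhub_config.py
c.KubeSpawner.profile_list = [
    {
        'display_name': 'Small (2 CPU, 4 GB)',
        'default': True,
        'kubespawner_override': {'cpu_limit': 2, 'mem_limit': '4G'},
    },
    {
        'display_name': 'Large (8 CPU, 16 GB)',
        'kubespawner_override': {'cpu_limit': 8, 'mem_limit': '16G'},
    },
]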
jupyterhub/kubespawner | kubespawner/spawner.py | KubeSpawner.options_from_form | def options_from_form(self, formdata):
"""get the option selected by the user on the form
This only constructs the user_options dict,
it should not actually load any options.
That is done later in `.load_user_options()`
Args:
formdata: user selection returned by the form
To access the value, you can use the `get` accessor and the name of the html element,
for example::
formdata.get('profile',[0])
to get the value of the form named "profile", as defined in `form_template`::
<select class="form-control" name="profile"...>
</select>
Returns:
user_options (dict): the selected profile in the user_options form,
e.g. ``{"profile": "8 CPUs"}``
"""
if not self.profile_list or self._profile_list is None:
return formdata
# Default to first profile if somehow none is provided
try:
selected_profile = int(formdata.get('profile', [0])[0])
options = self._profile_list[selected_profile]
except (TypeError, IndexError, ValueError):
raise web.HTTPError(400, "No such profile: %i", formdata.get('profile', None))
return {
'profile': options['display_name']
} | python | def options_from_form(self, formdata):
"""get the option selected by the user on the form
This only constructs the user_options dict,
it should not actually load any options.
That is done later in `.load_user_options()`
Args:
formdata: user selection returned by the form
To access the value, you can use the `get` accessor and the name of the html element,
for example::
formdata.get('profile',[0])
to get the value of the form named "profile", as defined in `form_template`::
<select class="form-control" name="profile"...>
</select>
Returns:
user_options (dict): the selected profile in the user_options form,
e.g. ``{"profile": "8 CPUs"}``
"""
if not self.profile_list or self._profile_list is None:
return formdata
# Default to first profile if somehow none is provided
try:
selected_profile = int(formdata.get('profile', [0])[0])
options = self._profile_list[selected_profile]
except (TypeError, IndexError, ValueError):
raise web.HTTPError(400, "No such profile: %i", formdata.get('profile', None))
return {
'profile': options['display_name']
} | [
"def",
"options_from_form",
"(",
"self",
",",
"formdata",
")",
":",
"if",
"not",
"self",
".",
"profile_list",
"or",
"self",
".",
"_profile_list",
"is",
"None",
":",
"return",
"formdata",
"# Default to first profile if somehow none is provided",
"try",
":",
"selected... | get the option selected by the user on the form
This only constructs the user_options dict,
it should not actually load any options.
That is done later in `.load_user_options()`
Args:
formdata: user selection returned by the form
To access the value, you can use the `get` accessor and the name of the html element,
for example::
formdata.get('profile',[0])
to get the value of the form named "profile", as defined in `form_template`::
<select class="form-control" name="profile"...>
</select>
Returns:
user_options (dict): the selected profile in the user_options form,
e.g. ``{"profile": "8 CPUs"}`` | [
"get",
"the",
"option",
"selected",
"by",
"the",
"user",
"on",
"the",
"form"
] | 46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13 | https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1839-L1873 | train | 228,096 |
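With a profile list like the one sketched above, the rendered <select> posts the chosen index and this method maps it back to a display name. A round-trip sketch, assuming the spawner has already resolved _profile_list:

formdata = {'profile': ['1']}  # what the form posts for the second entry
user_options = spawner.options_from_form(formdata)
assert user_options == {'profile': 'Large (8 CPU, 16 GB)'}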
jupyterhub/kubespawner | kubespawner/spawner.py | KubeSpawner._load_profile | def _load_profile(self, profile_name):
"""Load a profile by name
Called by load_user_options
"""
# find the profile
default_profile = self._profile_list[0]
for profile in self._profile_list:
if profile.get('default', False):
# explicit default, not the first
default_profile = profile
if profile['display_name'] == profile_name:
break
else:
if profile_name:
# name specified, but not found
raise ValueError("No such profile: %s. Options include: %s" % (
profile_name, ', '.join(p['display_name'] for p in self._profile_list)
))
else:
# no name specified, use the default
profile = default_profile
self.log.debug("Applying KubeSpawner override for profile '%s'", profile['display_name'])
kubespawner_override = profile.get('kubespawner_override', {})
for k, v in kubespawner_override.items():
if callable(v):
v = v(self)
self.log.debug(".. overriding KubeSpawner value %s=%s (callable result)", k, v)
else:
self.log.debug(".. overriding KubeSpawner value %s=%s", k, v)
setattr(self, k, v) | python | def _load_profile(self, profile_name):
"""Load a profile by name
Called by load_user_options
"""
# find the profile
default_profile = self._profile_list[0]
for profile in self._profile_list:
if profile.get('default', False):
# explicit default, not the first
default_profile = profile
if profile['display_name'] == profile_name:
break
else:
if profile_name:
# name specified, but not found
raise ValueError("No such profile: %s. Options include: %s" % (
profile_name, ', '.join(p['display_name'] for p in self._profile_list)
))
else:
# no name specified, use the default
profile = default_profile
self.log.debug("Applying KubeSpawner override for profile '%s'", profile['display_name'])
kubespawner_override = profile.get('kubespawner_override', {})
for k, v in kubespawner_override.items():
if callable(v):
v = v(self)
self.log.debug(".. overriding KubeSpawner value %s=%s (callable result)", k, v)
else:
self.log.debug(".. overriding KubeSpawner value %s=%s", k, v)
setattr(self, k, v) | [
"def",
"_load_profile",
"(",
"self",
",",
"profile_name",
")",
":",
"# find the profile",
"default_profile",
"=",
"self",
".",
"_profile_list",
"[",
"0",
"]",
"for",
"profile",
"in",
"self",
".",
"_profile_list",
":",
"if",
"profile",
".",
"get",
"(",
"'defa... | Load a profile by name
Called by load_user_options | [
"Load",
"a",
"profile",
"by",
"name"
] | 46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13 | https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1876-L1909 | train | 228,097 |
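Override values may themselves be callables that receive the spawner and are evaluated when the profile loads, so overrides can be per-user. A sketch; `environment` is a standard JupyterHub Spawner trait, and the variable name is an assumption:

c.KubeSpawner.profile_list = [
    {
        'display_name': 'GPU node',
        'kubespawner_override': {
            # callables get the spawner passed in, per the loop above
            'environment': lambda spawner: {'NB_USER': spawner.user.name},
        },
    },
]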
jupyterhub/kubespawner | kubespawner/spawner.py | KubeSpawner.load_user_options | def load_user_options(self):
"""Load user options from self.user_options dict
This can be set via POST to the API or via options_from_form
Only supported argument by default is 'profile'.
Override in subclasses to support other options.
"""
if self._profile_list is None:
if callable(self.profile_list):
self._profile_list = yield gen.maybe_future(self.profile_list(self))
else:
self._profile_list = self.profile_list
if self._profile_list:
yield self._load_profile(self.user_options.get('profile', None)) | python | def load_user_options(self):
"""Load user options from self.user_options dict
This can be set via POST to the API or via options_from_form
Only supported argument by default is 'profile'.
Override in subclasses to support other options.
"""
if self._profile_list is None:
if callable(self.profile_list):
self._profile_list = yield gen.maybe_future(self.profile_list(self))
else:
self._profile_list = self.profile_list
if self._profile_list:
yield self._load_profile(self.user_options.get('profile', None)) | [
"def",
"load_user_options",
"(",
"self",
")",
":",
"if",
"self",
".",
"_profile_list",
"is",
"None",
":",
"if",
"callable",
"(",
"self",
".",
"profile_list",
")",
":",
"self",
".",
"_profile_list",
"=",
"yield",
"gen",
".",
"maybe_future",
"(",
"self",
"... | Load user options from self.user_options dict
This can be set via POST to the API or via options_from_form
Only supported argument by default is 'profile'.
Override in subclasses to support other options. | [
"Load",
"user",
"options",
"from",
"self",
".",
"user_options",
"dict"
] | 46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13 | https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1912-L1926 | train | 228,098 |
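Since the list is resolved through gen.maybe_future, profile_list may also be a coroutine computed per spawn. A dynamic sketch; the admin check is an illustrative assumption:

async def dynamic_profile_list(spawner):
    profiles = [{'display_name': 'default', 'default': True}]
    if spawner.user.admin:
        profiles.append({
            'display_name': 'admin-large',
            'kubespawner_override': {'cpu_limit': 8},
        })
    return profiles

c.KubeSpawner.profile_list = dynamic_profile_list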
ev3dev/ev3dev-lang-python | ev3dev2/motor.py | list_motors | def list_motors(name_pattern=Motor.SYSTEM_DEVICE_NAME_CONVENTION, **kwargs):
"""
This is a generator function that enumerates all tacho motors that match
the provided arguments.
Parameters:
name_pattern: pattern that device name should match.
For example, 'motor*'. Default value: '*'.
keyword arguments: used for matching the corresponding device
attributes. For example, driver_name='lego-ev3-l-motor', or
address=['outB', 'outC']. When argument value
is a list, then a match against any entry of the list is
enough.
"""
class_path = abspath(Device.DEVICE_ROOT_PATH + '/' + Motor.SYSTEM_CLASS_NAME)
return (Motor(name_pattern=name, name_exact=True)
for name in list_device_names(class_path, name_pattern, **kwargs)) | python | def list_motors(name_pattern=Motor.SYSTEM_DEVICE_NAME_CONVENTION, **kwargs):
"""
This is a generator function that enumerates all tacho motors that match
the provided arguments.
Parameters:
name_pattern: pattern that device name should match.
For example, 'motor*'. Default value: '*'.
keyword arguments: used for matching the corresponding device
attributes. For example, driver_name='lego-ev3-l-motor', or
address=['outB', 'outC']. When argument value
is a list, then a match against any entry of the list is
enough.
"""
class_path = abspath(Device.DEVICE_ROOT_PATH + '/' + Motor.SYSTEM_CLASS_NAME)
return (Motor(name_pattern=name, name_exact=True)
for name in list_device_names(class_path, name_pattern, **kwargs)) | [
"def",
"list_motors",
"(",
"name_pattern",
"=",
"Motor",
".",
"SYSTEM_DEVICE_NAME_CONVENTION",
",",
"*",
"*",
"kwargs",
")",
":",
"class_path",
"=",
"abspath",
"(",
"Device",
".",
"DEVICE_ROOT_PATH",
"+",
"'/'",
"+",
"Motor",
".",
"SYSTEM_CLASS_NAME",
")",
"re... | This is a generator function that enumerates all tacho motors that match
the provided arguments.
Parameters:
name_pattern: pattern that device name should match.
For example, 'motor*'. Default value: '*'.
keyword arguments: used for matching the corresponding device
attributes. For example, driver_name='lego-ev3-l-motor', or
address=['outB', 'outC']. When argument value
is a list, then a match against any entry of the list is
enough. | [
"This",
"is",
"a",
"generator",
"function",
"that",
"enumerates",
"all",
"tacho",
"motors",
"that",
"match",
"the",
"provided",
"arguments",
"."
] | afc98d35004b533dc161a01f7c966e78607d7c1e | https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L1060-L1077 | train | 228,099 |
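A usage sketch matching the documented keyword matching; the driver name and port names are the ones given in the docstring:

from ev3dev2.motor import list_motors

# every large EV3 motor, whichever port it occupies
for motor in list_motors(driver_name='lego-ev3-l-motor'):
    print(motor.address)

# motors plugged into either of two specific ports
b_or_c = list(list_motors(address=['outB', 'outC']))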