repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
asifpy/django-crudbuilder
|
crudbuilder/formset.py
|
BaseInlineFormset.construct_formset
|
python
|
def construct_formset(self):
if not self.inline_model or not self.parent_model:
msg = "Parent and Inline models are required in {}".format(self.__class__.__name__)
raise NotModelException(msg)
return inlineformset_factory(
self.parent_model,
self.inline_model,
**self.get_factory_kwargs())
|
Returns an instance of the inline formset
|
train
|
https://github.com/asifpy/django-crudbuilder/blob/9de1c6fa555086673dd7ccc351d4b771c6192489/crudbuilder/formset.py#L18-L29
|
[
"def get_factory_kwargs(self):\n \"\"\"\n Returns the keyword arguments for calling the formset factory\n \"\"\"\n kwargs = {}\n kwargs.update({\n 'can_delete': self.can_delete,\n 'extra': self.extra,\n 'exclude': self.exclude,\n 'fields': self.fields,\n 'formfield_callback': self.formfield_callback,\n 'fk_name': self.fk_name,\n })\n if self.formset_class:\n kwargs['formset'] = self.formset_class\n\n if self.child_form:\n kwargs['form'] = self.child_form\n return kwargs\n"
] |
class BaseInlineFormset(object):
extra = 3
can_delete = True
inline_model = None
parent_model = None
exclude = []
fields = None
formfield_callback = None
fk_name = None
formset_class = None
child_form = None
def get_factory_kwargs(self):
"""
Returns the keyword arguments for calling the formset factory
"""
kwargs = {}
kwargs.update({
'can_delete': self.can_delete,
'extra': self.extra,
'exclude': self.exclude,
'fields': self.fields,
'formfield_callback': self.formfield_callback,
'fk_name': self.fk_name,
})
if self.formset_class:
kwargs['formset'] = self.formset_class
if self.child_form:
kwargs['form'] = self.child_form
return kwargs
|
asifpy/django-crudbuilder
|
crudbuilder/formset.py
|
BaseInlineFormset.get_factory_kwargs
|
python
|
def get_factory_kwargs(self):
kwargs = {}
kwargs.update({
'can_delete': self.can_delete,
'extra': self.extra,
'exclude': self.exclude,
'fields': self.fields,
'formfield_callback': self.formfield_callback,
'fk_name': self.fk_name,
})
if self.formset_class:
kwargs['formset'] = self.formset_class
if self.child_form:
kwargs['form'] = self.child_form
return kwargs
|
Returns the keyword arguments for calling the formset factory
|
train
|
https://github.com/asifpy/django-crudbuilder/blob/9de1c6fa555086673dd7ccc351d4b771c6192489/crudbuilder/formset.py#L31-L49
| null |
class BaseInlineFormset(object):
extra = 3
can_delete = True
inline_model = None
parent_model = None
exclude = []
fields = None
formfield_callback = None
fk_name = None
formset_class = None
child_form = None
def construct_formset(self):
"""
Returns an instance of the inline formset
"""
if not self.inline_model or not self.parent_model:
msg = "Parent and Inline models are required in {}".format(self.__class__.__name__)
raise NotModelException(msg)
return inlineformset_factory(
self.parent_model,
self.inline_model,
**self.get_factory_kwargs())
|
asifpy/django-crudbuilder
|
crudbuilder/helpers.py
|
plural
|
python
|
def plural(text):
aberrant = {
'knife': 'knives',
'self': 'selves',
'elf': 'elves',
'life': 'lives',
'hoof': 'hooves',
'leaf': 'leaves',
'echo': 'echoes',
'embargo': 'embargoes',
'hero': 'heroes',
'potato': 'potatoes',
'tomato': 'tomatoes',
'torpedo': 'torpedoes',
'veto': 'vetoes',
'child': 'children',
'woman': 'women',
'man': 'men',
'person': 'people',
'goose': 'geese',
'mouse': 'mice',
'barracks': 'barracks',
'deer': 'deer',
'nucleus': 'nuclei',
'syllabus': 'syllabi',
'focus': 'foci',
'fungus': 'fungi',
'cactus': 'cacti',
'phenomenon': 'phenomena',
'index': 'indices',
'appendix': 'appendices',
'criterion': 'criteria',
}
if text in aberrant:
result = '%s' % aberrant[text]
else:
postfix = 's'
if len(text) > 2:
vowels = 'aeiou'
if text[-2:] in ('ch', 'sh'):
postfix = 'es'
elif text[-1:] == 'y':
if (text[-2:-1] in vowels) or (text[0] in string.ascii_uppercase):
postfix = 's'
else:
postfix = 'ies'
text = text[:-1]
elif text[-2:] == 'is':
postfix = 'es'
text = text[:-2]
elif text[-1:] in ('s', 'z', 'x'):
postfix = 'es'
result = '%s%s' % (text, postfix)
return result
|
>>> plural('activity')
'activities'
|
train
|
https://github.com/asifpy/django-crudbuilder/blob/9de1c6fa555086673dd7ccc351d4b771c6192489/crudbuilder/helpers.py#L19-L80
| null |
import re
import string
import imp
try:
# Django versions >= 1.9
from django.utils.module_loading import import_module
except ImportError:
# Django versions < 1.9
from django.utils.importlib import import_module
__all__ = ['plural', 'mixedToUnder', 'capword', 'lowerword', 'underToMixed']
# http://code.activestate.com/recipes/82102-smart-pluralisation-english/
# Text utilities
#
_mixedToUnderRE = re.compile(r'[A-Z]+')
def mixedToUnder(s): # pragma: no cover
"""
Sample:
>>> mixedToUnder("FooBarBaz")
'foo_bar_baz'
Special case for ID:
>>> mixedToUnder("FooBarID")
'foo_bar_id'
"""
if s.endswith('ID'):
return mixedToUnder(s[:-2] + "_id")
trans = _mixedToUnderRE.sub(mixedToUnderSub, s)
if trans.startswith('_'):
trans = trans[1:]
return trans
def mixedToUnderSub(match):
m = match.group(0).lower()
if len(m) > 1:
return '_%s_%s' % (m[:-1], m[-1])
else:
return '_%s' % m
def capword(s):
"""
>>> capword('foo')
'Foo'
"""
return s[0].upper() + s[1:]
def lowerword(s): # pragma: no cover
"""
>>> lowerword('Hello')
'hello'
"""
return s[0].lower() + s[1:]
_underToMixedRE = re.compile('_.')
def underToMixed(name):
"""
>>> underToMixed('some_large_model_name_perhaps')
'someLargeModelNamePerhaps'
>>> underToMixed('exception_for_id')
'exceptionForID'
"""
if name.endswith('_id'):
return underToMixed(name[:-3] + "ID")
return _underToMixedRE.sub(lambda m: m.group(0)[1].upper(),
name)
def model_class_form(name):
"""
>>> model_class_form('foo_bar_baz')
'FooBarBaz'
"""
return capword(underToMixed(name))
def underToAllCaps(value): # pragma: no cover
"""
>>> underToAllCaps('foo_bar_baz')
'Foo Bar Baz'
"""
return ' '.join(map(lambda x: x.title(), value.split('_')))
def import_crud(app):
'''
Import crud module and register all model cruds which it contains
'''
try:
app_path = import_module(app).__path__
except (AttributeError, ImportError):
return None
try:
imp.find_module('crud', app_path)
except ImportError:
return None
module = import_module("%s.crud" % app)
return module
def auto_discover():
'''
Auto register all apps that have module crud
'''
from django.conf import settings
for app in settings.INSTALLED_APPS:
import_crud(app)
def custom_postfix_url(crud, model):
postfix = getattr(crud, 'custom_postfix_url', None)
if not postfix:
postfix = plural(model)
return postfix
def get_urlresolver():
"""Returns apporpriate urlresolver based on django version"""
try:
from django.core import urlresolvers
return urlresolvers
except ImportError:
from django import urls
return urls
reverse_lazy = get_urlresolver().reverse_lazy
reverse = get_urlresolver().reverse
|
asifpy/django-crudbuilder
|
crudbuilder/helpers.py
|
mixedToUnder
|
python
|
def mixedToUnder(s): # pragma: no cover
if s.endswith('ID'):
return mixedToUnder(s[:-2] + "_id")
trans = _mixedToUnderRE.sub(mixedToUnderSub, s)
if trans.startswith('_'):
trans = trans[1:]
return trans
|
Sample:
>>> mixedToUnder("FooBarBaz")
'foo_bar_baz'
Special case for ID:
>>> mixedToUnder("FooBarID")
'foo_bar_id'
|
train
|
https://github.com/asifpy/django-crudbuilder/blob/9de1c6fa555086673dd7ccc351d4b771c6192489/crudbuilder/helpers.py#L88-L103
|
[
"def mixedToUnder(s): # pragma: no cover\n \"\"\"\n Sample:\n >>> mixedToUnder(\"FooBarBaz\")\n 'foo_bar_baz'\n\n Special case for ID:\n >>> mixedToUnder(\"FooBarID\")\n 'foo_bar_id'\n \"\"\"\n if s.endswith('ID'):\n return mixedToUnder(s[:-2] + \"_id\")\n trans = _mixedToUnderRE.sub(mixedToUnderSub, s)\n if trans.startswith('_'):\n trans = trans[1:]\n return trans\n"
] |
import re
import string
import imp
try:
# Django versions >= 1.9
from django.utils.module_loading import import_module
except ImportError:
# Django versions < 1.9
from django.utils.importlib import import_module
__all__ = ['plural', 'mixedToUnder', 'capword', 'lowerword', 'underToMixed']
# http://code.activestate.com/recipes/82102-smart-pluralisation-english/
def plural(text):
"""
>>> plural('activity')
'activities'
"""
aberrant = {
'knife': 'knives',
'self': 'selves',
'elf': 'elves',
'life': 'lives',
'hoof': 'hooves',
'leaf': 'leaves',
'echo': 'echoes',
'embargo': 'embargoes',
'hero': 'heroes',
'potato': 'potatoes',
'tomato': 'tomatoes',
'torpedo': 'torpedoes',
'veto': 'vetoes',
'child': 'children',
'woman': 'women',
'man': 'men',
'person': 'people',
'goose': 'geese',
'mouse': 'mice',
'barracks': 'barracks',
'deer': 'deer',
'nucleus': 'nuclei',
'syllabus': 'syllabi',
'focus': 'foci',
'fungus': 'fungi',
'cactus': 'cacti',
'phenomenon': 'phenomena',
'index': 'indices',
'appendix': 'appendices',
'criterion': 'criteria',
}
if text in aberrant:
result = '%s' % aberrant[text]
else:
postfix = 's'
if len(text) > 2:
vowels = 'aeiou'
if text[-2:] in ('ch', 'sh'):
postfix = 'es'
elif text[-1:] == 'y':
if (text[-2:-1] in vowels) or (text[0] in string.ascii_uppercase):
postfix = 's'
else:
postfix = 'ies'
text = text[:-1]
elif text[-2:] == 'is':
postfix = 'es'
text = text[:-2]
elif text[-1:] in ('s', 'z', 'x'):
postfix = 'es'
result = '%s%s' % (text, postfix)
return result
# Text utilities
#
_mixedToUnderRE = re.compile(r'[A-Z]+')
def mixedToUnderSub(match):
m = match.group(0).lower()
if len(m) > 1:
return '_%s_%s' % (m[:-1], m[-1])
else:
return '_%s' % m
def capword(s):
"""
>>> capword('foo')
'Foo'
"""
return s[0].upper() + s[1:]
def lowerword(s): # pragma: no cover
"""
>>> lowerword('Hello')
'hello'
"""
return s[0].lower() + s[1:]
_underToMixedRE = re.compile('_.')
def underToMixed(name):
"""
>>> underToMixed('some_large_model_name_perhaps')
'someLargeModelNamePerhaps'
>>> underToMixed('exception_for_id')
'exceptionForID'
"""
if name.endswith('_id'):
return underToMixed(name[:-3] + "ID")
return _underToMixedRE.sub(lambda m: m.group(0)[1].upper(),
name)
def model_class_form(name):
"""
>>> model_class_form('foo_bar_baz')
'FooBarBaz'
"""
return capword(underToMixed(name))
def underToAllCaps(value): # pragma: no cover
"""
>>> underToAllCaps('foo_bar_baz')
'Foo Bar Baz'
"""
return ' '.join(map(lambda x: x.title(), value.split('_')))
def import_crud(app):
'''
Import crud module and register all model cruds which it contains
'''
try:
app_path = import_module(app).__path__
except (AttributeError, ImportError):
return None
try:
imp.find_module('crud', app_path)
except ImportError:
return None
module = import_module("%s.crud" % app)
return module
def auto_discover():
'''
Auto register all apps that have module crud
'''
from django.conf import settings
for app in settings.INSTALLED_APPS:
import_crud(app)
def custom_postfix_url(crud, model):
postfix = getattr(crud, 'custom_postfix_url', None)
if not postfix:
postfix = plural(model)
return postfix
def get_urlresolver():
"""Returns apporpriate urlresolver based on django version"""
try:
from django.core import urlresolvers
return urlresolvers
except ImportError:
from django import urls
return urls
reverse_lazy = get_urlresolver().reverse_lazy
reverse = get_urlresolver().reverse
|
asifpy/django-crudbuilder
|
crudbuilder/helpers.py
|
underToMixed
|
python
|
def underToMixed(name):
if name.endswith('_id'):
return underToMixed(name[:-3] + "ID")
return _underToMixedRE.sub(lambda m: m.group(0)[1].upper(),
name)
|
>>> underToMixed('some_large_model_name_perhaps')
'someLargeModelNamePerhaps'
>>> underToMixed('exception_for_id')
'exceptionForID'
|
train
|
https://github.com/asifpy/django-crudbuilder/blob/9de1c6fa555086673dd7ccc351d4b771c6192489/crudbuilder/helpers.py#L132-L143
|
[
"def underToMixed(name):\n \"\"\"\n >>> underToMixed('some_large_model_name_perhaps')\n 'someLargeModelNamePerhaps'\n\n >>> underToMixed('exception_for_id')\n 'exceptionForID'\n \"\"\"\n if name.endswith('_id'):\n return underToMixed(name[:-3] + \"ID\")\n return _underToMixedRE.sub(lambda m: m.group(0)[1].upper(),\n name)\n"
] |
import re
import string
import imp
try:
# Django versions >= 1.9
from django.utils.module_loading import import_module
except ImportError:
# Django versions < 1.9
from django.utils.importlib import import_module
__all__ = ['plural', 'mixedToUnder', 'capword', 'lowerword', 'underToMixed']
# http://code.activestate.com/recipes/82102-smart-pluralisation-english/
def plural(text):
"""
>>> plural('activity')
'activities'
"""
aberrant = {
'knife': 'knives',
'self': 'selves',
'elf': 'elves',
'life': 'lives',
'hoof': 'hooves',
'leaf': 'leaves',
'echo': 'echoes',
'embargo': 'embargoes',
'hero': 'heroes',
'potato': 'potatoes',
'tomato': 'tomatoes',
'torpedo': 'torpedoes',
'veto': 'vetoes',
'child': 'children',
'woman': 'women',
'man': 'men',
'person': 'people',
'goose': 'geese',
'mouse': 'mice',
'barracks': 'barracks',
'deer': 'deer',
'nucleus': 'nuclei',
'syllabus': 'syllabi',
'focus': 'foci',
'fungus': 'fungi',
'cactus': 'cacti',
'phenomenon': 'phenomena',
'index': 'indices',
'appendix': 'appendices',
'criterion': 'criteria',
}
if text in aberrant:
result = '%s' % aberrant[text]
else:
postfix = 's'
if len(text) > 2:
vowels = 'aeiou'
if text[-2:] in ('ch', 'sh'):
postfix = 'es'
elif text[-1:] == 'y':
if (text[-2:-1] in vowels) or (text[0] in string.ascii_uppercase):
postfix = 's'
else:
postfix = 'ies'
text = text[:-1]
elif text[-2:] == 'is':
postfix = 'es'
text = text[:-2]
elif text[-1:] in ('s', 'z', 'x'):
postfix = 'es'
result = '%s%s' % (text, postfix)
return result
# Text utilities
#
_mixedToUnderRE = re.compile(r'[A-Z]+')
def mixedToUnder(s): # pragma: no cover
"""
Sample:
>>> mixedToUnder("FooBarBaz")
'foo_bar_baz'
Special case for ID:
>>> mixedToUnder("FooBarID")
'foo_bar_id'
"""
if s.endswith('ID'):
return mixedToUnder(s[:-2] + "_id")
trans = _mixedToUnderRE.sub(mixedToUnderSub, s)
if trans.startswith('_'):
trans = trans[1:]
return trans
def mixedToUnderSub(match):
m = match.group(0).lower()
if len(m) > 1:
return '_%s_%s' % (m[:-1], m[-1])
else:
return '_%s' % m
def capword(s):
"""
>>> capword('foo')
'Foo'
"""
return s[0].upper() + s[1:]
def lowerword(s): # pragma: no cover
"""
>>> lowerword('Hello')
'hello'
"""
return s[0].lower() + s[1:]
_underToMixedRE = re.compile('_.')
def model_class_form(name):
"""
>>> model_class_form('foo_bar_baz')
'FooBarBaz'
"""
return capword(underToMixed(name))
def underToAllCaps(value): # pragma: no cover
"""
>>> underToAllCaps('foo_bar_baz')
'Foo Bar Baz'
"""
return ' '.join(map(lambda x: x.title(), value.split('_')))
def import_crud(app):
'''
Import crud module and register all model cruds which it contains
'''
try:
app_path = import_module(app).__path__
except (AttributeError, ImportError):
return None
try:
imp.find_module('crud', app_path)
except ImportError:
return None
module = import_module("%s.crud" % app)
return module
def auto_discover():
'''
Auto register all apps that have module crud
'''
from django.conf import settings
for app in settings.INSTALLED_APPS:
import_crud(app)
def custom_postfix_url(crud, model):
postfix = getattr(crud, 'custom_postfix_url', None)
if not postfix:
postfix = plural(model)
return postfix
def get_urlresolver():
"""Returns apporpriate urlresolver based on django version"""
try:
from django.core import urlresolvers
return urlresolvers
except ImportError:
from django import urls
return urls
reverse_lazy = get_urlresolver().reverse_lazy
reverse = get_urlresolver().reverse
|
asifpy/django-crudbuilder
|
crudbuilder/helpers.py
|
import_crud
|
python
|
def import_crud(app):
'''
Import crud module and register all model cruds which it contains
'''
try:
app_path = import_module(app).__path__
except (AttributeError, ImportError):
return None
try:
imp.find_module('crud', app_path)
except ImportError:
return None
module = import_module("%s.crud" % app)
return module
|
Import crud module and register all model cruds which it contains
|
train
|
https://github.com/asifpy/django-crudbuilder/blob/9de1c6fa555086673dd7ccc351d4b771c6192489/crudbuilder/helpers.py#L162-L179
| null |
import re
import string
import imp
try:
# Django versions >= 1.9
from django.utils.module_loading import import_module
except ImportError:
# Django versions < 1.9
from django.utils.importlib import import_module
__all__ = ['plural', 'mixedToUnder', 'capword', 'lowerword', 'underToMixed']
# http://code.activestate.com/recipes/82102-smart-pluralisation-english/
def plural(text):
"""
>>> plural('activity')
'activities'
"""
aberrant = {
'knife': 'knives',
'self': 'selves',
'elf': 'elves',
'life': 'lives',
'hoof': 'hooves',
'leaf': 'leaves',
'echo': 'echoes',
'embargo': 'embargoes',
'hero': 'heroes',
'potato': 'potatoes',
'tomato': 'tomatoes',
'torpedo': 'torpedoes',
'veto': 'vetoes',
'child': 'children',
'woman': 'women',
'man': 'men',
'person': 'people',
'goose': 'geese',
'mouse': 'mice',
'barracks': 'barracks',
'deer': 'deer',
'nucleus': 'nuclei',
'syllabus': 'syllabi',
'focus': 'foci',
'fungus': 'fungi',
'cactus': 'cacti',
'phenomenon': 'phenomena',
'index': 'indices',
'appendix': 'appendices',
'criterion': 'criteria',
}
if text in aberrant:
result = '%s' % aberrant[text]
else:
postfix = 's'
if len(text) > 2:
vowels = 'aeiou'
if text[-2:] in ('ch', 'sh'):
postfix = 'es'
elif text[-1:] == 'y':
if (text[-2:-1] in vowels) or (text[0] in string.ascii_uppercase):
postfix = 's'
else:
postfix = 'ies'
text = text[:-1]
elif text[-2:] == 'is':
postfix = 'es'
text = text[:-2]
elif text[-1:] in ('s', 'z', 'x'):
postfix = 'es'
result = '%s%s' % (text, postfix)
return result
# Text utilities
#
_mixedToUnderRE = re.compile(r'[A-Z]+')
def mixedToUnder(s): # pragma: no cover
"""
Sample:
>>> mixedToUnder("FooBarBaz")
'foo_bar_baz'
Special case for ID:
>>> mixedToUnder("FooBarID")
'foo_bar_id'
"""
if s.endswith('ID'):
return mixedToUnder(s[:-2] + "_id")
trans = _mixedToUnderRE.sub(mixedToUnderSub, s)
if trans.startswith('_'):
trans = trans[1:]
return trans
def mixedToUnderSub(match):
m = match.group(0).lower()
if len(m) > 1:
return '_%s_%s' % (m[:-1], m[-1])
else:
return '_%s' % m
def capword(s):
"""
>>> capword('foo')
'Foo'
"""
return s[0].upper() + s[1:]
def lowerword(s): # pragma: no cover
"""
>>> lowerword('Hello')
'hello'
"""
return s[0].lower() + s[1:]
_underToMixedRE = re.compile('_.')
def underToMixed(name):
"""
>>> underToMixed('some_large_model_name_perhaps')
'someLargeModelNamePerhaps'
>>> underToMixed('exception_for_id')
'exceptionForID'
"""
if name.endswith('_id'):
return underToMixed(name[:-3] + "ID")
return _underToMixedRE.sub(lambda m: m.group(0)[1].upper(),
name)
def model_class_form(name):
"""
>>> model_class_form('foo_bar_baz')
'FooBarBaz'
"""
return capword(underToMixed(name))
def underToAllCaps(value): # pragma: no cover
"""
>>> underToAllCaps('foo_bar_baz')
'Foo Bar Baz'
"""
return ' '.join(map(lambda x: x.title(), value.split('_')))
def auto_discover():
'''
Auto register all apps that have module crud
'''
from django.conf import settings
for app in settings.INSTALLED_APPS:
import_crud(app)
def custom_postfix_url(crud, model):
postfix = getattr(crud, 'custom_postfix_url', None)
if not postfix:
postfix = plural(model)
return postfix
def get_urlresolver():
"""Returns apporpriate urlresolver based on django version"""
try:
from django.core import urlresolvers
return urlresolvers
except ImportError:
from django import urls
return urls
reverse_lazy = get_urlresolver().reverse_lazy
reverse = get_urlresolver().reverse
|
asifpy/django-crudbuilder
|
crudbuilder/abstract.py
|
BaseBuilder.get_model_class
|
python
|
def get_model_class(self):
try:
c = ContentType.objects.get(app_label=self.app, model=self.model)
except ContentType.DoesNotExist:
# try another kind of resolution
# fixes a situation where a proxy model is defined in some external app.
if django.VERSION >= (1, 7):
return apps.get_model(self.app, self.model)
else:
return c.model_class()
|
Returns model class
|
train
|
https://github.com/asifpy/django-crudbuilder/blob/9de1c6fa555086673dd7ccc351d4b771c6192489/crudbuilder/abstract.py#L53-L63
| null |
class BaseBuilder(object):
def __init__(
self,
app,
model,
crud
):
self.model = model
self.app = app
self.crud = crud
self.custom_modelform = self._has_crud_attr('custom_modelform')
self.modelform_excludes = self._has_crud_attr('modelform_excludes')
self.detailview_excludes = self._has_crud_attr('detailview_excludes')
self.createupdate_forms = self._has_crud_attr('createupdate_forms')
self.custom_update_view_mixin = self._has_crud_attr('custom_update_view_mixin')
self.custom_create_view_mixin = self._has_crud_attr('custom_create_view_mixin')
# django tables2
self.custom_table2 = self._has_crud_attr('custom_table2')
self.tables2_fields = self._has_crud_attr('tables2_fields')
self.tables2_css_class = self._has_crud_attr('tables2_css_class')
self.tables2_pagination = self._has_crud_attr('tables2_pagination')
self.permission_required = self._has_crud_attr('permission_required')
self.permissions = self._has_crud_attr('permissions')
self.login_required = self._has_crud_attr('login_required')
self.custom_templates = self._has_crud_attr('custom_templates')
self.custom_queryset = self._has_crud_attr('custom_queryset')
self.custom_context = self._has_crud_attr('custom_context')
self.inlineformset = self.get_inlineformset
self.custom_postfix_url = self.postfix_url
@property
def _has_crud_attr(self, attr):
return getattr(self.crud, attr, None)
def view_permission(self, view_type):
if self.permissions:
return self.permissions.get(view_type, None)
else:
return '{}.{}_{}'.format(self.app, self.model, view_type)
@property
def check_login_required(self):
return True if self.login_required else False
@property
def check_permission_required(self):
return True if self.permission_required else False
@property
def get_inlineformset(self):
if self.crud.inlineformset:
return self.crud.inlineformset().construct_formset()
else:
return None
@property
def postfix_url(self):
return helpers.custom_postfix_url(self.crud(), self.model)
|
asifpy/django-crudbuilder
|
crudbuilder/templatetags/crudbuilder.py
|
get_verbose_field_name
|
python
|
def get_verbose_field_name(instance, field_name):
fields = [field.name for field in instance._meta.fields]
if field_name in fields:
return instance._meta.get_field(field_name).verbose_name
else:
return field_name
|
Returns verbose_name for a field.
|
train
|
https://github.com/asifpy/django-crudbuilder/blob/9de1c6fa555086673dd7ccc351d4b771c6192489/crudbuilder/templatetags/crudbuilder.py#L63-L71
| null |
import re
from math import floor
from django import template
from itertools import chain
from django.template.defaultfilters import stringfilter
from collections import namedtuple
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
register = template.Library()
Field = namedtuple('Field', 'name verbose_name')
CrudDetail = namedtuple('CrudDetail', ['app', 'model', 'list_url'])
@register.filter
@stringfilter
def undertospaced(value):
return value.replace("_", " ").title()
@register.filter
def get_value(obj, field):
try:
return getattr(obj, 'get_%s_display' % field)()
except:
return getattr(obj, field)
@register.filter
def class_name(obj):
return obj.__class__.__name__
@register.filter
def crud_detail(crud_key):
app, model, postfix_url = crud_key.split('-', 2)
list_url = '{}-{}-list'.format(app, postfix_url)
return CrudDetail(app, model, list_url)
@register.filter
def get_model_fields(obj, detail_exclude=None):
model = obj.__class__
excludes = ['pk']
property_fields = []
for name in dir(model):
if name not in excludes and isinstance(
getattr(model, name, None), property
):
property_fields.append(Field(name=name, verbose_name=name))
fields = chain(obj._meta.fields, property_fields)
if detail_exclude:
fields = [field for field in fields if field.name not in detail_exclude]
return fields
@register.filter
@register.filter(is_safe=True)
def label_with_class(value, arg):
"""Style adjustments"""
return value.label_tag(attrs={'class': arg})
@register.filter(is_safe=True)
def input_with_class(value, arg):
value.field.widget.attrs['class'] = arg
return value
@register.filter(is_safe=True)
def inline_objects(object, inline_fk):
inline_model = inline_fk.model
related_filter = {inline_fk.name: object}
return inline_model.objects.filter(**related_filter)
@register.inclusion_tag('crudbuilder/widgets/tables/pagination.html')
def bootstrap_pagination(page, **kwargs):
pagination_kwargs = kwargs.copy()
pagination_kwargs['page'] = page
return get_pagination_context(**pagination_kwargs)
def get_pagination_context(page, pages_to_show=11,
url=None, size=None, extra=None,
parameter_name='page'):
"""
Generate Bootstrap pagination context from a page object
"""
pages_to_show = int(pages_to_show)
if pages_to_show < 1:
raise ValueError(
"Pagination pages_to_show should be a positive"
"integer, you specified {pages}".format(
pages=pages_to_show)
)
num_pages = page.paginator.num_pages
current_page = page.number
half_page_num = int(floor(pages_to_show / 2))
if half_page_num < 0:
half_page_num = 0
first_page = current_page - half_page_num
if first_page <= 1:
first_page = 1
if first_page > 1:
pages_back = first_page - half_page_num
if pages_back < 1:
pages_back = 1
else:
pages_back = None
last_page = first_page + pages_to_show - 1
if pages_back is None:
last_page += 1
if last_page > num_pages:
last_page = num_pages
if last_page < num_pages:
pages_forward = last_page + half_page_num
if pages_forward > num_pages:
pages_forward = num_pages
else:
pages_forward = None
if first_page > 1:
first_page -= 1
if pages_back is not None and pages_back > 1:
pages_back -= 1
else:
pages_back = None
pages_shown = []
for i in range(first_page, last_page + 1):
pages_shown.append(i)
# Append proper character to url
if url:
# Remove existing page GET parameters
url = force_text(url)
url = re.sub(r'\?{0}\=[^\&]+'.format(parameter_name), '?', url)
url = re.sub(r'\&{0}\=[^\&]+'.format(parameter_name), '', url)
# Append proper separator
if '?' in url:
url += '&'
else:
url += '?'
# Append extra string to url
if extra:
if not url:
url = '?'
url += force_text(extra) + '&'
if url:
url = url.replace('?&', '?')
# Set CSS classes, see http://getbootstrap.com/components/#pagination
pagination_css_classes = ['pagination']
if size == 'small':
pagination_css_classes.append('pagination-sm')
elif size == 'large':
pagination_css_classes.append('pagination-lg')
# Build context object
return {
'bootstrap_pagination_url': url,
'num_pages': num_pages,
'current_page': current_page,
'first_page': first_page,
'last_page': last_page,
'pages_shown': pages_shown,
'pages_back': pages_back,
'pages_forward': pages_forward,
'pagination_css_classes': ' '.join(pagination_css_classes),
'parameter_name': parameter_name,
}
|
asifpy/django-crudbuilder
|
crudbuilder/views.py
|
ViewBuilder.generate_modelform
|
python
|
def generate_modelform(self):
model_class = self.get_model_class
excludes = self.modelform_excludes if self.modelform_excludes else []
_ObjectForm = modelform_factory(model_class, exclude=excludes)
return _ObjectForm
|
Generate modelform from Django modelform_factory
|
train
|
https://github.com/asifpy/django-crudbuilder/blob/9de1c6fa555086673dd7ccc351d4b771c6192489/crudbuilder/views.py#L58-L64
| null |
class ViewBuilder(BaseBuilder):
"""View builder which returns all the CRUD class based views"""
def __init__(self, *args, **kwargs):
super(ViewBuilder, self).__init__(*args, **kwargs)
self.classes = {}
def generate_crud(self):
self.generate_list_view()
self.generate_create_view()
self.generate_detail_view()
self.generate_update_view()
self.generate_delete_view()
def get_actual_form(self, view):
if self.createupdate_forms and self.createupdate_forms.get(view, None):
return self.createupdate_forms.get(view)
elif self.custom_modelform:
return self.custom_modelform
else:
return self.generate_modelform()
def get_actual_table(self):
if self.custom_table2:
return self.custom_table2
else:
table_builder = TableBuilder(self.app, self.model, self.crud)
return table_builder.generate_table()
def get_template(self, tname):
"""
- Get custom template from CRUD class, if it is defined in it
- No custom template in CRUD class, then use the default template
"""
if self.custom_templates and self.custom_templates.get(tname, None):
return self.custom_templates.get(tname)
elif self.inlineformset:
return 'crudbuilder/inline/{}.html'.format(tname)
else:
return 'crudbuilder/instance/{}.html'.format(tname)
def get_createupdate_mixin(self):
if self.inlineformset:
return InlineFormsetViewMixin
else:
return CreateUpdateViewMixin
def generate_list_view(self):
"""Generate class based view for ListView"""
name = model_class_form(self.model + 'ListView')
list_args = dict(
model=self.get_model_class,
context_object_name=plural(self.model),
template_name=self.get_template('list'),
table_class=self.get_actual_table(),
context_table_name='table_objects',
crud=self.crud,
permissions=self.view_permission('list'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
table_pagination={'per_page': self.tables2_pagination or 10},
custom_queryset=self.custom_queryset,
custom_context=self.custom_context,
custom_postfix_url=self.custom_postfix_url
)
list_class = type(
name,
(BaseListViewMixin, SingleTableView),
list_args
)
self.classes[name] = list_class
return list_class
def generate_create_view(self):
"""Generate class based view for CreateView"""
name = model_class_form(self.model + 'CreateView')
create_args = dict(
form_class=self.get_actual_form('create'),
model=self.get_model_class,
template_name=self.get_template('create'),
permissions=self.view_permission('create'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
inlineformset=self.inlineformset,
success_url=reverse_lazy('{}-{}-list'.format(self.app, self.custom_postfix_url)),
custom_form=self.createupdate_forms or self.custom_modelform,
custom_postfix_url=self.custom_postfix_url
)
parent_classes = [self.get_createupdate_mixin(), CreateView]
if self.custom_create_view_mixin:
parent_classes.insert(0, self.custom_create_view_mixin)
create_class = type(
name,
tuple(parent_classes),
create_args
)
self.classes[name] = create_class
return create_class
def generate_detail_view(self):
"""Generate class based view for DetailView"""
name = model_class_form(self.model + 'DetailView')
detail_args = dict(
detailview_excludes=self.detailview_excludes,
model=self.get_model_class,
template_name=self.get_template('detail'),
login_required=self.check_login_required,
permissions=self.view_permission('detail'),
inlineformset=self.inlineformset,
permission_required=self.check_permission_required,
custom_postfix_url=self.custom_postfix_url
)
detail_class = type(name, (BaseDetailViewMixin, DetailView), detail_args)
self.classes[name] = detail_class
return detail_class
def generate_update_view(self):
"""Generate class based view for UpdateView"""
name = model_class_form(self.model + 'UpdateView')
update_args = dict(
form_class=self.get_actual_form('update'),
model=self.get_model_class,
template_name=self.get_template('update'),
permissions=self.view_permission('update'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
inlineformset=self.inlineformset,
custom_form=self.createupdate_forms or self.custom_modelform,
success_url=reverse_lazy('{}-{}-list'.format(self.app, self.custom_postfix_url)),
custom_postfix_url=self.custom_postfix_url
)
parent_classes = [self.get_createupdate_mixin(), UpdateView]
if self.custom_update_view_mixin:
parent_classes.insert(0, self.custom_update_view_mixin)
update_class = type(
name,
tuple(parent_classes),
update_args
)
self.classes[name] = update_class
return update_class
def generate_delete_view(self):
"""Generate class based view for DeleteView"""
name = model_class_form(self.model + 'DeleteView')
delete_args = dict(
model=self.get_model_class,
template_name=self.get_template('delete'),
permissions=self.view_permission('delete'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
success_url=reverse_lazy('{}-{}-list'.format(self.app, self.custom_postfix_url)),
custom_postfix_url=self.custom_postfix_url
)
delete_class = type(name, (CrudBuilderMixin, DeleteView), delete_args)
self.classes[name] = delete_class
return delete_class
|
asifpy/django-crudbuilder
|
crudbuilder/views.py
|
ViewBuilder.get_template
|
python
|
def get_template(self, tname):
if self.custom_templates and self.custom_templates.get(tname, None):
return self.custom_templates.get(tname)
elif self.inlineformset:
return 'crudbuilder/inline/{}.html'.format(tname)
else:
return 'crudbuilder/instance/{}.html'.format(tname)
|
- Get custom template from CRUD class, if it is defined in it
- No custom template in CRUD class, then use the default template
|
train
|
https://github.com/asifpy/django-crudbuilder/blob/9de1c6fa555086673dd7ccc351d4b771c6192489/crudbuilder/views.py#L66-L77
| null |
class ViewBuilder(BaseBuilder):
"""View builder which returns all the CRUD class based views"""
def __init__(self, *args, **kwargs):
super(ViewBuilder, self).__init__(*args, **kwargs)
self.classes = {}
def generate_crud(self):
self.generate_list_view()
self.generate_create_view()
self.generate_detail_view()
self.generate_update_view()
self.generate_delete_view()
def get_actual_form(self, view):
if self.createupdate_forms and self.createupdate_forms.get(view, None):
return self.createupdate_forms.get(view)
elif self.custom_modelform:
return self.custom_modelform
else:
return self.generate_modelform()
def get_actual_table(self):
if self.custom_table2:
return self.custom_table2
else:
table_builder = TableBuilder(self.app, self.model, self.crud)
return table_builder.generate_table()
def generate_modelform(self):
"""Generate modelform from Django modelform_factory"""
model_class = self.get_model_class
excludes = self.modelform_excludes if self.modelform_excludes else []
_ObjectForm = modelform_factory(model_class, exclude=excludes)
return _ObjectForm
def get_createupdate_mixin(self):
if self.inlineformset:
return InlineFormsetViewMixin
else:
return CreateUpdateViewMixin
def generate_list_view(self):
"""Generate class based view for ListView"""
name = model_class_form(self.model + 'ListView')
list_args = dict(
model=self.get_model_class,
context_object_name=plural(self.model),
template_name=self.get_template('list'),
table_class=self.get_actual_table(),
context_table_name='table_objects',
crud=self.crud,
permissions=self.view_permission('list'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
table_pagination={'per_page': self.tables2_pagination or 10},
custom_queryset=self.custom_queryset,
custom_context=self.custom_context,
custom_postfix_url=self.custom_postfix_url
)
list_class = type(
name,
(BaseListViewMixin, SingleTableView),
list_args
)
self.classes[name] = list_class
return list_class
def generate_create_view(self):
"""Generate class based view for CreateView"""
name = model_class_form(self.model + 'CreateView')
create_args = dict(
form_class=self.get_actual_form('create'),
model=self.get_model_class,
template_name=self.get_template('create'),
permissions=self.view_permission('create'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
inlineformset=self.inlineformset,
success_url=reverse_lazy('{}-{}-list'.format(self.app, self.custom_postfix_url)),
custom_form=self.createupdate_forms or self.custom_modelform,
custom_postfix_url=self.custom_postfix_url
)
parent_classes = [self.get_createupdate_mixin(), CreateView]
if self.custom_create_view_mixin:
parent_classes.insert(0, self.custom_create_view_mixin)
create_class = type(
name,
tuple(parent_classes),
create_args
)
self.classes[name] = create_class
return create_class
def generate_detail_view(self):
"""Generate class based view for DetailView"""
name = model_class_form(self.model + 'DetailView')
detail_args = dict(
detailview_excludes=self.detailview_excludes,
model=self.get_model_class,
template_name=self.get_template('detail'),
login_required=self.check_login_required,
permissions=self.view_permission('detail'),
inlineformset=self.inlineformset,
permission_required=self.check_permission_required,
custom_postfix_url=self.custom_postfix_url
)
detail_class = type(name, (BaseDetailViewMixin, DetailView), detail_args)
self.classes[name] = detail_class
return detail_class
def generate_update_view(self):
"""Generate class based view for UpdateView"""
name = model_class_form(self.model + 'UpdateView')
update_args = dict(
form_class=self.get_actual_form('update'),
model=self.get_model_class,
template_name=self.get_template('update'),
permissions=self.view_permission('update'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
inlineformset=self.inlineformset,
custom_form=self.createupdate_forms or self.custom_modelform,
success_url=reverse_lazy('{}-{}-list'.format(self.app, self.custom_postfix_url)),
custom_postfix_url=self.custom_postfix_url
)
parent_classes = [self.get_createupdate_mixin(), UpdateView]
if self.custom_update_view_mixin:
parent_classes.insert(0, self.custom_update_view_mixin)
update_class = type(
name,
tuple(parent_classes),
update_args
)
self.classes[name] = update_class
return update_class
def generate_delete_view(self):
"""Generate class based view for DeleteView"""
name = model_class_form(self.model + 'DeleteView')
delete_args = dict(
model=self.get_model_class,
template_name=self.get_template('delete'),
permissions=self.view_permission('delete'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
success_url=reverse_lazy('{}-{}-list'.format(self.app, self.custom_postfix_url)),
custom_postfix_url=self.custom_postfix_url
)
delete_class = type(name, (CrudBuilderMixin, DeleteView), delete_args)
self.classes[name] = delete_class
return delete_class
|
asifpy/django-crudbuilder
|
crudbuilder/views.py
|
ViewBuilder.generate_list_view
|
python
|
def generate_list_view(self):
name = model_class_form(self.model + 'ListView')
list_args = dict(
model=self.get_model_class,
context_object_name=plural(self.model),
template_name=self.get_template('list'),
table_class=self.get_actual_table(),
context_table_name='table_objects',
crud=self.crud,
permissions=self.view_permission('list'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
table_pagination={'per_page': self.tables2_pagination or 10},
custom_queryset=self.custom_queryset,
custom_context=self.custom_context,
custom_postfix_url=self.custom_postfix_url
)
list_class = type(
name,
(BaseListViewMixin, SingleTableView),
list_args
)
self.classes[name] = list_class
return list_class
|
Generate class based view for ListView
|
train
|
https://github.com/asifpy/django-crudbuilder/blob/9de1c6fa555086673dd7ccc351d4b771c6192489/crudbuilder/views.py#L85-L111
|
[
"def plural(text):\n \"\"\"\n >>> plural('activity')\n 'activities'\n \"\"\"\n aberrant = {\n 'knife': 'knives',\n 'self': 'selves',\n 'elf': 'elves',\n 'life': 'lives',\n 'hoof': 'hooves',\n 'leaf': 'leaves',\n 'echo': 'echoes',\n 'embargo': 'embargoes',\n 'hero': 'heroes',\n 'potato': 'potatoes',\n 'tomato': 'tomatoes',\n 'torpedo': 'torpedoes',\n 'veto': 'vetoes',\n 'child': 'children',\n 'woman': 'women',\n 'man': 'men',\n 'person': 'people',\n 'goose': 'geese',\n 'mouse': 'mice',\n 'barracks': 'barracks',\n 'deer': 'deer',\n 'nucleus': 'nuclei',\n 'syllabus': 'syllabi',\n 'focus': 'foci',\n 'fungus': 'fungi',\n 'cactus': 'cacti',\n 'phenomenon': 'phenomena',\n 'index': 'indices',\n 'appendix': 'appendices',\n 'criterion': 'criteria',\n\n\n }\n\n if text in aberrant:\n result = '%s' % aberrant[text]\n else:\n postfix = 's'\n if len(text) > 2:\n vowels = 'aeiou'\n if text[-2:] in ('ch', 'sh'):\n postfix = 'es'\n elif text[-1:] == 'y':\n if (text[-2:-1] in vowels) or (text[0] in string.ascii_uppercase):\n postfix = 's'\n else:\n postfix = 'ies'\n text = text[:-1]\n elif text[-2:] == 'is':\n postfix = 'es'\n text = text[:-2]\n elif text[-1:] in ('s', 'z', 'x'):\n postfix = 'es'\n\n result = '%s%s' % (text, postfix)\n return result\n",
"def model_class_form(name):\n \"\"\"\n >>> model_class_form('foo_bar_baz')\n 'FooBarBaz'\n \"\"\"\n return capword(underToMixed(name))\n",
"def view_permission(self, view_type):\n if self.permissions:\n return self.permissions.get(view_type, None)\n else:\n return '{}.{}_{}'.format(self.app, self.model, view_type)\n",
"def get_actual_table(self):\n if self.custom_table2:\n return self.custom_table2\n else:\n table_builder = TableBuilder(self.app, self.model, self.crud)\n return table_builder.generate_table()\n",
"def get_template(self, tname):\n \"\"\"\n - Get custom template from CRUD class, if it is defined in it\n - No custom template in CRUD class, then use the default template\n \"\"\"\n\n if self.custom_templates and self.custom_templates.get(tname, None):\n return self.custom_templates.get(tname)\n elif self.inlineformset:\n return 'crudbuilder/inline/{}.html'.format(tname)\n else:\n return 'crudbuilder/instance/{}.html'.format(tname)\n"
] |
class ViewBuilder(BaseBuilder):
"""View builder which returns all the CRUD class based views"""
def __init__(self, *args, **kwargs):
super(ViewBuilder, self).__init__(*args, **kwargs)
self.classes = {}
def generate_crud(self):
self.generate_list_view()
self.generate_create_view()
self.generate_detail_view()
self.generate_update_view()
self.generate_delete_view()
def get_actual_form(self, view):
if self.createupdate_forms and self.createupdate_forms.get(view, None):
return self.createupdate_forms.get(view)
elif self.custom_modelform:
return self.custom_modelform
else:
return self.generate_modelform()
def get_actual_table(self):
if self.custom_table2:
return self.custom_table2
else:
table_builder = TableBuilder(self.app, self.model, self.crud)
return table_builder.generate_table()
def generate_modelform(self):
"""Generate modelform from Django modelform_factory"""
model_class = self.get_model_class
excludes = self.modelform_excludes if self.modelform_excludes else []
_ObjectForm = modelform_factory(model_class, exclude=excludes)
return _ObjectForm
def get_template(self, tname):
"""
- Get custom template from CRUD class, if it is defined in it
- No custom template in CRUD class, then use the default template
"""
if self.custom_templates and self.custom_templates.get(tname, None):
return self.custom_templates.get(tname)
elif self.inlineformset:
return 'crudbuilder/inline/{}.html'.format(tname)
else:
return 'crudbuilder/instance/{}.html'.format(tname)
def get_createupdate_mixin(self):
if self.inlineformset:
return InlineFormsetViewMixin
else:
return CreateUpdateViewMixin
def generate_create_view(self):
"""Generate class based view for CreateView"""
name = model_class_form(self.model + 'CreateView')
create_args = dict(
form_class=self.get_actual_form('create'),
model=self.get_model_class,
template_name=self.get_template('create'),
permissions=self.view_permission('create'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
inlineformset=self.inlineformset,
success_url=reverse_lazy('{}-{}-list'.format(self.app, self.custom_postfix_url)),
custom_form=self.createupdate_forms or self.custom_modelform,
custom_postfix_url=self.custom_postfix_url
)
parent_classes = [self.get_createupdate_mixin(), CreateView]
if self.custom_create_view_mixin:
parent_classes.insert(0, self.custom_create_view_mixin)
create_class = type(
name,
tuple(parent_classes),
create_args
)
self.classes[name] = create_class
return create_class
def generate_detail_view(self):
"""Generate class based view for DetailView"""
name = model_class_form(self.model + 'DetailView')
detail_args = dict(
detailview_excludes=self.detailview_excludes,
model=self.get_model_class,
template_name=self.get_template('detail'),
login_required=self.check_login_required,
permissions=self.view_permission('detail'),
inlineformset=self.inlineformset,
permission_required=self.check_permission_required,
custom_postfix_url=self.custom_postfix_url
)
detail_class = type(name, (BaseDetailViewMixin, DetailView), detail_args)
self.classes[name] = detail_class
return detail_class
def generate_update_view(self):
"""Generate class based view for UpdateView"""
name = model_class_form(self.model + 'UpdateView')
update_args = dict(
form_class=self.get_actual_form('update'),
model=self.get_model_class,
template_name=self.get_template('update'),
permissions=self.view_permission('update'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
inlineformset=self.inlineformset,
custom_form=self.createupdate_forms or self.custom_modelform,
success_url=reverse_lazy('{}-{}-list'.format(self.app, self.custom_postfix_url)),
custom_postfix_url=self.custom_postfix_url
)
parent_classes = [self.get_createupdate_mixin(), UpdateView]
if self.custom_update_view_mixin:
parent_classes.insert(0, self.custom_update_view_mixin)
update_class = type(
name,
tuple(parent_classes),
update_args
)
self.classes[name] = update_class
return update_class
def generate_delete_view(self):
"""Generate class based view for DeleteView"""
name = model_class_form(self.model + 'DeleteView')
delete_args = dict(
model=self.get_model_class,
template_name=self.get_template('delete'),
permissions=self.view_permission('delete'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
success_url=reverse_lazy('{}-{}-list'.format(self.app, self.custom_postfix_url)),
custom_postfix_url=self.custom_postfix_url
)
delete_class = type(name, (CrudBuilderMixin, DeleteView), delete_args)
self.classes[name] = delete_class
return delete_class
|
asifpy/django-crudbuilder
|
crudbuilder/views.py
|
ViewBuilder.generate_create_view
|
python
|
def generate_create_view(self):
name = model_class_form(self.model + 'CreateView')
create_args = dict(
form_class=self.get_actual_form('create'),
model=self.get_model_class,
template_name=self.get_template('create'),
permissions=self.view_permission('create'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
inlineformset=self.inlineformset,
success_url=reverse_lazy('{}-{}-list'.format(self.app, self.custom_postfix_url)),
custom_form=self.createupdate_forms or self.custom_modelform,
custom_postfix_url=self.custom_postfix_url
)
parent_classes = [self.get_createupdate_mixin(), CreateView]
if self.custom_create_view_mixin:
parent_classes.insert(0, self.custom_create_view_mixin)
create_class = type(
name,
tuple(parent_classes),
create_args
)
self.classes[name] = create_class
return create_class
|
Generate class based view for CreateView
|
train
|
https://github.com/asifpy/django-crudbuilder/blob/9de1c6fa555086673dd7ccc351d4b771c6192489/crudbuilder/views.py#L113-L141
|
[
"def model_class_form(name):\n \"\"\"\n >>> model_class_form('foo_bar_baz')\n 'FooBarBaz'\n \"\"\"\n return capword(underToMixed(name))\n",
"def view_permission(self, view_type):\n if self.permissions:\n return self.permissions.get(view_type, None)\n else:\n return '{}.{}_{}'.format(self.app, self.model, view_type)\n",
"def get_actual_form(self, view):\n if self.createupdate_forms and self.createupdate_forms.get(view, None):\n return self.createupdate_forms.get(view)\n elif self.custom_modelform:\n return self.custom_modelform\n else:\n return self.generate_modelform()\n",
"def get_template(self, tname):\n \"\"\"\n - Get custom template from CRUD class, if it is defined in it\n - No custom template in CRUD class, then use the default template\n \"\"\"\n\n if self.custom_templates and self.custom_templates.get(tname, None):\n return self.custom_templates.get(tname)\n elif self.inlineformset:\n return 'crudbuilder/inline/{}.html'.format(tname)\n else:\n return 'crudbuilder/instance/{}.html'.format(tname)\n",
"def get_createupdate_mixin(self):\n if self.inlineformset:\n return InlineFormsetViewMixin\n else:\n return CreateUpdateViewMixin\n"
] |
class ViewBuilder(BaseBuilder):
"""View builder which returns all the CRUD class based views"""
def __init__(self, *args, **kwargs):
super(ViewBuilder, self).__init__(*args, **kwargs)
self.classes = {}
def generate_crud(self):
self.generate_list_view()
self.generate_create_view()
self.generate_detail_view()
self.generate_update_view()
self.generate_delete_view()
def get_actual_form(self, view):
if self.createupdate_forms and self.createupdate_forms.get(view, None):
return self.createupdate_forms.get(view)
elif self.custom_modelform:
return self.custom_modelform
else:
return self.generate_modelform()
def get_actual_table(self):
if self.custom_table2:
return self.custom_table2
else:
table_builder = TableBuilder(self.app, self.model, self.crud)
return table_builder.generate_table()
def generate_modelform(self):
"""Generate modelform from Django modelform_factory"""
model_class = self.get_model_class
excludes = self.modelform_excludes if self.modelform_excludes else []
_ObjectForm = modelform_factory(model_class, exclude=excludes)
return _ObjectForm
def get_template(self, tname):
"""
- Get custom template from CRUD class, if it is defined in it
- No custom template in CRUD class, then use the default template
"""
if self.custom_templates and self.custom_templates.get(tname, None):
return self.custom_templates.get(tname)
elif self.inlineformset:
return 'crudbuilder/inline/{}.html'.format(tname)
else:
return 'crudbuilder/instance/{}.html'.format(tname)
def get_createupdate_mixin(self):
if self.inlineformset:
return InlineFormsetViewMixin
else:
return CreateUpdateViewMixin
def generate_list_view(self):
"""Generate class based view for ListView"""
name = model_class_form(self.model + 'ListView')
list_args = dict(
model=self.get_model_class,
context_object_name=plural(self.model),
template_name=self.get_template('list'),
table_class=self.get_actual_table(),
context_table_name='table_objects',
crud=self.crud,
permissions=self.view_permission('list'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
table_pagination={'per_page': self.tables2_pagination or 10},
custom_queryset=self.custom_queryset,
custom_context=self.custom_context,
custom_postfix_url=self.custom_postfix_url
)
list_class = type(
name,
(BaseListViewMixin, SingleTableView),
list_args
)
self.classes[name] = list_class
return list_class
def generate_detail_view(self):
"""Generate class based view for DetailView"""
name = model_class_form(self.model + 'DetailView')
detail_args = dict(
detailview_excludes=self.detailview_excludes,
model=self.get_model_class,
template_name=self.get_template('detail'),
login_required=self.check_login_required,
permissions=self.view_permission('detail'),
inlineformset=self.inlineformset,
permission_required=self.check_permission_required,
custom_postfix_url=self.custom_postfix_url
)
detail_class = type(name, (BaseDetailViewMixin, DetailView), detail_args)
self.classes[name] = detail_class
return detail_class
def generate_update_view(self):
"""Generate class based view for UpdateView"""
name = model_class_form(self.model + 'UpdateView')
update_args = dict(
form_class=self.get_actual_form('update'),
model=self.get_model_class,
template_name=self.get_template('update'),
permissions=self.view_permission('update'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
inlineformset=self.inlineformset,
custom_form=self.createupdate_forms or self.custom_modelform,
success_url=reverse_lazy('{}-{}-list'.format(self.app, self.custom_postfix_url)),
custom_postfix_url=self.custom_postfix_url
)
parent_classes = [self.get_createupdate_mixin(), UpdateView]
if self.custom_update_view_mixin:
parent_classes.insert(0, self.custom_update_view_mixin)
update_class = type(
name,
tuple(parent_classes),
update_args
)
self.classes[name] = update_class
return update_class
def generate_delete_view(self):
"""Generate class based view for DeleteView"""
name = model_class_form(self.model + 'DeleteView')
delete_args = dict(
model=self.get_model_class,
template_name=self.get_template('delete'),
permissions=self.view_permission('delete'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
success_url=reverse_lazy('{}-{}-list'.format(self.app, self.custom_postfix_url)),
custom_postfix_url=self.custom_postfix_url
)
delete_class = type(name, (CrudBuilderMixin, DeleteView), delete_args)
self.classes[name] = delete_class
return delete_class
|
asifpy/django-crudbuilder
|
crudbuilder/views.py
|
ViewBuilder.generate_detail_view
|
python
|
def generate_detail_view(self):
name = model_class_form(self.model + 'DetailView')
detail_args = dict(
detailview_excludes=self.detailview_excludes,
model=self.get_model_class,
template_name=self.get_template('detail'),
login_required=self.check_login_required,
permissions=self.view_permission('detail'),
inlineformset=self.inlineformset,
permission_required=self.check_permission_required,
custom_postfix_url=self.custom_postfix_url
)
detail_class = type(name, (BaseDetailViewMixin, DetailView), detail_args)
self.classes[name] = detail_class
return detail_class
|
Generate class based view for DetailView
|
train
|
https://github.com/asifpy/django-crudbuilder/blob/9de1c6fa555086673dd7ccc351d4b771c6192489/crudbuilder/views.py#L143-L160
|
[
"def model_class_form(name):\n \"\"\"\n >>> model_class_form('foo_bar_baz')\n 'FooBarBaz'\n \"\"\"\n return capword(underToMixed(name))\n",
"def view_permission(self, view_type):\n if self.permissions:\n return self.permissions.get(view_type, None)\n else:\n return '{}.{}_{}'.format(self.app, self.model, view_type)\n",
"def get_template(self, tname):\n \"\"\"\n - Get custom template from CRUD class, if it is defined in it\n - No custom template in CRUD class, then use the default template\n \"\"\"\n\n if self.custom_templates and self.custom_templates.get(tname, None):\n return self.custom_templates.get(tname)\n elif self.inlineformset:\n return 'crudbuilder/inline/{}.html'.format(tname)\n else:\n return 'crudbuilder/instance/{}.html'.format(tname)\n"
] |
class ViewBuilder(BaseBuilder):
"""View builder which returns all the CRUD class based views"""
def __init__(self, *args, **kwargs):
super(ViewBuilder, self).__init__(*args, **kwargs)
self.classes = {}
def generate_crud(self):
self.generate_list_view()
self.generate_create_view()
self.generate_detail_view()
self.generate_update_view()
self.generate_delete_view()
def get_actual_form(self, view):
if self.createupdate_forms and self.createupdate_forms.get(view, None):
return self.createupdate_forms.get(view)
elif self.custom_modelform:
return self.custom_modelform
else:
return self.generate_modelform()
def get_actual_table(self):
if self.custom_table2:
return self.custom_table2
else:
table_builder = TableBuilder(self.app, self.model, self.crud)
return table_builder.generate_table()
def generate_modelform(self):
"""Generate modelform from Django modelform_factory"""
model_class = self.get_model_class
excludes = self.modelform_excludes if self.modelform_excludes else []
_ObjectForm = modelform_factory(model_class, exclude=excludes)
return _ObjectForm
def get_template(self, tname):
"""
- Get custom template from CRUD class, if it is defined in it
- No custom template in CRUD class, then use the default template
"""
if self.custom_templates and self.custom_templates.get(tname, None):
return self.custom_templates.get(tname)
elif self.inlineformset:
return 'crudbuilder/inline/{}.html'.format(tname)
else:
return 'crudbuilder/instance/{}.html'.format(tname)
def get_createupdate_mixin(self):
if self.inlineformset:
return InlineFormsetViewMixin
else:
return CreateUpdateViewMixin
def generate_list_view(self):
"""Generate class based view for ListView"""
name = model_class_form(self.model + 'ListView')
list_args = dict(
model=self.get_model_class,
context_object_name=plural(self.model),
template_name=self.get_template('list'),
table_class=self.get_actual_table(),
context_table_name='table_objects',
crud=self.crud,
permissions=self.view_permission('list'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
table_pagination={'per_page': self.tables2_pagination or 10},
custom_queryset=self.custom_queryset,
custom_context=self.custom_context,
custom_postfix_url=self.custom_postfix_url
)
list_class = type(
name,
(BaseListViewMixin, SingleTableView),
list_args
)
self.classes[name] = list_class
return list_class
def generate_create_view(self):
"""Generate class based view for CreateView"""
name = model_class_form(self.model + 'CreateView')
create_args = dict(
form_class=self.get_actual_form('create'),
model=self.get_model_class,
template_name=self.get_template('create'),
permissions=self.view_permission('create'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
inlineformset=self.inlineformset,
success_url=reverse_lazy('{}-{}-list'.format(self.app, self.custom_postfix_url)),
custom_form=self.createupdate_forms or self.custom_modelform,
custom_postfix_url=self.custom_postfix_url
)
parent_classes = [self.get_createupdate_mixin(), CreateView]
if self.custom_create_view_mixin:
parent_classes.insert(0, self.custom_create_view_mixin)
create_class = type(
name,
tuple(parent_classes),
create_args
)
self.classes[name] = create_class
return create_class
def generate_update_view(self):
"""Generate class based view for UpdateView"""
name = model_class_form(self.model + 'UpdateView')
update_args = dict(
form_class=self.get_actual_form('update'),
model=self.get_model_class,
template_name=self.get_template('update'),
permissions=self.view_permission('update'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
inlineformset=self.inlineformset,
custom_form=self.createupdate_forms or self.custom_modelform,
success_url=reverse_lazy('{}-{}-list'.format(self.app, self.custom_postfix_url)),
custom_postfix_url=self.custom_postfix_url
)
parent_classes = [self.get_createupdate_mixin(), UpdateView]
if self.custom_update_view_mixin:
parent_classes.insert(0, self.custom_update_view_mixin)
update_class = type(
name,
tuple(parent_classes),
update_args
)
self.classes[name] = update_class
return update_class
def generate_delete_view(self):
"""Generate class based view for DeleteView"""
name = model_class_form(self.model + 'DeleteView')
delete_args = dict(
model=self.get_model_class,
template_name=self.get_template('delete'),
permissions=self.view_permission('delete'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
success_url=reverse_lazy('{}-{}-list'.format(self.app, self.custom_postfix_url)),
custom_postfix_url=self.custom_postfix_url
)
delete_class = type(name, (CrudBuilderMixin, DeleteView), delete_args)
self.classes[name] = delete_class
return delete_class
|
asifpy/django-crudbuilder
|
crudbuilder/views.py
|
ViewBuilder.generate_update_view
|
python
|
def generate_update_view(self):
name = model_class_form(self.model + 'UpdateView')
update_args = dict(
form_class=self.get_actual_form('update'),
model=self.get_model_class,
template_name=self.get_template('update'),
permissions=self.view_permission('update'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
inlineformset=self.inlineformset,
custom_form=self.createupdate_forms or self.custom_modelform,
success_url=reverse_lazy('{}-{}-list'.format(self.app, self.custom_postfix_url)),
custom_postfix_url=self.custom_postfix_url
)
parent_classes = [self.get_createupdate_mixin(), UpdateView]
if self.custom_update_view_mixin:
parent_classes.insert(0, self.custom_update_view_mixin)
update_class = type(
name,
tuple(parent_classes),
update_args
)
self.classes[name] = update_class
return update_class
|
Generate class based view for UpdateView
|
train
|
https://github.com/asifpy/django-crudbuilder/blob/9de1c6fa555086673dd7ccc351d4b771c6192489/crudbuilder/views.py#L162-L189
|
[
"def model_class_form(name):\n \"\"\"\n >>> model_class_form('foo_bar_baz')\n 'FooBarBaz'\n \"\"\"\n return capword(underToMixed(name))\n",
"def view_permission(self, view_type):\n if self.permissions:\n return self.permissions.get(view_type, None)\n else:\n return '{}.{}_{}'.format(self.app, self.model, view_type)\n",
"def get_actual_form(self, view):\n if self.createupdate_forms and self.createupdate_forms.get(view, None):\n return self.createupdate_forms.get(view)\n elif self.custom_modelform:\n return self.custom_modelform\n else:\n return self.generate_modelform()\n",
"def get_template(self, tname):\n \"\"\"\n - Get custom template from CRUD class, if it is defined in it\n - No custom template in CRUD class, then use the default template\n \"\"\"\n\n if self.custom_templates and self.custom_templates.get(tname, None):\n return self.custom_templates.get(tname)\n elif self.inlineformset:\n return 'crudbuilder/inline/{}.html'.format(tname)\n else:\n return 'crudbuilder/instance/{}.html'.format(tname)\n",
"def get_createupdate_mixin(self):\n if self.inlineformset:\n return InlineFormsetViewMixin\n else:\n return CreateUpdateViewMixin\n"
] |
class ViewBuilder(BaseBuilder):
"""View builder which returns all the CRUD class based views"""
def __init__(self, *args, **kwargs):
super(ViewBuilder, self).__init__(*args, **kwargs)
self.classes = {}
def generate_crud(self):
self.generate_list_view()
self.generate_create_view()
self.generate_detail_view()
self.generate_update_view()
self.generate_delete_view()
def get_actual_form(self, view):
if self.createupdate_forms and self.createupdate_forms.get(view, None):
return self.createupdate_forms.get(view)
elif self.custom_modelform:
return self.custom_modelform
else:
return self.generate_modelform()
def get_actual_table(self):
if self.custom_table2:
return self.custom_table2
else:
table_builder = TableBuilder(self.app, self.model, self.crud)
return table_builder.generate_table()
def generate_modelform(self):
"""Generate modelform from Django modelform_factory"""
model_class = self.get_model_class
excludes = self.modelform_excludes if self.modelform_excludes else []
_ObjectForm = modelform_factory(model_class, exclude=excludes)
return _ObjectForm
def get_template(self, tname):
"""
- Get custom template from CRUD class, if it is defined in it
- No custom template in CRUD class, then use the default template
"""
if self.custom_templates and self.custom_templates.get(tname, None):
return self.custom_templates.get(tname)
elif self.inlineformset:
return 'crudbuilder/inline/{}.html'.format(tname)
else:
return 'crudbuilder/instance/{}.html'.format(tname)
def get_createupdate_mixin(self):
if self.inlineformset:
return InlineFormsetViewMixin
else:
return CreateUpdateViewMixin
def generate_list_view(self):
"""Generate class based view for ListView"""
name = model_class_form(self.model + 'ListView')
list_args = dict(
model=self.get_model_class,
context_object_name=plural(self.model),
template_name=self.get_template('list'),
table_class=self.get_actual_table(),
context_table_name='table_objects',
crud=self.crud,
permissions=self.view_permission('list'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
table_pagination={'per_page': self.tables2_pagination or 10},
custom_queryset=self.custom_queryset,
custom_context=self.custom_context,
custom_postfix_url=self.custom_postfix_url
)
list_class = type(
name,
(BaseListViewMixin, SingleTableView),
list_args
)
self.classes[name] = list_class
return list_class
def generate_create_view(self):
"""Generate class based view for CreateView"""
name = model_class_form(self.model + 'CreateView')
create_args = dict(
form_class=self.get_actual_form('create'),
model=self.get_model_class,
template_name=self.get_template('create'),
permissions=self.view_permission('create'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
inlineformset=self.inlineformset,
success_url=reverse_lazy('{}-{}-list'.format(self.app, self.custom_postfix_url)),
custom_form=self.createupdate_forms or self.custom_modelform,
custom_postfix_url=self.custom_postfix_url
)
parent_classes = [self.get_createupdate_mixin(), CreateView]
if self.custom_create_view_mixin:
parent_classes.insert(0, self.custom_create_view_mixin)
create_class = type(
name,
tuple(parent_classes),
create_args
)
self.classes[name] = create_class
return create_class
def generate_detail_view(self):
"""Generate class based view for DetailView"""
name = model_class_form(self.model + 'DetailView')
detail_args = dict(
detailview_excludes=self.detailview_excludes,
model=self.get_model_class,
template_name=self.get_template('detail'),
login_required=self.check_login_required,
permissions=self.view_permission('detail'),
inlineformset=self.inlineformset,
permission_required=self.check_permission_required,
custom_postfix_url=self.custom_postfix_url
)
detail_class = type(name, (BaseDetailViewMixin, DetailView), detail_args)
self.classes[name] = detail_class
return detail_class
def generate_delete_view(self):
"""Generate class based view for DeleteView"""
name = model_class_form(self.model + 'DeleteView')
delete_args = dict(
model=self.get_model_class,
template_name=self.get_template('delete'),
permissions=self.view_permission('delete'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
success_url=reverse_lazy('{}-{}-list'.format(self.app, self.custom_postfix_url)),
custom_postfix_url=self.custom_postfix_url
)
delete_class = type(name, (CrudBuilderMixin, DeleteView), delete_args)
self.classes[name] = delete_class
return delete_class
|
asifpy/django-crudbuilder
|
crudbuilder/views.py
|
ViewBuilder.generate_delete_view
|
python
|
def generate_delete_view(self):
name = model_class_form(self.model + 'DeleteView')
delete_args = dict(
model=self.get_model_class,
template_name=self.get_template('delete'),
permissions=self.view_permission('delete'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
success_url=reverse_lazy('{}-{}-list'.format(self.app, self.custom_postfix_url)),
custom_postfix_url=self.custom_postfix_url
)
delete_class = type(name, (CrudBuilderMixin, DeleteView), delete_args)
self.classes[name] = delete_class
return delete_class
|
Generate class based view for DeleteView
|
train
|
https://github.com/asifpy/django-crudbuilder/blob/9de1c6fa555086673dd7ccc351d4b771c6192489/crudbuilder/views.py#L191-L207
|
[
"def model_class_form(name):\n \"\"\"\n >>> model_class_form('foo_bar_baz')\n 'FooBarBaz'\n \"\"\"\n return capword(underToMixed(name))\n",
"def view_permission(self, view_type):\n if self.permissions:\n return self.permissions.get(view_type, None)\n else:\n return '{}.{}_{}'.format(self.app, self.model, view_type)\n",
"def get_template(self, tname):\n \"\"\"\n - Get custom template from CRUD class, if it is defined in it\n - No custom template in CRUD class, then use the default template\n \"\"\"\n\n if self.custom_templates and self.custom_templates.get(tname, None):\n return self.custom_templates.get(tname)\n elif self.inlineformset:\n return 'crudbuilder/inline/{}.html'.format(tname)\n else:\n return 'crudbuilder/instance/{}.html'.format(tname)\n"
] |
class ViewBuilder(BaseBuilder):
"""View builder which returns all the CRUD class based views"""
def __init__(self, *args, **kwargs):
super(ViewBuilder, self).__init__(*args, **kwargs)
self.classes = {}
def generate_crud(self):
self.generate_list_view()
self.generate_create_view()
self.generate_detail_view()
self.generate_update_view()
self.generate_delete_view()
def get_actual_form(self, view):
if self.createupdate_forms and self.createupdate_forms.get(view, None):
return self.createupdate_forms.get(view)
elif self.custom_modelform:
return self.custom_modelform
else:
return self.generate_modelform()
def get_actual_table(self):
if self.custom_table2:
return self.custom_table2
else:
table_builder = TableBuilder(self.app, self.model, self.crud)
return table_builder.generate_table()
def generate_modelform(self):
"""Generate modelform from Django modelform_factory"""
model_class = self.get_model_class
excludes = self.modelform_excludes if self.modelform_excludes else []
_ObjectForm = modelform_factory(model_class, exclude=excludes)
return _ObjectForm
def get_template(self, tname):
"""
- Get custom template from CRUD class, if it is defined in it
- No custom template in CRUD class, then use the default template
"""
if self.custom_templates and self.custom_templates.get(tname, None):
return self.custom_templates.get(tname)
elif self.inlineformset:
return 'crudbuilder/inline/{}.html'.format(tname)
else:
return 'crudbuilder/instance/{}.html'.format(tname)
def get_createupdate_mixin(self):
if self.inlineformset:
return InlineFormsetViewMixin
else:
return CreateUpdateViewMixin
def generate_list_view(self):
"""Generate class based view for ListView"""
name = model_class_form(self.model + 'ListView')
list_args = dict(
model=self.get_model_class,
context_object_name=plural(self.model),
template_name=self.get_template('list'),
table_class=self.get_actual_table(),
context_table_name='table_objects',
crud=self.crud,
permissions=self.view_permission('list'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
table_pagination={'per_page': self.tables2_pagination or 10},
custom_queryset=self.custom_queryset,
custom_context=self.custom_context,
custom_postfix_url=self.custom_postfix_url
)
list_class = type(
name,
(BaseListViewMixin, SingleTableView),
list_args
)
self.classes[name] = list_class
return list_class
def generate_create_view(self):
"""Generate class based view for CreateView"""
name = model_class_form(self.model + 'CreateView')
create_args = dict(
form_class=self.get_actual_form('create'),
model=self.get_model_class,
template_name=self.get_template('create'),
permissions=self.view_permission('create'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
inlineformset=self.inlineformset,
success_url=reverse_lazy('{}-{}-list'.format(self.app, self.custom_postfix_url)),
custom_form=self.createupdate_forms or self.custom_modelform,
custom_postfix_url=self.custom_postfix_url
)
parent_classes = [self.get_createupdate_mixin(), CreateView]
if self.custom_create_view_mixin:
parent_classes.insert(0, self.custom_create_view_mixin)
create_class = type(
name,
tuple(parent_classes),
create_args
)
self.classes[name] = create_class
return create_class
def generate_detail_view(self):
"""Generate class based view for DetailView"""
name = model_class_form(self.model + 'DetailView')
detail_args = dict(
detailview_excludes=self.detailview_excludes,
model=self.get_model_class,
template_name=self.get_template('detail'),
login_required=self.check_login_required,
permissions=self.view_permission('detail'),
inlineformset=self.inlineformset,
permission_required=self.check_permission_required,
custom_postfix_url=self.custom_postfix_url
)
detail_class = type(name, (BaseDetailViewMixin, DetailView), detail_args)
self.classes[name] = detail_class
return detail_class
def generate_update_view(self):
"""Generate class based view for UpdateView"""
name = model_class_form(self.model + 'UpdateView')
update_args = dict(
form_class=self.get_actual_form('update'),
model=self.get_model_class,
template_name=self.get_template('update'),
permissions=self.view_permission('update'),
permission_required=self.check_permission_required,
login_required=self.check_login_required,
inlineformset=self.inlineformset,
custom_form=self.createupdate_forms or self.custom_modelform,
success_url=reverse_lazy('{}-{}-list'.format(self.app, self.custom_postfix_url)),
custom_postfix_url=self.custom_postfix_url
)
parent_classes = [self.get_createupdate_mixin(), UpdateView]
if self.custom_update_view_mixin:
parent_classes.insert(0, self.custom_update_view_mixin)
update_class = type(
name,
tuple(parent_classes),
update_args
)
self.classes[name] = update_class
return update_class
|
AlexMathew/scrapple
|
scrapple/commands/web.py
|
WebCommand.execute_command
|
python
|
def execute_command(self):
print(Back.GREEN + Fore.BLACK + "Scrapple Web Interface")
print(Back.RESET + Fore.RESET)
p1 = Process(target = self.run_flask)
p2 = Process(target = lambda : webbrowser.open('http://127.0.0.1:5000'))
p1.start()
p2.start()
|
The web command runs the Scrapple web interface through a simple \
`Flask <http://flask.pocoo.org>`_ app.
When the execute_command() method is called from the \
:ref:`runCLI() <implementation-cli>` function, it starts of two simultaneous \
processes :
- Calls the run_flask() method to start the Flask app on port 5000 of localhost
- Opens the web interface on a web browser
The '/' view of the Flask app, opens up the Scrapple web interface. This \
provides a basic form, to fill in the required configuration file. On submitting \
the form, it makes a POST request, passing in the form in the request header. \
This form is passed to the form_to_json() \
:ref:`utility function <implementation-utils>`, where the form is converted into \
the resultant JSON configuration file.
Currently, closing the web command execution requires making a keyboard interrupt \
on the command line after the web interface has been closed.
|
train
|
https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/commands/web.py#L39-L67
| null |
class WebCommand(command.Command):
"""
Defines the execution of :ref:`web <command-web>`
"""
app = Flask(__name__)
def __init__(self, args):
super(WebCommand, self).__init__(args)
init()
def run_flask(self):
try:
WebCommand.app.run(host="127.0.0.1", port=5000)
except Exception as e:
import sys
sys.exit()
@app.route('/', methods=['GET', 'POST'])
def home():
if request.method == 'POST':
try:
form_to_json(request.form)
return render_template(
'complete.html',
filename=request.form['project_name']
)
except Exception as e:
return render_template('error.html', error=e)
else:
return render_template('home.html')
|
AlexMathew/scrapple
|
scrapple/commands/generate.py
|
GenerateCommand.execute_command
|
python
|
def execute_command(self):
print(Back.GREEN + Fore.BLACK + "Scrapple Generate")
print(Back.RESET + Fore.RESET)
directory = os.path.join(scrapple.__path__[0], 'templates', 'scripts')
with open(os.path.join(directory, 'generate.txt'), 'r') as f:
template_content = f.read()
template = Template(template_content)
try:
with open(self.args['<projectname>'] + '.json', 'r') as f:
config = json.load(f)
if self.args['--output_type'] == 'csv':
from scrapple.utils.config import extract_fieldnames
config['fields'] = str(extract_fieldnames(config))
config['output_file'] = self.args['<output_filename>']
config['output_type'] = self.args['--output_type']
rendered = template.render(config=config)
with open(self.args['<output_filename>'] + '.py', 'w') as f:
f.write(rendered)
print(Back.WHITE + Fore.RED + self.args['<output_filename>'], \
".py has been created" + Back.RESET + Fore.RESET, sep="")
except IOError:
print(Back.WHITE + Fore.RED + self.args['<projectname>'], ".json does not ", \
"exist. Use ``scrapple genconfig``." + Back.RESET + Fore.RESET, sep="")
|
The generate command uses `Jinja2 <http://jinja.pocoo.org/>`_ templates \
to create Python scripts, according to the specification in the configuration \
file. The predefined templates use the extract_content() method of the \
:ref:`selector classes <implementation-selectors>` to implement linear extractors \
and use recursive for loops to implement multiple levels of link crawlers. This \
implementation is effectively a representation of the traverse_next() \
:ref:`utility function <implementation-utils>`, using the loop depth to \
differentiate between levels of the crawler execution.
According to the --output_type argument in the CLI input, the results are \
written into a JSON document or a CSV document.
The Python script is written into <output_filename>.py - running this file \
is the equivalent of using the Scrapple :ref:`run command <command-run>`.
|
train
|
https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/commands/generate.py#L28-L67
|
[
"def extract_fieldnames(config):\n \"\"\"\n Function to return a list of unique field names from the config file\n\n :param config: The configuration file that contains the specification of the extractor\n :return: A list of field names from the config file\n\n \"\"\"\n fields = []\n for x in get_fields(config):\n if x in fields:\n fields.append(x + '_' + str(fields.count(x) + 1))\n else:\n fields.append(x)\n return fields\n"
] |
class GenerateCommand(command.Command):
"""
Defines the execution of :ref:`generate <command-generate>`
"""
def __init__(self, args):
super(GenerateCommand, self).__init__(args)
init()
|
AlexMathew/scrapple
|
scrapple/selectors/selector.py
|
Selector.extract_content
|
python
|
def extract_content(self, selector='', attr='', default='', connector='', *args, **kwargs):
try:
if selector.lower() == "url":
return self.url
if attr.lower() == "text":
tag = self.get_tree_tag(selector=selector, get_one=True)
content = connector.join([make_ascii(x).strip() for x in tag.itertext()])
content = content.replace("\n", " ").strip()
else:
tag = self.get_tree_tag(selector=selector, get_one=True)
content = tag.get(attr)
if attr in ["href", "src"]:
content = urljoin(self.url, content)
return content
except IndexError:
if default is not "":
return default
raise Exception("There is no content for the %s selector - %s" % (self.__selector_type__, selector))
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
|
Method for performing the content extraction for the particular selector type. \
If the selector is "url", the URL of the current web page is returned.
Otherwise, the selector expression is used to extract content. The particular \
attribute to be extracted ("text", "href", etc.) is specified in the method \
arguments, and this is used to extract the required content. If the content \
extracted is a link (from an attr value of "href" or "src"), the URL is parsed \
to convert the relative path into an absolute path.
If the selector does not fetch any content, the default value is returned. \
If no default value is specified, an exception is raised.
:param selector: The XPath expression
:param attr: The attribute to be extracted from the selected tag
:param default: The default value to be used if the selector does not return any data
:param connector: String connector for list of data returned for a particular selector
:return: The extracted content
|
train
|
https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/selectors/selector.py#L81-L119
|
[
"def get_tree_tag(self, selector='', get_one=False, *args, **kwargs):\n\traise NotImplementedError\n"
] |
class Selector(object):
"""
This class defines the basic ``Selector`` object.
"""
__selector_type__ = ''
def __init__(self, url):
"""
The URL of the web page to be loaded is validated - ensuring the schema has \
been specified, and that the URL is valid. A HTTP GET request is made to load \
the web page, and the HTML content of this fetched web page is used to generate \
the :ref:`element tree <concepts-structure>`. This is the element tree that will \
be parsed to extract the necessary content.
"""
try:
headers = {
'content-encoding': 'gzip',
'Accept-Encoding': 'identity, compress, gzip',
'Accept': '*/*'
}
headers['User-Agent'] = random.choice([
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:30.0) Gecko/20100101 Firefox/30.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (Windows NT 6.1; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (Windows NT 5.1; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1',
'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0',
'Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36',
'Mozilla/5.0 (X11; OpenBSD i386) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
])
self.url = url
self.content = requests.get(url, headers=headers).content
self.tree = etree.HTML(self.content)
except requests.exceptions.MissingSchema:
raise Exception('URL should be of the form "http://<page_link>')
except requests.exceptions.InvalidURL:
raise Exception('The URL provided is invalid')
except requests.exceptions.ConnectionError:
raise Exception('Ensure that you are connected to the Internet and that the page exists')
def get_tree_tag(self, selector='', get_one=False, *args, **kwargs):
raise NotImplementedError
def extract_links(self, selector='', *args, **kwargs):
"""
Method for performing the link extraction for the crawler. \
The selector passed as the argument is a selector to point to the anchor tags \
that the crawler should pass through. A list of links is obtained, and the links \
are iterated through. The relative paths are converted into absolute paths and \
a ``XpathSelector``/``CssSelector`` object (as is the case) is created with the URL of the next page as the argument \
and this created object is yielded.
The extract_links method basically generates ``XpathSelector``/``CssSelector`` objects for all of \
the links to be crawled through.
:param selector: The selector for the anchor tags to be crawled through
:return: A ``XpathSelector``/``CssSelector`` object for every page to be crawled through
"""
try:
links = self.get_tree_tag(selector=selector)
for link in links:
next_url = urljoin(self.url, link.get('href'))
yield type(self)(next_url)
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
except Exception:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
def extract_tabular(self, header='', prefix='', suffix='', table_type='', *args, **kwargs):
"""
Method for performing the tabular data extraction. \
:param result: A dictionary containing the extracted data so far
:param table_type: Can be "rows" or "columns". This determines the type of table to be extracted. \
A row extraction is when there is a single row to be extracted and mapped to a set of headers. \
A column extraction is when a set of rows have to be extracted, giving a list of header-value mappings.
:param header: The headers to be used for the table. This can be a list of headers, or a selector that gives the list of headers
:param prefix: A prefix to be added to each header
:param suffix: A suffix to be added to each header
:param selector: For row extraction, this is a selector that gives the row to be extracted. \
For column extraction, this is a list of selectors for each column.
:param attr: The attribute to be extracted from the selected tag
:param default: The default value to be used if the selector does not return any data
:param verbosity: The verbosity set as the argument for scrapple run
:return: A 2-tuple containing the list of all the column headers extracted and the list of \
dictionaries which contain (header, content) pairs
"""
if type(header) in [str, unicode]:
try:
header_list = self.get_tree_tag(header)
table_headers = [prefix + h.text + suffix for h in header_list]
except XPathError:
raise Exception("Invalid %s selector for table header - %s" % (self.__selector_type__, header))
except Exception:
raise Exception("Invalid %s selector for table header - %s" % (self.__selector_type__, header))
else:
table_headers = [prefix + h + suffix for h in header]
if len(table_headers) == 0:
raise Exception("Invalid %s selector for table header - %s" % (self.__selector_type__, header))
if table_type not in ["rows", "columns"]:
raise Exception("Specify 'rows' or 'columns' in table_type")
if table_type == "rows":
result_list = self.extract_rows(table_headers=table_headers, *args, **kwargs)
else:
result_list = self.extract_columns(table_headers=table_headers, *args, **kwargs)
return table_headers, result_list
def extract_rows(self, result={}, selector='', table_headers=[], attr='', connector='', default='', verbosity=0, *args, **kwargs):
"""
Row data extraction for extract_tabular
"""
result_list = []
try:
values = self.get_tree_tag(selector)
if len(table_headers) >= len(values):
from itertools import izip_longest
pairs = izip_longest(table_headers, values, fillvalue=default)
else:
from itertools import izip
pairs = izip(table_headers, values)
for head, val in pairs:
if verbosity > 1:
print("\nExtracting", head, "attribute", sep=' ', end='')
if attr.lower() == "text":
try:
content = connector.join([make_ascii(x).strip() for x in val.itertext()])
except Exception:
content = default
content = content.replace("\n", " ").strip()
else:
content = val.get(attr)
if attr in ["href", "src"]:
content = urljoin(self.url, content)
result[head] = content
result_list.append(result)
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
except TypeError:
raise Exception("Selector expression string to be provided. Got " + selector)
return result_list
def extract_columns(self, result={}, selector='', table_headers=[], attr='', connector='', default='', verbosity=0, *args, **kwargs):
"""
Column data extraction for extract_tabular
"""
result_list = []
try:
if type(selector) in [str, unicode]:
selectors = [selector]
elif type(selector) == list:
selectors = selector[:]
else:
raise Exception("Use a list of selector expressions for the various columns")
from itertools import izip, count
pairs = izip(table_headers, selectors)
columns = {}
for head, selector in pairs:
columns[head] = self.get_tree_tag(selector)
try:
for i in count(start=0):
r = result.copy()
for head in columns.keys():
if verbosity > 1:
print("\nExtracting", head, "attribute", sep=' ', end='')
col = columns[head][i]
if attr == "text":
try:
content = connector.join([make_ascii(x).strip() for x in col.itertext()])
except Exception:
content = default
content = content.replace("\n", " ").strip()
else:
content = col.get(attr)
if attr in ["href", "src"]:
content = urljoin(self.url, content)
r[head] = content
result_list.append(r)
except IndexError:
pass
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
except TypeError:
raise Exception("Selector expression string to be provided. Got " + selector)
return result_list
|
AlexMathew/scrapple
|
scrapple/selectors/selector.py
|
Selector.extract_links
|
python
|
def extract_links(self, selector='', *args, **kwargs):
try:
links = self.get_tree_tag(selector=selector)
for link in links:
next_url = urljoin(self.url, link.get('href'))
yield type(self)(next_url)
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
except Exception:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
|
Method for performing the link extraction for the crawler. \
The selector passed as the argument is a selector to point to the anchor tags \
that the crawler should pass through. A list of links is obtained, and the links \
are iterated through. The relative paths are converted into absolute paths and \
a ``XpathSelector``/``CssSelector`` object (as is the case) is created with the URL of the next page as the argument \
and this created object is yielded.
The extract_links method basically generates ``XpathSelector``/``CssSelector`` objects for all of \
the links to be crawled through.
:param selector: The selector for the anchor tags to be crawled through
:return: A ``XpathSelector``/``CssSelector`` object for every page to be crawled through
|
train
|
https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/selectors/selector.py#L122-L147
|
[
"def get_tree_tag(self, selector='', get_one=False, *args, **kwargs):\n\traise NotImplementedError\n"
] |
class Selector(object):
"""
This class defines the basic ``Selector`` object.
"""
__selector_type__ = ''
def __init__(self, url):
"""
The URL of the web page to be loaded is validated - ensuring the schema has \
been specified, and that the URL is valid. A HTTP GET request is made to load \
the web page, and the HTML content of this fetched web page is used to generate \
the :ref:`element tree <concepts-structure>`. This is the element tree that will \
be parsed to extract the necessary content.
"""
try:
headers = {
'content-encoding': 'gzip',
'Accept-Encoding': 'identity, compress, gzip',
'Accept': '*/*'
}
headers['User-Agent'] = random.choice([
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:30.0) Gecko/20100101 Firefox/30.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (Windows NT 6.1; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (Windows NT 5.1; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1',
'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0',
'Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36',
'Mozilla/5.0 (X11; OpenBSD i386) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
])
self.url = url
self.content = requests.get(url, headers=headers).content
self.tree = etree.HTML(self.content)
except requests.exceptions.MissingSchema:
raise Exception('URL should be of the form "http://<page_link>')
except requests.exceptions.InvalidURL:
raise Exception('The URL provided is invalid')
except requests.exceptions.ConnectionError:
raise Exception('Ensure that you are connected to the Internet and that the page exists')
def get_tree_tag(self, selector='', get_one=False, *args, **kwargs):
raise NotImplementedError
def extract_content(self, selector='', attr='', default='', connector='', *args, **kwargs):
"""
Method for performing the content extraction for the particular selector type. \
If the selector is "url", the URL of the current web page is returned.
Otherwise, the selector expression is used to extract content. The particular \
attribute to be extracted ("text", "href", etc.) is specified in the method \
arguments, and this is used to extract the required content. If the content \
extracted is a link (from an attr value of "href" or "src"), the URL is parsed \
to convert the relative path into an absolute path.
If the selector does not fetch any content, the default value is returned. \
If no default value is specified, an exception is raised.
:param selector: The XPath expression
:param attr: The attribute to be extracted from the selected tag
:param default: The default value to be used if the selector does not return any data
:param connector: String connector for list of data returned for a particular selector
:return: The extracted content
"""
try:
if selector.lower() == "url":
return self.url
if attr.lower() == "text":
tag = self.get_tree_tag(selector=selector, get_one=True)
content = connector.join([make_ascii(x).strip() for x in tag.itertext()])
content = content.replace("\n", " ").strip()
else:
tag = self.get_tree_tag(selector=selector, get_one=True)
content = tag.get(attr)
if attr in ["href", "src"]:
content = urljoin(self.url, content)
return content
except IndexError:
if default is not "":
return default
raise Exception("There is no content for the %s selector - %s" % (self.__selector_type__, selector))
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
def extract_links(self, selector='', *args, **kwargs):
"""
Method for performing the link extraction for the crawler. \
The selector passed as the argument is a selector to point to the anchor tags \
that the crawler should pass through. A list of links is obtained, and the links \
are iterated through. The relative paths are converted into absolute paths and \
a ``XpathSelector``/``CssSelector`` object (as is the case) is created with the URL of the next page as the argument \
and this created object is yielded.
The extract_links method basically generates ``XpathSelector``/``CssSelector`` objects for all of \
the links to be crawled through.
:param selector: The selector for the anchor tags to be crawled through
:return: A ``XpathSelector``/``CssSelector`` object for every page to be crawled through
"""
try:
links = self.get_tree_tag(selector=selector)
for link in links:
next_url = urljoin(self.url, link.get('href'))
yield type(self)(next_url)
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
except Exception:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
def extract_tabular(self, header='', prefix='', suffix='', table_type='', *args, **kwargs):
"""
Method for performing the tabular data extraction. \
:param result: A dictionary containing the extracted data so far
:param table_type: Can be "rows" or "columns". This determines the type of table to be extracted. \
A row extraction is when there is a single row to be extracted and mapped to a set of headers. \
A column extraction is when a set of rows have to be extracted, giving a list of header-value mappings.
:param header: The headers to be used for the table. This can be a list of headers, or a selector that gives the list of headers
:param prefix: A prefix to be added to each header
:param suffix: A suffix to be added to each header
:param selector: For row extraction, this is a selector that gives the row to be extracted. \
For column extraction, this is a list of selectors for each column.
:param attr: The attribute to be extracted from the selected tag
:param default: The default value to be used if the selector does not return any data
:param verbosity: The verbosity set as the argument for scrapple run
:return: A 2-tuple containing the list of all the column headers extracted and the list of \
dictionaries which contain (header, content) pairs
"""
if type(header) in [str, unicode]:
try:
header_list = self.get_tree_tag(header)
table_headers = [prefix + h.text + suffix for h in header_list]
except XPathError:
raise Exception("Invalid %s selector for table header - %s" % (self.__selector_type__, header))
except Exception:
raise Exception("Invalid %s selector for table header - %s" % (self.__selector_type__, header))
else:
table_headers = [prefix + h + suffix for h in header]
if len(table_headers) == 0:
raise Exception("Invalid %s selector for table header - %s" % (self.__selector_type__, header))
if table_type not in ["rows", "columns"]:
raise Exception("Specify 'rows' or 'columns' in table_type")
if table_type == "rows":
result_list = self.extract_rows(table_headers=table_headers, *args, **kwargs)
else:
result_list = self.extract_columns(table_headers=table_headers, *args, **kwargs)
return table_headers, result_list
def extract_rows(self, result={}, selector='', table_headers=[], attr='', connector='', default='', verbosity=0, *args, **kwargs):
"""
Row data extraction for extract_tabular
"""
result_list = []
try:
values = self.get_tree_tag(selector)
if len(table_headers) >= len(values):
from itertools import izip_longest
pairs = izip_longest(table_headers, values, fillvalue=default)
else:
from itertools import izip
pairs = izip(table_headers, values)
for head, val in pairs:
if verbosity > 1:
print("\nExtracting", head, "attribute", sep=' ', end='')
if attr.lower() == "text":
try:
content = connector.join([make_ascii(x).strip() for x in val.itertext()])
except Exception:
content = default
content = content.replace("\n", " ").strip()
else:
content = val.get(attr)
if attr in ["href", "src"]:
content = urljoin(self.url, content)
result[head] = content
result_list.append(result)
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
except TypeError:
raise Exception("Selector expression string to be provided. Got " + selector)
return result_list
def extract_columns(self, result={}, selector='', table_headers=[], attr='', connector='', default='', verbosity=0, *args, **kwargs):
"""
Column data extraction for extract_tabular
"""
result_list = []
try:
if type(selector) in [str, unicode]:
selectors = [selector]
elif type(selector) == list:
selectors = selector[:]
else:
raise Exception("Use a list of selector expressions for the various columns")
from itertools import izip, count
pairs = izip(table_headers, selectors)
columns = {}
for head, selector in pairs:
columns[head] = self.get_tree_tag(selector)
try:
for i in count(start=0):
r = result.copy()
for head in columns.keys():
if verbosity > 1:
print("\nExtracting", head, "attribute", sep=' ', end='')
col = columns[head][i]
if attr == "text":
try:
content = connector.join([make_ascii(x).strip() for x in col.itertext()])
except Exception:
content = default
content = content.replace("\n", " ").strip()
else:
content = col.get(attr)
if attr in ["href", "src"]:
content = urljoin(self.url, content)
r[head] = content
result_list.append(r)
except IndexError:
pass
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
except TypeError:
raise Exception("Selector expression string to be provided. Got " + selector)
return result_list
|
AlexMathew/scrapple
|
scrapple/selectors/selector.py
|
Selector.extract_tabular
|
python
|
def extract_tabular(self, header='', prefix='', suffix='', table_type='', *args, **kwargs):
if type(header) in [str, unicode]:
try:
header_list = self.get_tree_tag(header)
table_headers = [prefix + h.text + suffix for h in header_list]
except XPathError:
raise Exception("Invalid %s selector for table header - %s" % (self.__selector_type__, header))
except Exception:
raise Exception("Invalid %s selector for table header - %s" % (self.__selector_type__, header))
else:
table_headers = [prefix + h + suffix for h in header]
if len(table_headers) == 0:
raise Exception("Invalid %s selector for table header - %s" % (self.__selector_type__, header))
if table_type not in ["rows", "columns"]:
raise Exception("Specify 'rows' or 'columns' in table_type")
if table_type == "rows":
result_list = self.extract_rows(table_headers=table_headers, *args, **kwargs)
else:
result_list = self.extract_columns(table_headers=table_headers, *args, **kwargs)
return table_headers, result_list
|
Method for performing the tabular data extraction. \
:param result: A dictionary containing the extracted data so far
:param table_type: Can be "rows" or "columns". This determines the type of table to be extracted. \
A row extraction is when there is a single row to be extracted and mapped to a set of headers. \
A column extraction is when a set of rows have to be extracted, giving a list of header-value mappings.
:param header: The headers to be used for the table. This can be a list of headers, or a selector that gives the list of headers
:param prefix: A prefix to be added to each header
:param suffix: A suffix to be added to each header
:param selector: For row extraction, this is a selector that gives the row to be extracted. \
For column extraction, this is a list of selectors for each column.
:param attr: The attribute to be extracted from the selected tag
:param default: The default value to be used if the selector does not return any data
:param verbosity: The verbosity set as the argument for scrapple run
:return: A 2-tuple containing the list of all the column headers extracted and the list of \
dictionaries which contain (header, content) pairs
|
train
|
https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/selectors/selector.py#L150-L187
|
[
"def get_tree_tag(self, selector='', get_one=False, *args, **kwargs):\n\traise NotImplementedError\n",
"def extract_rows(self, result={}, selector='', table_headers=[], attr='', connector='', default='', verbosity=0, *args, **kwargs):\n\t\"\"\"\n\tRow data extraction for extract_tabular\n\t\"\"\"\n\tresult_list = []\n\n\ttry:\n\t\tvalues = self.get_tree_tag(selector)\n\t\tif len(table_headers) >= len(values):\n\t\t\tfrom itertools import izip_longest\n\t\t\tpairs = izip_longest(table_headers, values, fillvalue=default)\n\t\telse:\n\t\t\tfrom itertools import izip\n\t\t\tpairs = izip(table_headers, values)\n\t\tfor head, val in pairs:\n\t\t\tif verbosity > 1:\n\t\t\t\tprint(\"\\nExtracting\", head, \"attribute\", sep=' ', end='')\n\t\t\tif attr.lower() == \"text\":\n\t\t\t\ttry:\n\t\t\t\t\tcontent = connector.join([make_ascii(x).strip() for x in val.itertext()])\n\t\t\t\texcept Exception:\n\t\t\t\t\tcontent = default\n\t\t\t\tcontent = content.replace(\"\\n\", \" \").strip()\n\t\t\telse:\n\t\t\t\tcontent = val.get(attr)\n\t\t\t\tif attr in [\"href\", \"src\"]:\n\t\t\t\t\tcontent = urljoin(self.url, content)\n\t\t\tresult[head] = content\n\t\tresult_list.append(result)\n\texcept XPathError:\n\t\traise Exception(\"Invalid %s selector - %s\" % (self.__selector_type__, selector))\n\texcept TypeError:\n\t\traise Exception(\"Selector expression string to be provided. Got \" + selector)\n\n\treturn result_list\n",
"def extract_columns(self, result={}, selector='', table_headers=[], attr='', connector='', default='', verbosity=0, *args, **kwargs):\n\t\"\"\"\n\tColumn data extraction for extract_tabular\n\t\"\"\"\n\tresult_list = []\n\n\ttry:\n\t\tif type(selector) in [str, unicode]:\n\t\t\tselectors = [selector]\n\t\telif type(selector) == list:\n\t\t\tselectors = selector[:]\n\t\telse:\n\t\t\traise Exception(\"Use a list of selector expressions for the various columns\")\n\t\tfrom itertools import izip, count\n\t\tpairs = izip(table_headers, selectors)\n\t\tcolumns = {}\n\t\tfor head, selector in pairs:\n\t\t\tcolumns[head] = self.get_tree_tag(selector)\n\t\ttry:\n\t\t\tfor i in count(start=0):\n\t\t\t\tr = result.copy()\n\t\t\t\tfor head in columns.keys():\n\t\t\t\t\tif verbosity > 1:\n\t\t\t\t\t\tprint(\"\\nExtracting\", head, \"attribute\", sep=' ', end='')\n\t\t\t\t\tcol = columns[head][i]\n\t\t\t\t\tif attr == \"text\":\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tcontent = connector.join([make_ascii(x).strip() for x in col.itertext()])\n\t\t\t\t\t\texcept Exception:\n\t\t\t\t\t\t\tcontent = default\n\t\t\t\t\t\tcontent = content.replace(\"\\n\", \" \").strip()\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontent = col.get(attr)\n\t\t\t\t\t\tif attr in [\"href\", \"src\"]:\n\t\t\t\t\t\t\tcontent = urljoin(self.url, content)\n\t\t\t\t\tr[head] = content\n\t\t\t\tresult_list.append(r)\n\t\texcept IndexError:\n\t\t\tpass\n\texcept XPathError:\n\t\traise Exception(\"Invalid %s selector - %s\" % (self.__selector_type__, selector))\n\texcept TypeError:\n\t\traise Exception(\"Selector expression string to be provided. Got \" + selector)\n\n\treturn result_list\n"
] |
class Selector(object):
"""
This class defines the basic ``Selector`` object.
"""
__selector_type__ = ''
def __init__(self, url):
"""
The URL of the web page to be loaded is validated - ensuring the schema has \
been specified, and that the URL is valid. A HTTP GET request is made to load \
the web page, and the HTML content of this fetched web page is used to generate \
the :ref:`element tree <concepts-structure>`. This is the element tree that will \
be parsed to extract the necessary content.
"""
try:
headers = {
'content-encoding': 'gzip',
'Accept-Encoding': 'identity, compress, gzip',
'Accept': '*/*'
}
headers['User-Agent'] = random.choice([
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:30.0) Gecko/20100101 Firefox/30.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (Windows NT 6.1; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (Windows NT 5.1; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1',
'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0',
'Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36',
'Mozilla/5.0 (X11; OpenBSD i386) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
])
self.url = url
self.content = requests.get(url, headers=headers).content
self.tree = etree.HTML(self.content)
except requests.exceptions.MissingSchema:
raise Exception('URL should be of the form "http://<page_link>')
except requests.exceptions.InvalidURL:
raise Exception('The URL provided is invalid')
except requests.exceptions.ConnectionError:
raise Exception('Ensure that you are connected to the Internet and that the page exists')
def get_tree_tag(self, selector='', get_one=False, *args, **kwargs):
raise NotImplementedError
def extract_content(self, selector='', attr='', default='', connector='', *args, **kwargs):
"""
Method for performing the content extraction for the particular selector type. \
If the selector is "url", the URL of the current web page is returned.
Otherwise, the selector expression is used to extract content. The particular \
attribute to be extracted ("text", "href", etc.) is specified in the method \
arguments, and this is used to extract the required content. If the content \
extracted is a link (from an attr value of "href" or "src"), the URL is parsed \
to convert the relative path into an absolute path.
If the selector does not fetch any content, the default value is returned. \
If no default value is specified, an exception is raised.
:param selector: The XPath expression
:param attr: The attribute to be extracted from the selected tag
:param default: The default value to be used if the selector does not return any data
:param connector: String connector for list of data returned for a particular selector
:return: The extracted content
"""
try:
if selector.lower() == "url":
return self.url
if attr.lower() == "text":
tag = self.get_tree_tag(selector=selector, get_one=True)
content = connector.join([make_ascii(x).strip() for x in tag.itertext()])
content = content.replace("\n", " ").strip()
else:
tag = self.get_tree_tag(selector=selector, get_one=True)
content = tag.get(attr)
if attr in ["href", "src"]:
content = urljoin(self.url, content)
return content
except IndexError:
if default is not "":
return default
raise Exception("There is no content for the %s selector - %s" % (self.__selector_type__, selector))
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
def extract_links(self, selector='', *args, **kwargs):
"""
Method for performing the link extraction for the crawler. \
The selector passed as the argument is a selector to point to the anchor tags \
that the crawler should pass through. A list of links is obtained, and the links \
are iterated through. The relative paths are converted into absolute paths and \
a ``XpathSelector``/``CssSelector`` object (as is the case) is created with the URL of the next page as the argument \
and this created object is yielded.
The extract_links method basically generates ``XpathSelector``/``CssSelector`` objects for all of \
the links to be crawled through.
:param selector: The selector for the anchor tags to be crawled through
:return: A ``XpathSelector``/``CssSelector`` object for every page to be crawled through
"""
try:
links = self.get_tree_tag(selector=selector)
for link in links:
next_url = urljoin(self.url, link.get('href'))
yield type(self)(next_url)
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
except Exception:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
def extract_rows(self, result={}, selector='', table_headers=[], attr='', connector='', default='', verbosity=0, *args, **kwargs):
"""
Row data extraction for extract_tabular
"""
result_list = []
try:
values = self.get_tree_tag(selector)
if len(table_headers) >= len(values):
from itertools import izip_longest
pairs = izip_longest(table_headers, values, fillvalue=default)
else:
from itertools import izip
pairs = izip(table_headers, values)
for head, val in pairs:
if verbosity > 1:
print("\nExtracting", head, "attribute", sep=' ', end='')
if attr.lower() == "text":
try:
content = connector.join([make_ascii(x).strip() for x in val.itertext()])
except Exception:
content = default
content = content.replace("\n", " ").strip()
else:
content = val.get(attr)
if attr in ["href", "src"]:
content = urljoin(self.url, content)
result[head] = content
result_list.append(result)
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
except TypeError:
raise Exception("Selector expression string to be provided. Got " + selector)
return result_list
def extract_columns(self, result={}, selector='', table_headers=[], attr='', connector='', default='', verbosity=0, *args, **kwargs):
"""
Column data extraction for extract_tabular
"""
result_list = []
try:
if type(selector) in [str, unicode]:
selectors = [selector]
elif type(selector) == list:
selectors = selector[:]
else:
raise Exception("Use a list of selector expressions for the various columns")
from itertools import izip, count
pairs = izip(table_headers, selectors)
columns = {}
for head, selector in pairs:
columns[head] = self.get_tree_tag(selector)
try:
for i in count(start=0):
r = result.copy()
for head in columns.keys():
if verbosity > 1:
print("\nExtracting", head, "attribute", sep=' ', end='')
col = columns[head][i]
if attr == "text":
try:
content = connector.join([make_ascii(x).strip() for x in col.itertext()])
except Exception:
content = default
content = content.replace("\n", " ").strip()
else:
content = col.get(attr)
if attr in ["href", "src"]:
content = urljoin(self.url, content)
r[head] = content
result_list.append(r)
except IndexError:
pass
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
except TypeError:
raise Exception("Selector expression string to be provided. Got " + selector)
return result_list
|
AlexMathew/scrapple
|
scrapple/selectors/selector.py
|
Selector.extract_rows
|
python
|
def extract_rows(self, result={}, selector='', table_headers=[], attr='', connector='', default='', verbosity=0, *args, **kwargs):
result_list = []
try:
values = self.get_tree_tag(selector)
if len(table_headers) >= len(values):
from itertools import izip_longest
pairs = izip_longest(table_headers, values, fillvalue=default)
else:
from itertools import izip
pairs = izip(table_headers, values)
for head, val in pairs:
if verbosity > 1:
print("\nExtracting", head, "attribute", sep=' ', end='')
if attr.lower() == "text":
try:
content = connector.join([make_ascii(x).strip() for x in val.itertext()])
except Exception:
content = default
content = content.replace("\n", " ").strip()
else:
content = val.get(attr)
if attr in ["href", "src"]:
content = urljoin(self.url, content)
result[head] = content
result_list.append(result)
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
except TypeError:
raise Exception("Selector expression string to be provided. Got " + selector)
return result_list
|
Row data extraction for extract_tabular
|
train
|
https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/selectors/selector.py#L190-L224
|
[
"def get_tree_tag(self, selector='', get_one=False, *args, **kwargs):\n\traise NotImplementedError\n"
] |
class Selector(object):
"""
This class defines the basic ``Selector`` object.
"""
__selector_type__ = ''
def __init__(self, url):
"""
The URL of the web page to be loaded is validated - ensuring the schema has \
been specified, and that the URL is valid. A HTTP GET request is made to load \
the web page, and the HTML content of this fetched web page is used to generate \
the :ref:`element tree <concepts-structure>`. This is the element tree that will \
be parsed to extract the necessary content.
"""
try:
headers = {
'content-encoding': 'gzip',
'Accept-Encoding': 'identity, compress, gzip',
'Accept': '*/*'
}
headers['User-Agent'] = random.choice([
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:30.0) Gecko/20100101 Firefox/30.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (Windows NT 6.1; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (Windows NT 5.1; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1',
'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0',
'Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36',
'Mozilla/5.0 (X11; OpenBSD i386) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
])
self.url = url
self.content = requests.get(url, headers=headers).content
self.tree = etree.HTML(self.content)
except requests.exceptions.MissingSchema:
raise Exception('URL should be of the form "http://<page_link>')
except requests.exceptions.InvalidURL:
raise Exception('The URL provided is invalid')
except requests.exceptions.ConnectionError:
raise Exception('Ensure that you are connected to the Internet and that the page exists')
def get_tree_tag(self, selector='', get_one=False, *args, **kwargs):
raise NotImplementedError
def extract_content(self, selector='', attr='', default='', connector='', *args, **kwargs):
"""
Method for performing the content extraction for the particular selector type. \
If the selector is "url", the URL of the current web page is returned.
Otherwise, the selector expression is used to extract content. The particular \
attribute to be extracted ("text", "href", etc.) is specified in the method \
arguments, and this is used to extract the required content. If the content \
extracted is a link (from an attr value of "href" or "src"), the URL is parsed \
to convert the relative path into an absolute path.
If the selector does not fetch any content, the default value is returned. \
If no default value is specified, an exception is raised.
:param selector: The XPath expression
:param attr: The attribute to be extracted from the selected tag
:param default: The default value to be used if the selector does not return any data
:param connector: String connector for list of data returned for a particular selector
:return: The extracted content
"""
try:
if selector.lower() == "url":
return self.url
if attr.lower() == "text":
tag = self.get_tree_tag(selector=selector, get_one=True)
content = connector.join([make_ascii(x).strip() for x in tag.itertext()])
content = content.replace("\n", " ").strip()
else:
tag = self.get_tree_tag(selector=selector, get_one=True)
content = tag.get(attr)
if attr in ["href", "src"]:
content = urljoin(self.url, content)
return content
except IndexError:
if default is not "":
return default
raise Exception("There is no content for the %s selector - %s" % (self.__selector_type__, selector))
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
def extract_links(self, selector='', *args, **kwargs):
"""
Method for performing the link extraction for the crawler. \
The selector passed as the argument is a selector to point to the anchor tags \
that the crawler should pass through. A list of links is obtained, and the links \
are iterated through. The relative paths are converted into absolute paths and \
a ``XpathSelector``/``CssSelector`` object (as is the case) is created with the URL of the next page as the argument \
and this created object is yielded.
The extract_links method basically generates ``XpathSelector``/``CssSelector`` objects for all of \
the links to be crawled through.
:param selector: The selector for the anchor tags to be crawled through
:return: A ``XpathSelector``/``CssSelector`` object for every page to be crawled through
"""
try:
links = self.get_tree_tag(selector=selector)
for link in links:
next_url = urljoin(self.url, link.get('href'))
yield type(self)(next_url)
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
except Exception:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
def extract_tabular(self, header='', prefix='', suffix='', table_type='', *args, **kwargs):
"""
Method for performing the tabular data extraction. \
:param result: A dictionary containing the extracted data so far
:param table_type: Can be "rows" or "columns". This determines the type of table to be extracted. \
A row extraction is when there is a single row to be extracted and mapped to a set of headers. \
A column extraction is when a set of rows have to be extracted, giving a list of header-value mappings.
:param header: The headers to be used for the table. This can be a list of headers, or a selector that gives the list of headers
:param prefix: A prefix to be added to each header
:param suffix: A suffix to be added to each header
:param selector: For row extraction, this is a selector that gives the row to be extracted. \
For column extraction, this is a list of selectors for each column.
:param attr: The attribute to be extracted from the selected tag
:param default: The default value to be used if the selector does not return any data
:param verbosity: The verbosity set as the argument for scrapple run
:return: A 2-tuple containing the list of all the column headers extracted and the list of \
dictionaries which contain (header, content) pairs
"""
if type(header) in [str, unicode]:
try:
header_list = self.get_tree_tag(header)
table_headers = [prefix + h.text + suffix for h in header_list]
except XPathError:
raise Exception("Invalid %s selector for table header - %s" % (self.__selector_type__, header))
except Exception:
raise Exception("Invalid %s selector for table header - %s" % (self.__selector_type__, header))
else:
table_headers = [prefix + h + suffix for h in header]
if len(table_headers) == 0:
raise Exception("Invalid %s selector for table header - %s" % (self.__selector_type__, header))
if table_type not in ["rows", "columns"]:
raise Exception("Specify 'rows' or 'columns' in table_type")
if table_type == "rows":
result_list = self.extract_rows(table_headers=table_headers, *args, **kwargs)
else:
result_list = self.extract_columns(table_headers=table_headers, *args, **kwargs)
return table_headers, result_list
def extract_columns(self, result={}, selector='', table_headers=[], attr='', connector='', default='', verbosity=0, *args, **kwargs):
"""
Column data extraction for extract_tabular
"""
result_list = []
try:
if type(selector) in [str, unicode]:
selectors = [selector]
elif type(selector) == list:
selectors = selector[:]
else:
raise Exception("Use a list of selector expressions for the various columns")
from itertools import izip, count
pairs = izip(table_headers, selectors)
columns = {}
for head, selector in pairs:
columns[head] = self.get_tree_tag(selector)
try:
for i in count(start=0):
r = result.copy()
for head in columns.keys():
if verbosity > 1:
print("\nExtracting", head, "attribute", sep=' ', end='')
col = columns[head][i]
if attr == "text":
try:
content = connector.join([make_ascii(x).strip() for x in col.itertext()])
except Exception:
content = default
content = content.replace("\n", " ").strip()
else:
content = col.get(attr)
if attr in ["href", "src"]:
content = urljoin(self.url, content)
r[head] = content
result_list.append(r)
except IndexError:
pass
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
except TypeError:
raise Exception("Selector expression string to be provided. Got " + selector)
return result_list
|
AlexMathew/scrapple
|
scrapple/selectors/selector.py
|
Selector.extract_columns
|
python
|
def extract_columns(self, result={}, selector='', table_headers=[], attr='', connector='', default='', verbosity=0, *args, **kwargs):
result_list = []
try:
if type(selector) in [str, unicode]:
selectors = [selector]
elif type(selector) == list:
selectors = selector[:]
else:
raise Exception("Use a list of selector expressions for the various columns")
from itertools import izip, count
pairs = izip(table_headers, selectors)
columns = {}
for head, selector in pairs:
columns[head] = self.get_tree_tag(selector)
try:
for i in count(start=0):
r = result.copy()
for head in columns.keys():
if verbosity > 1:
print("\nExtracting", head, "attribute", sep=' ', end='')
col = columns[head][i]
if attr == "text":
try:
content = connector.join([make_ascii(x).strip() for x in col.itertext()])
except Exception:
content = default
content = content.replace("\n", " ").strip()
else:
content = col.get(attr)
if attr in ["href", "src"]:
content = urljoin(self.url, content)
r[head] = content
result_list.append(r)
except IndexError:
pass
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
except TypeError:
raise Exception("Selector expression string to be provided. Got " + selector)
return result_list
|
Column data extraction for extract_tabular
|
train
|
https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/selectors/selector.py#L227-L271
|
[
"def get_tree_tag(self, selector='', get_one=False, *args, **kwargs):\n\traise NotImplementedError\n"
] |
class Selector(object):
"""
This class defines the basic ``Selector`` object.
"""
__selector_type__ = ''
def __init__(self, url):
"""
The URL of the web page to be loaded is validated - ensuring the schema has \
been specified, and that the URL is valid. A HTTP GET request is made to load \
the web page, and the HTML content of this fetched web page is used to generate \
the :ref:`element tree <concepts-structure>`. This is the element tree that will \
be parsed to extract the necessary content.
"""
try:
headers = {
'content-encoding': 'gzip',
'Accept-Encoding': 'identity, compress, gzip',
'Accept': '*/*'
}
headers['User-Agent'] = random.choice([
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:30.0) Gecko/20100101 Firefox/30.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (Windows NT 6.1; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (Windows NT 5.1; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1',
'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0',
'Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36',
'Mozilla/5.0 (X11; OpenBSD i386) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2',
])
self.url = url
self.content = requests.get(url, headers=headers).content
self.tree = etree.HTML(self.content)
except requests.exceptions.MissingSchema:
raise Exception('URL should be of the form "http://<page_link>')
except requests.exceptions.InvalidURL:
raise Exception('The URL provided is invalid')
except requests.exceptions.ConnectionError:
raise Exception('Ensure that you are connected to the Internet and that the page exists')
def get_tree_tag(self, selector='', get_one=False, *args, **kwargs):
raise NotImplementedError
def extract_content(self, selector='', attr='', default='', connector='', *args, **kwargs):
"""
Method for performing the content extraction for the particular selector type. \
If the selector is "url", the URL of the current web page is returned.
Otherwise, the selector expression is used to extract content. The particular \
attribute to be extracted ("text", "href", etc.) is specified in the method \
arguments, and this is used to extract the required content. If the content \
extracted is a link (from an attr value of "href" or "src"), the URL is parsed \
to convert the relative path into an absolute path.
If the selector does not fetch any content, the default value is returned. \
If no default value is specified, an exception is raised.
:param selector: The XPath expression
:param attr: The attribute to be extracted from the selected tag
:param default: The default value to be used if the selector does not return any data
:param connector: String connector for list of data returned for a particular selector
:return: The extracted content
"""
try:
if selector.lower() == "url":
return self.url
if attr.lower() == "text":
tag = self.get_tree_tag(selector=selector, get_one=True)
content = connector.join([make_ascii(x).strip() for x in tag.itertext()])
content = content.replace("\n", " ").strip()
else:
tag = self.get_tree_tag(selector=selector, get_one=True)
content = tag.get(attr)
if attr in ["href", "src"]:
content = urljoin(self.url, content)
return content
except IndexError:
if default is not "":
return default
raise Exception("There is no content for the %s selector - %s" % (self.__selector_type__, selector))
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
def extract_links(self, selector='', *args, **kwargs):
"""
Method for performing the link extraction for the crawler. \
The selector passed as the argument is a selector to point to the anchor tags \
that the crawler should pass through. A list of links is obtained, and the links \
are iterated through. The relative paths are converted into absolute paths and \
a ``XpathSelector``/``CssSelector`` object (as is the case) is created with the URL of the next page as the argument \
and this created object is yielded.
The extract_links method basically generates ``XpathSelector``/``CssSelector`` objects for all of \
the links to be crawled through.
:param selector: The selector for the anchor tags to be crawled through
:return: A ``XpathSelector``/``CssSelector`` object for every page to be crawled through
"""
try:
links = self.get_tree_tag(selector=selector)
for link in links:
next_url = urljoin(self.url, link.get('href'))
yield type(self)(next_url)
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
except Exception:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
def extract_tabular(self, header='', prefix='', suffix='', table_type='', *args, **kwargs):
"""
Method for performing the tabular data extraction. \
:param result: A dictionary containing the extracted data so far
:param table_type: Can be "rows" or "columns". This determines the type of table to be extracted. \
A row extraction is when there is a single row to be extracted and mapped to a set of headers. \
A column extraction is when a set of rows have to be extracted, giving a list of header-value mappings.
:param header: The headers to be used for the table. This can be a list of headers, or a selector that gives the list of headers
:param prefix: A prefix to be added to each header
:param suffix: A suffix to be added to each header
:param selector: For row extraction, this is a selector that gives the row to be extracted. \
For column extraction, this is a list of selectors for each column.
:param attr: The attribute to be extracted from the selected tag
:param default: The default value to be used if the selector does not return any data
:param verbosity: The verbosity set as the argument for scrapple run
:return: A 2-tuple containing the list of all the column headers extracted and the list of \
dictionaries which contain (header, content) pairs
"""
if type(header) in [str, unicode]:
try:
header_list = self.get_tree_tag(header)
table_headers = [prefix + h.text + suffix for h in header_list]
except XPathError:
raise Exception("Invalid %s selector for table header - %s" % (self.__selector_type__, header))
except Exception:
raise Exception("Invalid %s selector for table header - %s" % (self.__selector_type__, header))
else:
table_headers = [prefix + h + suffix for h in header]
if len(table_headers) == 0:
raise Exception("Invalid %s selector for table header - %s" % (self.__selector_type__, header))
if table_type not in ["rows", "columns"]:
raise Exception("Specify 'rows' or 'columns' in table_type")
if table_type == "rows":
result_list = self.extract_rows(table_headers=table_headers, *args, **kwargs)
else:
result_list = self.extract_columns(table_headers=table_headers, *args, **kwargs)
return table_headers, result_list
def extract_rows(self, result={}, selector='', table_headers=[], attr='', connector='', default='', verbosity=0, *args, **kwargs):
"""
Row data extraction for extract_tabular
"""
result_list = []
try:
values = self.get_tree_tag(selector)
if len(table_headers) >= len(values):
from itertools import izip_longest
pairs = izip_longest(table_headers, values, fillvalue=default)
else:
from itertools import izip
pairs = izip(table_headers, values)
for head, val in pairs:
if verbosity > 1:
print("\nExtracting", head, "attribute", sep=' ', end='')
if attr.lower() == "text":
try:
content = connector.join([make_ascii(x).strip() for x in val.itertext()])
except Exception:
content = default
content = content.replace("\n", " ").strip()
else:
content = val.get(attr)
if attr in ["href", "src"]:
content = urljoin(self.url, content)
result[head] = content
result_list.append(result)
except XPathError:
raise Exception("Invalid %s selector - %s" % (self.__selector_type__, selector))
except TypeError:
raise Exception("Selector expression string to be provided. Got " + selector)
return result_list
|
AlexMathew/scrapple
|
scrapple/cmd.py
|
runCLI
|
python
|
def runCLI():
args = docopt(__doc__, version='0.3.0')
try:
check_arguments(args)
command_list = ['genconfig', 'run', 'generate']
select = itemgetter('genconfig', 'run', 'generate')
selectedCommand = command_list[select(args).index(True)]
cmdClass = get_command_class(selectedCommand)
obj = cmdClass(args)
obj.execute_command()
except POSSIBLE_EXCEPTIONS as e:
print('\n', e, '\n')
|
The starting point for the execution of the Scrapple command line tool.
runCLI uses the docstring as the usage description for the scrapple command. \
The class for the required command is selected by a dynamic dispatch, and the \
command is executed through the execute_command() method of the command class.
|
train
|
https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/cmd.py#L49-L67
|
[
"def get_command_class(command):\n \"\"\"\n Called from runCLI() to select the command class for the selected command.\n\n :param command: The command to be implemented\n :return: The command class corresponding to the selected command\n \"\"\"\n from scrapple.commands import genconfig, generate, run, web\n commandMapping = {\n \t'genconfig': genconfig,\n \t'generate': generate,\n \t'run': run,\n \t'web': web\n }\n cmdClass = getattr(commandMapping.get(command), command.title() + 'Command')\n return cmdClass\n",
"def check_arguments(args):\n\t\"\"\"\n\tValidates the arguments passed through the CLI commands.\n\n\t:param args: The arguments passed in the CLI, parsed by the docopt module\n\t:return: None\n\n\t\"\"\"\n\tprojectname_re = re.compile(r'[^a-zA-Z0-9_]')\n\tif args['genconfig']:\n\t\tif args['--type'] not in ['scraper', 'crawler']:\n\t\t\traise InvalidType(\"--type has to be 'scraper' or 'crawler'\")\n\t\tif args['--selector'] not in ['xpath', 'css']:\n\t\t\traise InvalidSelector(\"--selector has to be 'xpath' or 'css'\")\n\tif args['generate'] or args['run']:\n\t\tif args['--output_type'] not in ['json', 'csv']:\n\t\t\traise InvalidOutputType(\"--output_type has to be 'json' or 'csv'\")\n\tif args['genconfig'] or args['generate'] or args['run']:\n\t\tif projectname_re.search(args['<projectname>']) is not None:\n\t\t\tmessage = \"<projectname> should consist of letters, digits or _\"\n\t\t\traise InvalidProjectName(message)\n\ttry:\n\t\tif int(args['--levels']) < 1:\n\t\t\tmessage = \"--levels should be greater than, or equal to 1\"\n\t\t\traise InvalidLevels(message)\n\texcept (TypeError, ValueError):\n\t\tmessage = \" \".join([\n\t\t\t\"--levels should be an integer and not of type\",\n\t\t\t\"{}\".format(type(args['--levels']))\n\t\t])\n\t\traise InvalidLevels(message)\n"
] |
"""
Usage:
scrapple (-h | --help | --version)
scrapple genconfig <projectname> <url> [--type=<type>] [--selector=<selector>] \
[--levels=<levels>]
scrapple run <projectname> <output_filename> [--output_type=<output_type>] \
[--verbosity=<verbosity>]
scrapple generate <projectname> <output_filename> [--output_type=<output_type>]
Options:
-h, --help
Show this help message and exit
--version, -V
Display the version of Scrapple
--type=<type>, -t <type>
Specifies if the script generated is a page scraper or a crawler [default: scraper]
--selector=<selector>, -s <selector>
Specifies if XPath expressions or CSS selectors are used [default: xpath]
--levels=<levels>, -l <levels>
Specifies the number of levels for the crawler configuration file [default: 1]
--output_type=<output_type>, -o <output_type>
Specifies if the generated output is stored as CSV or JSON [default: json]
--verbosity=<verbosity>, -v <verbosity>
Specifies how much of the running is logged. 0 runs the implementation silently; 1 gives basic \
information, like the URL currently being processed; 2 gives a detailed description of the fields being \
extracted [default: 0]
"""
from __future__ import print_function
from operator import itemgetter
from docopt import docopt
from scrapple.utils.dynamicdispatch import get_command_class
from scrapple.utils.exceptions import (InvalidLevels, InvalidOutputType,
InvalidProjectName, InvalidSelector,
InvalidType, check_arguments)
POSSIBLE_EXCEPTIONS = (
InvalidType,
InvalidSelector,
InvalidOutputType,
InvalidProjectName,
InvalidLevels
)
if __name__ == '__main__':
runCLI()
|
AlexMathew/scrapple
|
scrapple/utils/exceptions.py
|
check_arguments
|
python
|
def check_arguments(args):
projectname_re = re.compile(r'[^a-zA-Z0-9_]')
if args['genconfig']:
if args['--type'] not in ['scraper', 'crawler']:
raise InvalidType("--type has to be 'scraper' or 'crawler'")
if args['--selector'] not in ['xpath', 'css']:
raise InvalidSelector("--selector has to be 'xpath' or 'css'")
if args['generate'] or args['run']:
if args['--output_type'] not in ['json', 'csv']:
raise InvalidOutputType("--output_type has to be 'json' or 'csv'")
if args['genconfig'] or args['generate'] or args['run']:
if projectname_re.search(args['<projectname>']) is not None:
message = "<projectname> should consist of letters, digits or _"
raise InvalidProjectName(message)
try:
if int(args['--levels']) < 1:
message = "--levels should be greater than, or equal to 1"
raise InvalidLevels(message)
except (TypeError, ValueError):
message = " ".join([
"--levels should be an integer and not of type",
"{}".format(type(args['--levels']))
])
raise InvalidLevels(message)
|
Validates the arguments passed through the CLI commands.
:param args: The arguments passed in the CLI, parsed by the docopt module
:return: None
|
train
|
https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/utils/exceptions.py#L36-L66
| null |
"""
scrapple.utils.exceptions
~~~~~~~~~~~~~~~~~~~~~~~~~
Functions related to handling exceptions in the input arguments
"""
import re
class InvalidType(ValueError):
"""Exception class for invalid type in arguments."""
pass
class InvalidSelector(ValueError):
"""Exception class for invalid in arguments."""
pass
class InvalidOutputType(ValueError):
"""Exception class for invalid output_type in arguments."""
pass
class InvalidProjectName(ValueError):
"""Exception class for invalid <projectname> in arguments."""
pass
class InvalidLevels(ValueError):
"""Exception class for invalid levels in arguments."""
pass
|
AlexMathew/scrapple
|
scrapple/utils/dynamicdispatch.py
|
get_command_class
|
python
|
def get_command_class(command):
from scrapple.commands import genconfig, generate, run, web
commandMapping = {
'genconfig': genconfig,
'generate': generate,
'run': run,
'web': web
}
cmdClass = getattr(commandMapping.get(command), command.title() + 'Command')
return cmdClass
|
Called from runCLI() to select the command class for the selected command.
:param command: The command to be implemented
:return: The command class corresponding to the selected command
|
train
|
https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/utils/dynamicdispatch.py#L8-L23
| null |
"""
scrapple.utils.dynamicdispatch
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Functions related to dynamic dispatch of objects
"""
|
AlexMathew/scrapple
|
scrapple/utils/form.py
|
form_to_json
|
python
|
def form_to_json(form):
config = dict()
if form['project_name'] == "":
raise Exception('Project name cannot be empty.')
if form['selector_type'] not in ["css", "xpath"]:
raise Exception('Selector type has to css or xpath')
config['project_name'] = form['project_name']
config['selector_type'] = form['selector_type']
config['scraping'] = dict()
if form['url'] == "":
raise Exception('URL cannot be empty')
config['scraping']['url'] = form['url']
config['scraping']['data'] = list()
for i in itertools.count(start=1):
try:
data = {
'field': form['field_' + str(i)],
'selector': form['selector_' + str(i)],
'attr': form['attribute_' + str(i)],
'default': form['default_' + str(i)]
}
config['scraping']['data'].append(data)
except KeyError:
break
# TODO : Crawler 'next' parameter handling
with open(os.path.join(os.getcwd(), form['project_name'] + '.json'), 'w') as f:
json.dump(config, f)
return
|
Takes the form from the POST request in the web interface, and generates the JSON config\
file
:param form: The form from the POST request
:return: None
|
train
|
https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/utils/form.py#L13-L48
| null |
"""
scrapple.utils.form
~~~~~~~~~~~~~~~~~~~
Functions related to form handling.
"""
import itertools
import json
import os
|
AlexMathew/scrapple
|
scrapple/commands/run.py
|
RunCommand.execute_command
|
python
|
def execute_command(self):
try:
self.args['--verbosity'] = int(self.args['--verbosity'])
if self.args['--verbosity'] not in [0, 1, 2]:
raise ValueError
if self.args['--verbosity'] > 0:
print(Back.GREEN + Fore.BLACK + "Scrapple Run")
print(Back.RESET + Fore.RESET)
import json
with open(self.args['<projectname>'] + '.json', 'r') as f:
self.config = json.load(f)
validate_config(self.config)
self.run()
except ValueError:
print(Back.WHITE + Fore.RED + "Use 0, 1 or 2 for verbosity." \
+ Back.RESET + Fore.RESET, sep="")
except IOError:
print(Back.WHITE + Fore.RED + self.args['<projectname>'], ".json does not ", \
"exist. Use ``scrapple genconfig``." + Back.RESET + Fore.RESET, sep="")
except InvalidConfigException as e:
print(Back.WHITE + Fore.RED + e + Back.RESET + Fore.RESET, sep="")
|
The run command implements the web content extractor corresponding to the given \
configuration file.
The execute_command() validates the input project name and opens the JSON \
configuration file. The run() method handles the execution of the extractor run.
The extractor implementation follows these primary steps :
1. Selects the appropriate :ref:`selector class <implementation-selectors>` through \
a dynamic dispatch, with the selector_type argument from the CLI input.
#. Iterate through the data section in level-0 of the configuration file. \
On each data item, call the extract_content() method from the selector class to \
extract the content according to the specified extractor rule.
#. If there are multiple levels of the extractor, i.e, if there is a 'next' \
attribute in the configuration file, call the traverse_next() \
:ref:`utility function <implementation-utils>` and parse through successive levels \
of the configuration file.
#. According to the --output_type argument, the result data is saved in a JSON \
document or a CSV document.
|
train
|
https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/commands/run.py#L29-L74
|
[
"def validate_config(config):\n \"\"\"\n Validates the extractor configuration file. Ensures that there are no duplicate field names, etc.\n\n :param config: The configuration file that contains the specification of the extractor\n :return: True if config is valid, else raises a exception that specifies the correction to be made\n\n \"\"\"\n fields = [f for f in get_fields(config)]\n if len(fields) != len(set(fields)):\n raise InvalidConfigException(\n \"Invalid configuration file - %d duplicate field names\" % len(fields) - len(set(fields))\n )\n return True\n",
"def run(self):\n selectorClassMapping = {\n 'xpath': XpathSelector,\n 'css': CssSelector\n }\n selectorClass = selectorClassMapping.get(self.config['selector_type'].lower())\n results = dict()\n results['project'] = self.args['<projectname>']\n results['data'] = list()\n try:\n result = dict()\n tabular_data_headers = dict()\n if self.args['--verbosity'] > 0:\n print()\n print(Back.YELLOW + Fore.BLUE + \"Loading page \", self.config['scraping']['url'] \\\n + Back.RESET + Fore.RESET, end='')\n selector = selectorClass(self.config['scraping']['url'])\n for attribute in self.config['scraping']['data']:\n if attribute['field'] != \"\":\n if self.args['--verbosity'] > 1:\n print(\"\\nExtracting\", attribute['field'], \"attribute\", sep=' ', end='')\n result[attribute['field']] = selector.extract_content(**attribute)\n if not self.config['scraping'].get('table'):\n result_list = [result]\n else:\n tables = self.config['scraping'].get('table', [])\n for table in tables:\n if table.get('selector', '').strip() != '':\n table.update({\n 'result': result,\n 'verbosity': self.args['--verbosity']\n })\n table_headers, result_list = selector.extract_tabular(**table)\n for th in table_headers:\n if not th in tabular_data_headers:\n tabular_data_headers[th] = len(tabular_data_headers)\n if not self.config['scraping'].get('next'):\n results['data'].extend(result_list)\n else:\n for nextx in self.config['scraping']['next']:\n for tdh, r in traverse_next(selector, nextx, result, verbosity=self.args['--verbosity']):\n results['data'].append(r)\n for th in tdh:\n if not th in tabular_data_headers:\n tabular_data_headers[th] = len(tabular_data_headers)\n except KeyboardInterrupt:\n pass\n except Exception as e:\n print(e)\n finally:\n if self.args['--output_type'] == 'json':\n import json\n with open(os.path.join(os.getcwd(), self.args['<output_filename>'] + '.json'), \\\n 'w') as f:\n json.dump(results, f, indent=4)\n elif self.args['--output_type'] == 'csv':\n import csv\n with 
open(os.path.join(os.getcwd(), self.args['<output_filename>'] + '.csv'), \\\n 'w') as f:\n fields = extract_fieldnames(self.config)\n data_headers = sorted(tabular_data_headers, key=lambda x:tabular_data_headers[x])\n fields.extend(data_headers)\n writer = csv.DictWriter(f, fieldnames=fields)\n writer.writeheader()\n writer.writerows(results['data'])\n if self.args['--verbosity'] > 0: \n print()\n print(Back.WHITE + Fore.RED + self.args['<output_filename>'], \\\n \".\", self.args['--output_type'], \" has been created\" \\\n + Back.RESET + Fore.RESET, sep=\"\")\n"
] |
class RunCommand(command.Command):
"""
Defines the execution of :ref:`run <command-run>`
"""
def __init__(self, args):
super(RunCommand, self).__init__(args)
init()
def run(self):
selectorClassMapping = {
'xpath': XpathSelector,
'css': CssSelector
}
selectorClass = selectorClassMapping.get(self.config['selector_type'].lower())
results = dict()
results['project'] = self.args['<projectname>']
results['data'] = list()
try:
result = dict()
tabular_data_headers = dict()
if self.args['--verbosity'] > 0:
print()
print(Back.YELLOW + Fore.BLUE + "Loading page ", self.config['scraping']['url'] \
+ Back.RESET + Fore.RESET, end='')
selector = selectorClass(self.config['scraping']['url'])
for attribute in self.config['scraping']['data']:
if attribute['field'] != "":
if self.args['--verbosity'] > 1:
print("\nExtracting", attribute['field'], "attribute", sep=' ', end='')
result[attribute['field']] = selector.extract_content(**attribute)
if not self.config['scraping'].get('table'):
result_list = [result]
else:
tables = self.config['scraping'].get('table', [])
for table in tables:
if table.get('selector', '').strip() != '':
table.update({
'result': result,
'verbosity': self.args['--verbosity']
})
table_headers, result_list = selector.extract_tabular(**table)
for th in table_headers:
if not th in tabular_data_headers:
tabular_data_headers[th] = len(tabular_data_headers)
if not self.config['scraping'].get('next'):
results['data'].extend(result_list)
else:
for nextx in self.config['scraping']['next']:
for tdh, r in traverse_next(selector, nextx, result, verbosity=self.args['--verbosity']):
results['data'].append(r)
for th in tdh:
if not th in tabular_data_headers:
tabular_data_headers[th] = len(tabular_data_headers)
except KeyboardInterrupt:
pass
except Exception as e:
print(e)
finally:
if self.args['--output_type'] == 'json':
import json
with open(os.path.join(os.getcwd(), self.args['<output_filename>'] + '.json'), \
'w') as f:
json.dump(results, f, indent=4)
elif self.args['--output_type'] == 'csv':
import csv
with open(os.path.join(os.getcwd(), self.args['<output_filename>'] + '.csv'), \
'w') as f:
fields = extract_fieldnames(self.config)
data_headers = sorted(tabular_data_headers, key=lambda x:tabular_data_headers[x])
fields.extend(data_headers)
writer = csv.DictWriter(f, fieldnames=fields)
writer.writeheader()
writer.writerows(results['data'])
if self.args['--verbosity'] > 0:
print()
print(Back.WHITE + Fore.RED + self.args['<output_filename>'], \
".", self.args['--output_type'], " has been created" \
+ Back.RESET + Fore.RESET, sep="")
|
AlexMathew/scrapple
|
scrapple/utils/config.py
|
traverse_next
|
python
|
def traverse_next(page, nextx, results, tabular_data_headers=[], verbosity=0):
for link in page.extract_links(selector=nextx['follow_link']):
if verbosity > 0:
print('\n')
print(Back.YELLOW + Fore.BLUE + "Loading page ", link.url + Back.RESET + Fore.RESET, end='')
r = results.copy()
for attribute in nextx['scraping'].get('data'):
if attribute['field'] != "":
if verbosity > 1:
print("\nExtracting", attribute['field'], "attribute", sep=' ', end='')
r[attribute['field']] = link.extract_content(**attribute)
if not nextx['scraping'].get('table'):
result_list = [r]
else:
tables = nextx['scraping'].get('table', [])
for table in tables:
table.update({
'result': r,
'verbosity': verbosity
})
table_headers, result_list = link.extract_tabular(**table)
tabular_data_headers.extend(table_headers)
if not nextx['scraping'].get('next'):
for r in result_list:
yield (tabular_data_headers, r)
else:
for nextx2 in nextx['scraping'].get('next'):
for tdh, result in traverse_next(link, nextx2, r, tabular_data_headers=tabular_data_headers, verbosity=verbosity):
yield (tdh, result)
|
Recursive generator to traverse through the next attribute and \
crawl through the links to be followed.
:param page: The current page being parsed
:param next: The next attribute of the current scraping dict
:param results: The current extracted content, stored in a dict
:return: The extracted content, through a generator
|
train
|
https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/utils/config.py#L20-L58
|
[
"def traverse_next(page, nextx, results, tabular_data_headers=[], verbosity=0):\n \"\"\"\n Recursive generator to traverse through the next attribute and \\\n crawl through the links to be followed.\n\n :param page: The current page being parsed\n :param next: The next attribute of the current scraping dict\n :param results: The current extracted content, stored in a dict\n :return: The extracted content, through a generator\n\n \"\"\"\n for link in page.extract_links(selector=nextx['follow_link']):\n if verbosity > 0:\n print('\\n')\n print(Back.YELLOW + Fore.BLUE + \"Loading page \", link.url + Back.RESET + Fore.RESET, end='')\n r = results.copy()\n for attribute in nextx['scraping'].get('data'):\n if attribute['field'] != \"\":\n if verbosity > 1:\n print(\"\\nExtracting\", attribute['field'], \"attribute\", sep=' ', end='')\n r[attribute['field']] = link.extract_content(**attribute)\n if not nextx['scraping'].get('table'):\n result_list = [r]\n else:\n tables = nextx['scraping'].get('table', [])\n for table in tables:\n table.update({\n 'result': r,\n 'verbosity': verbosity\n })\n table_headers, result_list = link.extract_tabular(**table)\n tabular_data_headers.extend(table_headers)\n if not nextx['scraping'].get('next'):\n for r in result_list:\n yield (tabular_data_headers, r)\n else:\n for nextx2 in nextx['scraping'].get('next'):\n for tdh, result in traverse_next(link, nextx2, r, tabular_data_headers=tabular_data_headers, verbosity=verbosity):\n yield (tdh, result)\n"
] |
"""
scrapple.utils.config
~~~~~~~~~~~~~~~~~~~~~
Functions related to traversing the configuration file
"""
from __future__ import print_function
from colorama import Back, Fore, init
init()
class InvalidConfigException(Exception):
"""Exception class for invalid config file. Example: duplicate field names"""
pass
def validate_config(config):
"""
Validates the extractor configuration file. Ensures that there are no duplicate field names, etc.
:param config: The configuration file that contains the specification of the extractor
:return: True if config is valid, else raises a exception that specifies the correction to be made
"""
fields = [f for f in get_fields(config)]
if len(fields) != len(set(fields)):
raise InvalidConfigException(
"Invalid configuration file - %d duplicate field names" % len(fields) - len(set(fields))
)
return True
def get_fields(config):
"""
Recursive generator that yields the field names in the config file
:param config: The configuration file that contains the specification of the extractor
:return: The field names in the config file, through a generator
"""
for data in config['scraping']['data']:
if data['field'] != '':
yield data['field']
if 'next' in config['scraping']:
for n in config['scraping']['next']:
for f in get_fields(n):
yield f
def extract_fieldnames(config):
"""
Function to return a list of unique field names from the config file
:param config: The configuration file that contains the specification of the extractor
:return: A list of field names from the config file
"""
fields = []
for x in get_fields(config):
if x in fields:
fields.append(x + '_' + str(fields.count(x) + 1))
else:
fields.append(x)
return fields
|
AlexMathew/scrapple
|
scrapple/utils/config.py
|
validate_config
|
python
|
def validate_config(config):
fields = [f for f in get_fields(config)]
if len(fields) != len(set(fields)):
raise InvalidConfigException(
"Invalid configuration file - %d duplicate field names" % len(fields) - len(set(fields))
)
return True
|
Validates the extractor configuration file. Ensures that there are no duplicate field names, etc.
:param config: The configuration file that contains the specification of the extractor
:return: True if config is valid, else raises a exception that specifies the correction to be made
|
train
|
https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/utils/config.py#L61-L74
|
[
"def get_fields(config):\n \"\"\"\n Recursive generator that yields the field names in the config file\n\n :param config: The configuration file that contains the specification of the extractor\n :return: The field names in the config file, through a generator\n\n \"\"\"\n for data in config['scraping']['data']:\n if data['field'] != '': \n yield data['field']\n if 'next' in config['scraping']:\n for n in config['scraping']['next']:\n for f in get_fields(n): \n yield f\n"
] |
"""
scrapple.utils.config
~~~~~~~~~~~~~~~~~~~~~
Functions related to traversing the configuration file
"""
from __future__ import print_function
from colorama import Back, Fore, init
init()
class InvalidConfigException(Exception):
"""Exception class for invalid config file. Example: duplicate field names"""
pass
def traverse_next(page, nextx, results, tabular_data_headers=[], verbosity=0):
"""
Recursive generator to traverse through the next attribute and \
crawl through the links to be followed.
:param page: The current page being parsed
:param next: The next attribute of the current scraping dict
:param results: The current extracted content, stored in a dict
:return: The extracted content, through a generator
"""
for link in page.extract_links(selector=nextx['follow_link']):
if verbosity > 0:
print('\n')
print(Back.YELLOW + Fore.BLUE + "Loading page ", link.url + Back.RESET + Fore.RESET, end='')
r = results.copy()
for attribute in nextx['scraping'].get('data'):
if attribute['field'] != "":
if verbosity > 1:
print("\nExtracting", attribute['field'], "attribute", sep=' ', end='')
r[attribute['field']] = link.extract_content(**attribute)
if not nextx['scraping'].get('table'):
result_list = [r]
else:
tables = nextx['scraping'].get('table', [])
for table in tables:
table.update({
'result': r,
'verbosity': verbosity
})
table_headers, result_list = link.extract_tabular(**table)
tabular_data_headers.extend(table_headers)
if not nextx['scraping'].get('next'):
for r in result_list:
yield (tabular_data_headers, r)
else:
for nextx2 in nextx['scraping'].get('next'):
for tdh, result in traverse_next(link, nextx2, r, tabular_data_headers=tabular_data_headers, verbosity=verbosity):
yield (tdh, result)
def get_fields(config):
"""
Recursive generator that yields the field names in the config file
:param config: The configuration file that contains the specification of the extractor
:return: The field names in the config file, through a generator
"""
for data in config['scraping']['data']:
if data['field'] != '':
yield data['field']
if 'next' in config['scraping']:
for n in config['scraping']['next']:
for f in get_fields(n):
yield f
def extract_fieldnames(config):
"""
Function to return a list of unique field names from the config file
:param config: The configuration file that contains the specification of the extractor
:return: A list of field names from the config file
"""
fields = []
for x in get_fields(config):
if x in fields:
fields.append(x + '_' + str(fields.count(x) + 1))
else:
fields.append(x)
return fields
|
AlexMathew/scrapple
|
scrapple/utils/config.py
|
get_fields
|
python
|
def get_fields(config):
for data in config['scraping']['data']:
if data['field'] != '':
yield data['field']
if 'next' in config['scraping']:
for n in config['scraping']['next']:
for f in get_fields(n):
yield f
|
Recursive generator that yields the field names in the config file
:param config: The configuration file that contains the specification of the extractor
:return: The field names in the config file, through a generator
|
train
|
https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/utils/config.py#L77-L91
|
[
"def get_fields(config):\n \"\"\"\n Recursive generator that yields the field names in the config file\n\n :param config: The configuration file that contains the specification of the extractor\n :return: The field names in the config file, through a generator\n\n \"\"\"\n for data in config['scraping']['data']:\n if data['field'] != '': \n yield data['field']\n if 'next' in config['scraping']:\n for n in config['scraping']['next']:\n for f in get_fields(n): \n yield f\n"
] |
"""
scrapple.utils.config
~~~~~~~~~~~~~~~~~~~~~
Functions related to traversing the configuration file
"""
from __future__ import print_function
from colorama import Back, Fore, init
init()
class InvalidConfigException(Exception):
"""Exception class for invalid config file. Example: duplicate field names"""
pass
def traverse_next(page, nextx, results, tabular_data_headers=[], verbosity=0):
"""
Recursive generator to traverse through the next attribute and \
crawl through the links to be followed.
:param page: The current page being parsed
:param next: The next attribute of the current scraping dict
:param results: The current extracted content, stored in a dict
:return: The extracted content, through a generator
"""
for link in page.extract_links(selector=nextx['follow_link']):
if verbosity > 0:
print('\n')
print(Back.YELLOW + Fore.BLUE + "Loading page ", link.url + Back.RESET + Fore.RESET, end='')
r = results.copy()
for attribute in nextx['scraping'].get('data'):
if attribute['field'] != "":
if verbosity > 1:
print("\nExtracting", attribute['field'], "attribute", sep=' ', end='')
r[attribute['field']] = link.extract_content(**attribute)
if not nextx['scraping'].get('table'):
result_list = [r]
else:
tables = nextx['scraping'].get('table', [])
for table in tables:
table.update({
'result': r,
'verbosity': verbosity
})
table_headers, result_list = link.extract_tabular(**table)
tabular_data_headers.extend(table_headers)
if not nextx['scraping'].get('next'):
for r in result_list:
yield (tabular_data_headers, r)
else:
for nextx2 in nextx['scraping'].get('next'):
for tdh, result in traverse_next(link, nextx2, r, tabular_data_headers=tabular_data_headers, verbosity=verbosity):
yield (tdh, result)
def validate_config(config):
    """
    Validates the extractor configuration file. Ensures that there are no duplicate field names, etc.

    :param config: The configuration file that contains the specification of the extractor
    :return: True if config is valid, else raises an exception that specifies the correction to be made
    """
    fields = list(get_fields(config))
    duplicate_count = len(fields) - len(set(fields))
    if duplicate_count:
        # BUG FIX: the original wrote
        #   "... %d ..." % len(fields) - len(set(fields))
        # which, due to %'s higher precedence, subtracted an int from a str
        # and raised TypeError instead of the intended InvalidConfigException.
        raise InvalidConfigException(
            "Invalid configuration file - %d duplicate field names" % duplicate_count
        )
    return True
def extract_fieldnames(config):
    """
    Function to return a list of unique field names from the config file

    :param config: The configuration file that contains the specification of the extractor
    :return: A list of field names from the config file
    """
    fields = []
    # Track how many times each base name has been seen so repeated names
    # receive distinct suffixes (a, a_2, a_3, ...). The original counted
    # occurrences of the raw name in the output list, so a third duplicate
    # produced a second 'a_2' instead of 'a_3', defeating uniqueness.
    occurrences = {}
    for name in get_fields(config):
        count = occurrences.get(name, 0) + 1
        occurrences[name] = count
        fields.append(name if count == 1 else '%s_%d' % (name, count))
    return fields
|
AlexMathew/scrapple
|
scrapple/utils/config.py
|
extract_fieldnames
|
python
|
def extract_fieldnames(config):
fields = []
for x in get_fields(config):
if x in fields:
fields.append(x + '_' + str(fields.count(x) + 1))
else:
fields.append(x)
return fields
|
Function to return a list of unique field names from the config file
:param config: The configuration file that contains the specification of the extractor
:return: A list of field names from the config file
|
train
|
https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/utils/config.py#L94-L108
|
[
"def get_fields(config):\n \"\"\"\n Recursive generator that yields the field names in the config file\n\n :param config: The configuration file that contains the specification of the extractor\n :return: The field names in the config file, through a generator\n\n \"\"\"\n for data in config['scraping']['data']:\n if data['field'] != '': \n yield data['field']\n if 'next' in config['scraping']:\n for n in config['scraping']['next']:\n for f in get_fields(n): \n yield f\n"
] |
"""
scrapple.utils.config
~~~~~~~~~~~~~~~~~~~~~
Functions related to traversing the configuration file
"""
from __future__ import print_function
from colorama import Back, Fore, init
init()
class InvalidConfigException(Exception):
"""Exception class for invalid config file. Example: duplicate field names"""
pass
def traverse_next(page, nextx, results, tabular_data_headers=[], verbosity=0):
"""
Recursive generator to traverse through the next attribute and \
crawl through the links to be followed.
:param page: The current page being parsed
:param next: The next attribute of the current scraping dict
:param results: The current extracted content, stored in a dict
:return: The extracted content, through a generator
"""
for link in page.extract_links(selector=nextx['follow_link']):
if verbosity > 0:
print('\n')
print(Back.YELLOW + Fore.BLUE + "Loading page ", link.url + Back.RESET + Fore.RESET, end='')
r = results.copy()
for attribute in nextx['scraping'].get('data'):
if attribute['field'] != "":
if verbosity > 1:
print("\nExtracting", attribute['field'], "attribute", sep=' ', end='')
r[attribute['field']] = link.extract_content(**attribute)
if not nextx['scraping'].get('table'):
result_list = [r]
else:
tables = nextx['scraping'].get('table', [])
for table in tables:
table.update({
'result': r,
'verbosity': verbosity
})
table_headers, result_list = link.extract_tabular(**table)
tabular_data_headers.extend(table_headers)
if not nextx['scraping'].get('next'):
for r in result_list:
yield (tabular_data_headers, r)
else:
for nextx2 in nextx['scraping'].get('next'):
for tdh, result in traverse_next(link, nextx2, r, tabular_data_headers=tabular_data_headers, verbosity=verbosity):
yield (tdh, result)
def validate_config(config):
"""
Validates the extractor configuration file. Ensures that there are no duplicate field names, etc.
:param config: The configuration file that contains the specification of the extractor
:return: True if config is valid, else raises a exception that specifies the correction to be made
"""
fields = [f for f in get_fields(config)]
if len(fields) != len(set(fields)):
raise InvalidConfigException(
"Invalid configuration file - %d duplicate field names" % len(fields) - len(set(fields))
)
return True
def get_fields(config):
    """
    Recursive generator that yields the field names in the config file

    :param config: The configuration file that contains the specification of the extractor
    :return: The field names in the config file, through a generator
    """
    scraping = config['scraping']
    for entry in scraping['data']:
        name = entry['field']
        if name != '':
            yield name
    # Descend into each nested level of links to be followed, if any.
    for nested in scraping.get('next', []):
        for field in get_fields(nested):
            yield field
|
AlexMathew/scrapple
|
scrapple/commands/genconfig.py
|
GenconfigCommand.execute_command
|
python
|
def execute_command(self):
print(Back.GREEN + Fore.BLACK + "Scrapple Genconfig")
print(Back.RESET + Fore.RESET)
directory = os.path.join(scrapple.__path__[0], 'templates', 'configs')
with open(os.path.join(directory, self.args['--type'] + '.txt'), 'r') as f:
template_content = f.read()
print("\n\nUsing the", self.args['--type'], "template\n\n")
template = Template(template_content)
settings = {
'projectname': self.args['<projectname>'],
'selector_type': self.args['--selector'],
'url': self.args['<url>'],
'levels': int(self.args['--levels'])
}
rendered = template.render(settings=settings)
with open(self.args['<projectname>'] + '.json', 'w') as f:
rendered_data = json.loads(rendered)
json.dump(rendered_data, f, indent=3)
print(Back.WHITE + Fore.RED + self.args['<projectname>'], ".json has been created" \
+ Back.RESET + Fore.RESET, sep="")
|
The genconfig command depends on predefined `Jinja2 <http://jinja.pocoo.org/>`_ \
templates for the skeleton configuration files. Taking the --type argument from the \
CLI input, the corresponding template file is used.
Settings for the configuration file, like project name, selector type and URL \
are taken from the CLI input and using these as parameters, the template is \
rendered. This rendered JSON document is saved as <project_name>.json.
|
train
|
https://github.com/AlexMathew/scrapple/blob/eeb604601b155d6cc7e035855ff4d3f48f8bed74/scrapple/commands/genconfig.py#L28-L57
| null |
class GenconfigCommand(command.Command):
    """
    Defines the execution of :ref:`genconfig <command-genconfig>`
    """

    def __init__(self, args):
        # ``args`` is the docopt-style dict of parsed CLI arguments
        # (e.g. '--type', '--selector', '<projectname>', '<url>', '--levels').
        super(GenconfigCommand, self).__init__(args)
        init()  # initialize colorama so ANSI colors also work on Windows

    def execute_command(self):
        """
        The genconfig command depends on predefined `Jinja2 <http://jinja.pocoo.org/>`_ \
        templates for the skeleton configuration files. Taking the --type argument from the \
        CLI input, the corresponding template file is used.

        Settings for the configuration file, like project name, selector type and URL \
        are taken from the CLI input and using these as parameters, the template is \
        rendered. This rendered JSON document is saved as <project_name>.json.
        """
        print(Back.GREEN + Fore.BLACK + "Scrapple Genconfig")
        print(Back.RESET + Fore.RESET)
        # Skeleton templates ship inside the installed scrapple package.
        directory = os.path.join(scrapple.__path__[0], 'templates', 'configs')
        with open(os.path.join(directory, self.args['--type'] + '.txt'), 'r') as f:
            template_content = f.read()
        print("\n\nUsing the", self.args['--type'], "template\n\n")
        template = Template(template_content)
        settings = {
            'projectname': self.args['<projectname>'],
            'selector_type': self.args['--selector'],
            'url': self.args['<url>'],
            'levels': int(self.args['--levels'])
        }
        rendered = template.render(settings=settings)
        with open(self.args['<projectname>'] + '.json', 'w') as f:
            # Round-trip through json to validate the rendered template and
            # write it back pretty-printed.
            rendered_data = json.loads(rendered)
            json.dump(rendered_data, f, indent=3)
        print(Back.WHITE + Fore.RED + self.args['<projectname>'], ".json has been created" \
            + Back.RESET + Fore.RESET, sep="")
|
brettcannon/gidgethub
|
gidgethub/sansio.py
|
_parse_content_type
|
python
|
def _parse_content_type(content_type: Optional[str]) -> Tuple[Optional[str], str]:
if not content_type:
return None, "utf-8"
else:
type_, parameters = cgi.parse_header(content_type)
encoding = parameters.get("charset", "utf-8")
return type_, encoding
|
Tease out the content-type and character encoding.
A default character encoding of UTF-8 is used, so the content-type
must be used to determine if any decoding is necessary to begin
with.
|
train
|
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/sansio.py#L24-L36
| null |
"""Code to help with HTTP requests, responses, and events from GitHub's developer API.
This code has been constructed to perform no I/O of its own. This allows you to
use any HTTP library you prefer while not having to implement common details
when working with GitHub's API (e.g. validating webhook events or specifying the
API version you want your request to work against).
"""
import cgi
import datetime
import hashlib
import hmac
import http
import json
import re
from typing import Any, Dict, Mapping, Optional, Tuple, Type, Union
import urllib.parse
import uritemplate
from . import (BadRequest, GitHubBroken, HTTPException, InvalidField,
RateLimitExceeded, RedirectionException, ValidationFailure)
def _decode_body(content_type: Optional[str], body: bytes,
                 *, strict: bool = False) -> Any:
    """Decode an HTTP body based on the specified content type.

    If 'strict' is true, then raise ValueError if the content type
    is not recognized. Otherwise simply return the body as a decoded
    string.
    """
    # Nothing to decode for an empty body or an unspecified content type;
    # check this first so the header isn't parsed pointlessly.
    # (Original used `not len(body)` and ran the guard after the parse.)
    if not body or not content_type:
        return None
    type_, encoding = _parse_content_type(content_type)
    decoded_body = body.decode(encoding)
    if type_ == "application/json":
        return json.loads(decoded_body)
    elif type_ == "application/x-www-form-urlencoded":
        # GitHub form-encodes the JSON document under the "payload" field.
        return json.loads(urllib.parse.parse_qs(decoded_body)["payload"][0])
    elif strict:
        raise ValueError(f"unrecognized content type: {type_!r}")
    return decoded_body
def validate_event(payload: bytes, *, signature: str, secret: str) -> None:
    """Validate the signature of a webhook event."""
    # https://developer.github.com/webhooks/securing/#validating-payloads-from-github
    prefix = "sha1="
    if not signature.startswith(prefix):
        raise ValidationFailure("signature does not start with "
                                f"{repr(prefix)}")
    digest = hmac.new(secret.encode("UTF-8"), msg=payload,
                      digestmod="sha1").hexdigest()
    expected = prefix + digest
    # Constant-time comparison to avoid leaking the signature via timing.
    if not hmac.compare_digest(signature, expected):
        raise ValidationFailure("payload's signature does not align "
                                "with the secret")
class Event:
    """Details of a GitHub webhook event."""

    def __init__(self, data: Any, *, event: str, delivery_id: str) -> None:
        # https://developer.github.com/v3/activity/events/types/
        # https://developer.github.com/webhooks/#delivery-headers
        self.data = data
        # The event name stays a plain string rather than an enum so newly
        # introduced GitHub events work without a library update.
        self.event = event
        self.delivery_id = delivery_id

    @classmethod
    def from_http(cls, headers: Mapping[str, str], body: bytes,
                  *, secret: Optional[str] = None) -> "Event":
        """Construct an event from HTTP headers and JSON body data.

        The mapping providing the headers is expected to support lowercase
        keys. The body must be "application/json" or URL-encoded JSON;
        any other content-type raises BadRequest.

        If a signature header is present (or a secret is supplied),
        signature validation is performed unconditionally; any failure —
        including a missing secret or missing signature — raises
        ValidationFailure.
        """
        signature = headers.get("x-hub-signature")
        if signature is not None:
            if secret is None:
                raise ValidationFailure("secret not provided")
            validate_event(body, signature=signature, secret=secret)
        elif secret is not None:
            raise ValidationFailure("signature is missing")
        try:
            data = _decode_body(headers["content-type"], body, strict=True)
        except (KeyError, ValueError) as exc:
            raise BadRequest(http.HTTPStatus(415),
                             "expected a content-type of "
                             "'application/json' or "
                             "'application/x-www-form-urlencoded'") from exc
        return cls(data, event=headers["x-github-event"],
                   delivery_id=headers["x-github-delivery"])
def accept_format(*, version: str = "v3", media: Optional[str] = None,
                  json: bool = True) -> str:
    """Construct the specification of the format that a request should return.

    The version argument defaults to v3 of the GitHub API and is applicable
    to all requests. The media argument along with 'json' specifies what
    format the request should return, e.g. requesting the rendered HTML of a
    comment. Do note that not all of GitHub's API supports alternative
    formats.

    The default arguments of this function will always return the latest
    stable version of the GitHub API in the default format that this library
    is designed to support.
    """
    # https://developer.github.com/v3/media/
    # https://developer.github.com/v3/#current-version
    parts = [f"application/vnd.github.{version}"]
    if media is not None:
        parts.append(f".{media}")
    if json:
        parts.append("+json")
    return "".join(parts)
def create_headers(requester: str, *, accept: str = accept_format(),
                   oauth_token: Optional[str] = None,
                   jwt: Optional[str] = None) -> Dict[str, str]:
    """Create a dict representing GitHub-specific header fields.

    The user agent is set according to who the requester is; GitHub asks it
    be either a username or project name. The 'accept' argument defaults to
    the default result of accept_format().

    'oauth_token' authenticates with a personal access token; 'jwt'
    authenticates as a GitHub App via a bearer token. Supply at most one of
    the two.

    For consistency, all keys in the returned dict will be lowercased.
    """
    # user-agent: https://developer.github.com/v3/#user-agent-required
    # accept: https://developer.github.com/v3/#current-version
    # authorization: https://developer.github.com/v3/#authentication
    # GitHub App auth: https://developer.github.com/apps/building-github-apps/authenticating-with-github-apps/#authenticating-as-a-github-app
    if oauth_token is not None and jwt is not None:
        raise ValueError("Cannot pass both oauth_token and jwt.")
    headers: Dict[str, str] = {
        "user-agent": requester,
        "accept": accept,
    }
    credential = None
    if oauth_token is not None:
        credential = f"token {oauth_token}"
    elif jwt is not None:
        credential = f"bearer {jwt}"
    if credential is not None:
        headers["authorization"] = credential
    return headers
class RateLimit:
    """The rate limit imposed upon the requester.

    The 'limit' attribute is the number of requests per hour the client may
    make; 'remaining' is how many of those are left; 'reset_datetime' is a
    timezone-aware UTC datetime at which 'remaining' resets to 'limit'.

    The boolean value of an instance says whether another request can be
    made: either requests remain, or the reset datetime has already passed.
    """

    # https://developer.github.com/v3/#rate-limiting

    def __init__(self, *, limit: int, remaining: int, reset_epoch: float) -> None:
        """Instantiate a RateLimit object.

        The reset_epoch argument should be in seconds since the UTC epoch.
        """
        # Attribute names mirror the names GitHub uses in its API docs.
        self.limit = limit
        self.remaining = remaining
        # Stored as an aware datetime (not the raw epoch int GitHub returns)
        # so comparisons against "now" are direct.
        self.reset_datetime = datetime.datetime.fromtimestamp(
            reset_epoch, datetime.timezone.utc)

    def __bool__(self) -> bool:
        """True if requests are remaining or the reset datetime has passed."""
        if self.remaining > 0:
            return True
        return datetime.datetime.now(datetime.timezone.utc) > self.reset_datetime

    def __str__(self) -> str:
        """Provide all details in a reasonable format."""
        return f"< {self.remaining:,}/{self.limit:,} until {self.reset_datetime} >"

    @classmethod
    def from_http(cls, headers: Mapping[str, str]) -> Optional["RateLimit"]:
        """Gather rate limit information from HTTP headers.

        The mapping providing the headers is expected to support lowercase
        keys. Returns ``None`` if ratelimit info is not found in the headers.
        """
        try:
            raw = (headers["x-ratelimit-limit"],
                   headers["x-ratelimit-remaining"],
                   headers["x-ratelimit-reset"])
        except KeyError:
            return None
        return cls(limit=int(raw[0]), remaining=int(raw[1]),
                   reset_epoch=float(raw[2]))
# Matches one entry of an RFC 5988 Link header, e.g.
#   <https://api.github.com/...?page=2>; rel="next"
_link_re = re.compile(r'\<(?P<uri>[^>]+)\>;\s*'
                      r'(?P<param_type>\w+)="(?P<param_value>\w+)"(,\s*)?')


def _next_link(link: Optional[str]) -> Optional[str]:
    """Return the rel="next" URI from a Link header, or None."""
    # https://developer.github.com/v3/#pagination
    # https://tools.ietf.org/html/rfc5988
    if link is None:
        return None
    for match in _link_re.finditer(link):
        if match.group("param_type") != "rel":
            continue
        if match.group("param_value") == "next":
            return match.group("uri")
    return None
def decipher_response(status_code: int, headers: Mapping[str, str],
                      body: bytes) -> Tuple[Any, Optional[RateLimit], Optional[str]]:
    """Decipher an HTTP response for a GitHub API request.

    The mapping providing the headers is expected to support lowercase keys.

    The parameters of this function correspond to the three main parts
    of an HTTP response: the status code, headers, and body. Assuming
    no errors which lead to an exception being raised, a 3-item tuple
    is returned. The first item is the decoded body (typically a JSON
    object, but possibly None or a string depending on the content
    type of the body). The second item is an instance of RateLimit
    based on what the response specified.

    The last item of the tuple is the URL where to request the next
    part of results. If there are no more results then None is
    returned. Do be aware that the URL can be a URI template and so
    may need to be expanded.

    If the status code is anything other than 200, 201, or 204, then
    an HTTPException is raised.
    """
    data = _decode_body(headers.get("content-type"), body)
    if status_code in {200, 201, 204}:
        # Success: hand back the payload plus pagination/rate-limit metadata.
        return data, RateLimit.from_http(headers), _next_link(headers.get("link"))
    else:
        # Error payloads usually carry a "message" key; tolerate bodies that
        # decoded to None (TypeError) or lack the key (KeyError).
        try:
            message = data["message"]
        except (TypeError, KeyError):
            message = None
        exc_type: Type[HTTPException]
        if status_code >= 500:
            exc_type = GitHubBroken
        elif status_code >= 400:
            exc_type = BadRequest
            if status_code == 403:
                # A 403 with an exhausted rate limit is surfaced as the more
                # specific RateLimitExceeded rather than a generic BadRequest.
                rate_limit = RateLimit.from_http(headers)
                if rate_limit and not rate_limit.remaining:
                    raise RateLimitExceeded(rate_limit, message)
            elif status_code == 422:
                # Validation failures list the offending fields; fold them
                # into the message for a more actionable error.
                errors = data.get("errors", None)
                if errors:
                    fields = ", ".join(repr(e["field"]) for e in errors)
                    message = f"{message} for {fields}"
                else:
                    message = data["message"]
                raise InvalidField(errors, message)
        elif status_code >= 300:
            exc_type = RedirectionException
        else:
            exc_type = HTTPException
        status_code_enum = http.HTTPStatus(status_code)
        # Only pass the message through when one was actually found.
        args: Union[Tuple[http.HTTPStatus, str], Tuple[http.HTTPStatus]]
        if message:
            args = status_code_enum, message
        else:
            args = status_code_enum,
        raise exc_type(*args)
DOMAIN = "https://api.github.com"


def format_url(url: str, url_vars: Mapping[str, Any]) -> str:
    """Construct a URL for the GitHub API.

    The URL may be absolute or relative. In the latter case the appropriate
    domain will be added. This is to help when copying the relative URL
    directly from the GitHub developer documentation.

    The dict provided in url_vars is used in URI template formatting.
    """
    # urljoin() leaves an already fully-qualified URL untouched.
    absolute = urllib.parse.urljoin(DOMAIN, url)
    expanded: str = uritemplate.expand(absolute, var_dict=url_vars)
    return expanded
|
brettcannon/gidgethub
|
gidgethub/sansio.py
|
_decode_body
|
python
|
def _decode_body(content_type: Optional[str], body: bytes,
*, strict: bool = False) -> Any:
type_, encoding = _parse_content_type(content_type)
if not len(body) or not content_type:
return None
decoded_body = body.decode(encoding)
if type_ == "application/json":
return json.loads(decoded_body)
elif type_ == "application/x-www-form-urlencoded":
return json.loads(urllib.parse.parse_qs(decoded_body)["payload"][0])
elif strict:
raise ValueError(f"unrecognized content type: {type_!r}")
return decoded_body
|
Decode an HTTP body based on the specified content type.
If 'strict' is true, then raise ValueError if the content type
is not recognized. Otherwise simply returned the body as a decoded
string.
|
train
|
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/sansio.py#L39-L57
|
[
"def _parse_content_type(content_type: Optional[str]) -> Tuple[Optional[str], str]:\n \"\"\"Tease out the content-type and character encoding.\n\n A default character encoding of UTF-8 is used, so the content-type\n must be used to determine if any decoding is necessary to begin\n with.\n \"\"\"\n if not content_type:\n return None, \"utf-8\"\n else:\n type_, parameters = cgi.parse_header(content_type)\n encoding = parameters.get(\"charset\", \"utf-8\")\n return type_, encoding\n"
] |
"""Code to help with HTTP requests, responses, and events from GitHub's developer API.
This code has been constructed to perform no I/O of its own. This allows you to
use any HTTP library you prefer while not having to implement common details
when working with GitHub's API (e.g. validating webhook events or specifying the
API version you want your request to work against).
"""
import cgi
import datetime
import hashlib
import hmac
import http
import json
import re
from typing import Any, Dict, Mapping, Optional, Tuple, Type, Union
import urllib.parse
import uritemplate
from . import (BadRequest, GitHubBroken, HTTPException, InvalidField,
RateLimitExceeded, RedirectionException, ValidationFailure)
def _parse_content_type(content_type: Optional[str]) -> Tuple[Optional[str], str]:
    """Tease out the content-type and character encoding.

    A default character encoding of UTF-8 is used, so the content-type
    must be used to determine if any decoding is necessary to begin
    with.
    """
    if not content_type:
        # No header (None or empty string): unknown media type, assume UTF-8.
        return None, "utf-8"
    else:
        # cgi.parse_header splits e.g. "application/json; charset=latin-1"
        # into the media type and a dict of its parameters.
        type_, parameters = cgi.parse_header(content_type)
        encoding = parameters.get("charset", "utf-8")
        return type_, encoding
def validate_event(payload: bytes, *, signature: str, secret: str) -> None:
"""Validate the signature of a webhook event."""
# https://developer.github.com/webhooks/securing/#validating-payloads-from-github
signature_prefix = "sha1="
if not signature.startswith(signature_prefix):
raise ValidationFailure("signature does not start with "
f"{repr(signature_prefix)}")
hmac_ = hmac.new(secret.encode("UTF-8"), msg=payload, digestmod="sha1")
calculated_sig = signature_prefix + hmac_.hexdigest()
if not hmac.compare_digest(signature, calculated_sig):
raise ValidationFailure("payload's signature does not align "
"with the secret")
class Event:
"""Details of a GitHub webhook event."""
def __init__(self, data: Any, *, event: str, delivery_id: str) -> None:
# https://developer.github.com/v3/activity/events/types/
# https://developer.github.com/webhooks/#delivery-headers
self.data = data
# Event is not an enum as GitHub provides the string. This allows them
# to add new events without having to mirror them here. There's also no
# direct worry of a user typing in the wrong event name and thus no need
# for an enum's typing protection.
self.event = event
self.delivery_id = delivery_id
@classmethod
def from_http(cls, headers: Mapping[str, str], body: bytes,
*, secret: Optional[str] = None) -> "Event":
"""Construct an event from HTTP headers and JSON body data.
The mapping providing the headers is expected to support lowercase keys.
Since this method assumes the body of the HTTP request is JSON, a check
is performed for a content-type of "application/json" (GitHub does
support other content-types). If the content-type does not match,
BadRequest is raised.
If the appropriate headers are provided for event validation, then it
will be performed unconditionally. Any failure in validation
(including not providing a secret) will lead to ValidationFailure being
raised.
"""
if "x-hub-signature" in headers:
if secret is None:
raise ValidationFailure("secret not provided")
validate_event(body, signature=headers["x-hub-signature"],
secret=secret)
elif secret is not None:
raise ValidationFailure("signature is missing")
try:
data = _decode_body(headers["content-type"], body, strict=True)
except (KeyError, ValueError) as exc:
raise BadRequest(http.HTTPStatus(415),
"expected a content-type of "
"'application/json' or "
"'application/x-www-form-urlencoded'") from exc
return cls(data, event=headers["x-github-event"],
delivery_id=headers["x-github-delivery"])
def accept_format(*, version: str = "v3", media: Optional[str] = None,
json: bool = True) -> str:
"""Construct the specification of the format that a request should return.
The version argument defaults to v3 of the GitHub API and is applicable to
all requests. The media argument along with 'json' specifies what format
the request should return, e.g. requesting the rendered HTML of a comment.
Do note that not all of GitHub's API supports alternative formats.
The default arguments of this function will always return the latest stable
version of the GitHub API in the default format that this library is
designed to support.
"""
# https://developer.github.com/v3/media/
# https://developer.github.com/v3/#current-version
accept = f"application/vnd.github.{version}"
if media is not None:
accept += f".{media}"
if json:
accept += "+json"
return accept
def create_headers(requester: str, *, accept: str = accept_format(),
oauth_token: Optional[str] = None,
jwt: Optional[str] = None) -> Dict[str, str]:
"""Create a dict representing GitHub-specific header fields.
The user agent is set according to who the requester is. GitHub asks it be
either a username or project name.
The 'accept' argument corresponds to the 'accept' field and defaults to the
default result of accept_format(). You should only need to change this value
if you are using a different version of the API -- e.g. one that is under
development -- or if you are looking for a different format return type,
e.g. wanting the rendered HTML of a Markdown file.
The 'oauth_token' allows making an authenticated request using a personal access
token. This can be important if you need the expanded rate limit provided
by an authenticated request.
The 'jwt' allows authenticating as a GitHub App by passing in the
bearer token.
You can only supply only one of oauth_token or jwt, not both.
For consistency, all keys in the returned dict will be lowercased.
"""
# user-agent: https://developer.github.com/v3/#user-agent-required
# accept: https://developer.github.com/v3/#current-version
# https://developer.github.com/v3/media/
# authorization: https://developer.github.com/v3/#authentication
# authenticating as a GitHub App: https://developer.github.com/apps/building-github-apps/authenticating-with-github-apps/#authenticating-as-a-github-app
if oauth_token is not None and jwt is not None:
raise ValueError("Cannot pass both oauth_token and jwt.")
headers = {"user-agent": requester, "accept": accept}
if oauth_token is not None:
headers["authorization"] = f"token {oauth_token}"
elif jwt is not None:
headers["authorization"] = f"bearer {jwt}"
return headers
class RateLimit:
"""The rate limit imposed upon the requester.
The 'limit' attribute specifies the rate of requests per hour the client is
limited to.
The 'remaining' attribute specifies how many requests remain within the
current rate limit that the client can make.
The reset_datetime attribute is a datetime object representing when
effectively 'left' resets to 'rate'. The datetime object is timezone-aware
and set to UTC.
The boolean value of an instance whether another request can be made. This
is determined based on whether there are any remaining requests or if the
reset datetime has passed.
"""
# https://developer.github.com/v3/#rate-limiting
def __init__(self, *, limit: int, remaining: int, reset_epoch: float) -> None:
"""Instantiate a RateLimit object.
The reset_epoch argument should be in seconds since the UTC epoch.
"""
# Instance attribute names stem from the name GitHub uses in their
# API documentation.
self.limit = limit
self.remaining = remaining
# Name specifies the type to remind users that the epoch is not stored
# as an int as the GitHub API returns.
self.reset_datetime = datetime.datetime.fromtimestamp(reset_epoch,
datetime.timezone.utc)
def __bool__(self) -> bool:
"""True if requests are remaining or the reset datetime has passed."""
if self.remaining > 0:
return True
else:
now = datetime.datetime.now(datetime.timezone.utc)
return now > self.reset_datetime
def __str__(self) -> str:
"""Provide all details in a reasonable format."""
return f"< {self.remaining:,}/{self.limit:,} until {self.reset_datetime} >"
@classmethod
def from_http(cls, headers: Mapping[str, str]) -> Optional["RateLimit"]:
"""Gather rate limit information from HTTP headers.
The mapping providing the headers is expected to support lowercase
keys. Returns ``None`` if ratelimit info is not found in the headers.
"""
try:
limit = int(headers["x-ratelimit-limit"])
remaining = int(headers["x-ratelimit-remaining"])
reset_epoch = float(headers["x-ratelimit-reset"])
except KeyError:
return None
else:
return cls(limit=limit, remaining=remaining, reset_epoch=reset_epoch)
_link_re = re.compile(r'\<(?P<uri>[^>]+)\>;\s*'
r'(?P<param_type>\w+)="(?P<param_value>\w+)"(,\s*)?')
def _next_link(link: Optional[str]) -> Optional[str]:
# https://developer.github.com/v3/#pagination
# https://tools.ietf.org/html/rfc5988
if link is None:
return None
for match in _link_re.finditer(link):
if match.group("param_type") == "rel":
if match.group("param_value") == "next":
return match.group("uri")
else:
return None
def decipher_response(status_code: int, headers: Mapping[str, str],
body: bytes) -> Tuple[Any, Optional[RateLimit], Optional[str]]:
"""Decipher an HTTP response for a GitHub API request.
The mapping providing the headers is expected to support lowercase keys.
The parameters of this function correspond to the three main parts
of an HTTP response: the status code, headers, and body. Assuming
no errors which lead to an exception being raised, a 3-item tuple
is returned. The first item is the decoded body (typically a JSON
object, but possibly None or a string depending on the content
type of the body). The second item is an instance of RateLimit
based on what the response specified.
The last item of the tuple is the URL where to request the next
part of results. If there are no more results then None is
returned. Do be aware that the URL can be a URI template and so
may need to be expanded.
If the status code is anything other than 200, 201, or 204, then
an HTTPException is raised.
"""
data = _decode_body(headers.get("content-type"), body)
if status_code in {200, 201, 204}:
return data, RateLimit.from_http(headers), _next_link(headers.get("link"))
else:
try:
message = data["message"]
except (TypeError, KeyError):
message = None
exc_type: Type[HTTPException]
if status_code >= 500:
exc_type = GitHubBroken
elif status_code >= 400:
exc_type = BadRequest
if status_code == 403:
rate_limit = RateLimit.from_http(headers)
if rate_limit and not rate_limit.remaining:
raise RateLimitExceeded(rate_limit, message)
elif status_code == 422:
errors = data.get("errors", None)
if errors:
fields = ", ".join(repr(e["field"]) for e in errors)
message = f"{message} for {fields}"
else:
message = data["message"]
raise InvalidField(errors, message)
elif status_code >= 300:
exc_type = RedirectionException
else:
exc_type = HTTPException
status_code_enum = http.HTTPStatus(status_code)
args: Union[Tuple[http.HTTPStatus, str], Tuple[http.HTTPStatus]]
if message:
args = status_code_enum, message
else:
args = status_code_enum,
raise exc_type(*args)
DOMAIN = "https://api.github.com"
def format_url(url: str, url_vars: Mapping[str, Any]) -> str:
"""Construct a URL for the GitHub API.
The URL may be absolute or relative. In the latter case the appropriate
domain will be added. This is to help when copying the relative URL directly
from the GitHub developer documentation.
The dict provided in url_vars is used in URI template formatting.
"""
url = urllib.parse.urljoin(DOMAIN, url) # Works even if 'url' is fully-qualified.
expanded_url: str = uritemplate.expand(url, var_dict=url_vars)
return expanded_url
|
brettcannon/gidgethub
|
gidgethub/sansio.py
|
validate_event
|
python
|
def validate_event(payload: bytes, *, signature: str, secret: str) -> None:
# https://developer.github.com/webhooks/securing/#validating-payloads-from-github
signature_prefix = "sha1="
if not signature.startswith(signature_prefix):
raise ValidationFailure("signature does not start with "
f"{repr(signature_prefix)}")
hmac_ = hmac.new(secret.encode("UTF-8"), msg=payload, digestmod="sha1")
calculated_sig = signature_prefix + hmac_.hexdigest()
if not hmac.compare_digest(signature, calculated_sig):
raise ValidationFailure("payload's signature does not align "
"with the secret")
|
Validate the signature of a webhook event.
|
train
|
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/sansio.py#L60-L71
| null |
"""Code to help with HTTP requests, responses, and events from GitHub's developer API.
This code has been constructed to perform no I/O of its own. This allows you to
use any HTTP library you prefer while not having to implement common details
when working with GitHub's API (e.g. validating webhook events or specifying the
API version you want your request to work against).
"""
import cgi
import datetime
import hashlib
import hmac
import http
import json
import re
from typing import Any, Dict, Mapping, Optional, Tuple, Type, Union
import urllib.parse
import uritemplate
from . import (BadRequest, GitHubBroken, HTTPException, InvalidField,
RateLimitExceeded, RedirectionException, ValidationFailure)
def _parse_content_type(content_type: Optional[str]) -> Tuple[Optional[str], str]:
"""Tease out the content-type and character encoding.
A default character encoding of UTF-8 is used, so the content-type
must be used to determine if any decoding is necessary to begin
with.
"""
if not content_type:
return None, "utf-8"
else:
type_, parameters = cgi.parse_header(content_type)
encoding = parameters.get("charset", "utf-8")
return type_, encoding
def _decode_body(content_type: Optional[str], body: bytes,
*, strict: bool = False) -> Any:
"""Decode an HTTP body based on the specified content type.
If 'strict' is true, then raise ValueError if the content type
is not recognized. Otherwise simply returned the body as a decoded
string.
"""
type_, encoding = _parse_content_type(content_type)
if not len(body) or not content_type:
return None
decoded_body = body.decode(encoding)
if type_ == "application/json":
return json.loads(decoded_body)
elif type_ == "application/x-www-form-urlencoded":
return json.loads(urllib.parse.parse_qs(decoded_body)["payload"][0])
elif strict:
raise ValueError(f"unrecognized content type: {type_!r}")
return decoded_body
class Event:
"""Details of a GitHub webhook event."""
def __init__(self, data: Any, *, event: str, delivery_id: str) -> None:
# https://developer.github.com/v3/activity/events/types/
# https://developer.github.com/webhooks/#delivery-headers
self.data = data
# Event is not an enum as GitHub provides the string. This allows them
# to add new events without having to mirror them here. There's also no
# direct worry of a user typing in the wrong event name and thus no need
# for an enum's typing protection.
self.event = event
self.delivery_id = delivery_id
@classmethod
def from_http(cls, headers: Mapping[str, str], body: bytes,
*, secret: Optional[str] = None) -> "Event":
"""Construct an event from HTTP headers and JSON body data.
The mapping providing the headers is expected to support lowercase keys.
Since this method assumes the body of the HTTP request is JSON, a check
is performed for a content-type of "application/json" (GitHub does
support other content-types). If the content-type does not match,
BadRequest is raised.
If the appropriate headers are provided for event validation, then it
will be performed unconditionally. Any failure in validation
(including not providing a secret) will lead to ValidationFailure being
raised.
"""
if "x-hub-signature" in headers:
if secret is None:
raise ValidationFailure("secret not provided")
validate_event(body, signature=headers["x-hub-signature"],
secret=secret)
elif secret is not None:
raise ValidationFailure("signature is missing")
try:
data = _decode_body(headers["content-type"], body, strict=True)
except (KeyError, ValueError) as exc:
raise BadRequest(http.HTTPStatus(415),
"expected a content-type of "
"'application/json' or "
"'application/x-www-form-urlencoded'") from exc
return cls(data, event=headers["x-github-event"],
delivery_id=headers["x-github-delivery"])
def accept_format(*, version: str = "v3", media: Optional[str] = None,
json: bool = True) -> str:
"""Construct the specification of the format that a request should return.
The version argument defaults to v3 of the GitHub API and is applicable to
all requests. The media argument along with 'json' specifies what format
the request should return, e.g. requesting the rendered HTML of a comment.
Do note that not all of GitHub's API supports alternative formats.
The default arguments of this function will always return the latest stable
version of the GitHub API in the default format that this library is
designed to support.
"""
# https://developer.github.com/v3/media/
# https://developer.github.com/v3/#current-version
accept = f"application/vnd.github.{version}"
if media is not None:
accept += f".{media}"
if json:
accept += "+json"
return accept
def create_headers(requester: str, *, accept: str = accept_format(),
oauth_token: Optional[str] = None,
jwt: Optional[str] = None) -> Dict[str, str]:
"""Create a dict representing GitHub-specific header fields.
The user agent is set according to who the requester is. GitHub asks it be
either a username or project name.
The 'accept' argument corresponds to the 'accept' field and defaults to the
default result of accept_format(). You should only need to change this value
if you are using a different version of the API -- e.g. one that is under
development -- or if you are looking for a different format return type,
e.g. wanting the rendered HTML of a Markdown file.
The 'oauth_token' allows making an authenticated request using a personal access
token. This can be important if you need the expanded rate limit provided
by an authenticated request.
The 'jwt' allows authenticating as a GitHub App by passing in the
bearer token.
You can only supply only one of oauth_token or jwt, not both.
For consistency, all keys in the returned dict will be lowercased.
"""
# user-agent: https://developer.github.com/v3/#user-agent-required
# accept: https://developer.github.com/v3/#current-version
# https://developer.github.com/v3/media/
# authorization: https://developer.github.com/v3/#authentication
# authenticating as a GitHub App: https://developer.github.com/apps/building-github-apps/authenticating-with-github-apps/#authenticating-as-a-github-app
if oauth_token is not None and jwt is not None:
raise ValueError("Cannot pass both oauth_token and jwt.")
headers = {"user-agent": requester, "accept": accept}
if oauth_token is not None:
headers["authorization"] = f"token {oauth_token}"
elif jwt is not None:
headers["authorization"] = f"bearer {jwt}"
return headers
class RateLimit:
"""The rate limit imposed upon the requester.
The 'limit' attribute specifies the rate of requests per hour the client is
limited to.
The 'remaining' attribute specifies how many requests remain within the
current rate limit that the client can make.
The reset_datetime attribute is a datetime object representing when
effectively 'left' resets to 'rate'. The datetime object is timezone-aware
and set to UTC.
The boolean value of an instance whether another request can be made. This
is determined based on whether there are any remaining requests or if the
reset datetime has passed.
"""
# https://developer.github.com/v3/#rate-limiting
def __init__(self, *, limit: int, remaining: int, reset_epoch: float) -> None:
"""Instantiate a RateLimit object.
The reset_epoch argument should be in seconds since the UTC epoch.
"""
# Instance attribute names stem from the name GitHub uses in their
# API documentation.
self.limit = limit
self.remaining = remaining
# Name specifies the type to remind users that the epoch is not stored
# as an int as the GitHub API returns.
self.reset_datetime = datetime.datetime.fromtimestamp(reset_epoch,
datetime.timezone.utc)
def __bool__(self) -> bool:
"""True if requests are remaining or the reset datetime has passed."""
if self.remaining > 0:
return True
else:
now = datetime.datetime.now(datetime.timezone.utc)
return now > self.reset_datetime
def __str__(self) -> str:
"""Provide all details in a reasonable format."""
return f"< {self.remaining:,}/{self.limit:,} until {self.reset_datetime} >"
@classmethod
def from_http(cls, headers: Mapping[str, str]) -> Optional["RateLimit"]:
"""Gather rate limit information from HTTP headers.
The mapping providing the headers is expected to support lowercase
keys. Returns ``None`` if ratelimit info is not found in the headers.
"""
try:
limit = int(headers["x-ratelimit-limit"])
remaining = int(headers["x-ratelimit-remaining"])
reset_epoch = float(headers["x-ratelimit-reset"])
except KeyError:
return None
else:
return cls(limit=limit, remaining=remaining, reset_epoch=reset_epoch)
_link_re = re.compile(r'\<(?P<uri>[^>]+)\>;\s*'
r'(?P<param_type>\w+)="(?P<param_value>\w+)"(,\s*)?')
def _next_link(link: Optional[str]) -> Optional[str]:
# https://developer.github.com/v3/#pagination
# https://tools.ietf.org/html/rfc5988
if link is None:
return None
for match in _link_re.finditer(link):
if match.group("param_type") == "rel":
if match.group("param_value") == "next":
return match.group("uri")
else:
return None
def decipher_response(status_code: int, headers: Mapping[str, str],
body: bytes) -> Tuple[Any, Optional[RateLimit], Optional[str]]:
"""Decipher an HTTP response for a GitHub API request.
The mapping providing the headers is expected to support lowercase keys.
The parameters of this function correspond to the three main parts
of an HTTP response: the status code, headers, and body. Assuming
no errors which lead to an exception being raised, a 3-item tuple
is returned. The first item is the decoded body (typically a JSON
object, but possibly None or a string depending on the content
type of the body). The second item is an instance of RateLimit
based on what the response specified.
The last item of the tuple is the URL where to request the next
part of results. If there are no more results then None is
returned. Do be aware that the URL can be a URI template and so
may need to be expanded.
If the status code is anything other than 200, 201, or 204, then
an HTTPException is raised.
"""
data = _decode_body(headers.get("content-type"), body)
if status_code in {200, 201, 204}:
return data, RateLimit.from_http(headers), _next_link(headers.get("link"))
else:
try:
message = data["message"]
except (TypeError, KeyError):
message = None
exc_type: Type[HTTPException]
if status_code >= 500:
exc_type = GitHubBroken
elif status_code >= 400:
exc_type = BadRequest
if status_code == 403:
rate_limit = RateLimit.from_http(headers)
if rate_limit and not rate_limit.remaining:
raise RateLimitExceeded(rate_limit, message)
elif status_code == 422:
errors = data.get("errors", None)
if errors:
fields = ", ".join(repr(e["field"]) for e in errors)
message = f"{message} for {fields}"
else:
message = data["message"]
raise InvalidField(errors, message)
elif status_code >= 300:
exc_type = RedirectionException
else:
exc_type = HTTPException
status_code_enum = http.HTTPStatus(status_code)
args: Union[Tuple[http.HTTPStatus, str], Tuple[http.HTTPStatus]]
if message:
args = status_code_enum, message
else:
args = status_code_enum,
raise exc_type(*args)
DOMAIN = "https://api.github.com"
def format_url(url: str, url_vars: Mapping[str, Any]) -> str:
"""Construct a URL for the GitHub API.
The URL may be absolute or relative. In the latter case the appropriate
domain will be added. This is to help when copying the relative URL directly
from the GitHub developer documentation.
The dict provided in url_vars is used in URI template formatting.
"""
url = urllib.parse.urljoin(DOMAIN, url) # Works even if 'url' is fully-qualified.
expanded_url: str = uritemplate.expand(url, var_dict=url_vars)
return expanded_url
|
brettcannon/gidgethub
|
gidgethub/sansio.py
|
accept_format
|
python
|
def accept_format(*, version: str = "v3", media: Optional[str] = None,
json: bool = True) -> str:
# https://developer.github.com/v3/media/
# https://developer.github.com/v3/#current-version
accept = f"application/vnd.github.{version}"
if media is not None:
accept += f".{media}"
if json:
accept += "+json"
return accept
|
Construct the specification of the format that a request should return.
The version argument defaults to v3 of the GitHub API and is applicable to
all requests. The media argument along with 'json' specifies what format
the request should return, e.g. requesting the rendered HTML of a comment.
Do note that not all of GitHub's API supports alternative formats.
The default arguments of this function will always return the latest stable
version of the GitHub API in the default format that this library is
designed to support.
|
train
|
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/sansio.py#L125-L145
| null |
"""Code to help with HTTP requests, responses, and events from GitHub's developer API.
This code has been constructed to perform no I/O of its own. This allows you to
use any HTTP library you prefer while not having to implement common details
when working with GitHub's API (e.g. validating webhook events or specifying the
API version you want your request to work against).
"""
import cgi
import datetime
import hashlib
import hmac
import http
import json
import re
from typing import Any, Dict, Mapping, Optional, Tuple, Type, Union
import urllib.parse
import uritemplate
from . import (BadRequest, GitHubBroken, HTTPException, InvalidField,
RateLimitExceeded, RedirectionException, ValidationFailure)
def _parse_content_type(content_type: Optional[str]) -> Tuple[Optional[str], str]:
"""Tease out the content-type and character encoding.
A default character encoding of UTF-8 is used, so the content-type
must be used to determine if any decoding is necessary to begin
with.
"""
if not content_type:
return None, "utf-8"
else:
type_, parameters = cgi.parse_header(content_type)
encoding = parameters.get("charset", "utf-8")
return type_, encoding
def _decode_body(content_type: Optional[str], body: bytes,
*, strict: bool = False) -> Any:
"""Decode an HTTP body based on the specified content type.
If 'strict' is true, then raise ValueError if the content type
is not recognized. Otherwise simply returned the body as a decoded
string.
"""
type_, encoding = _parse_content_type(content_type)
if not len(body) or not content_type:
return None
decoded_body = body.decode(encoding)
if type_ == "application/json":
return json.loads(decoded_body)
elif type_ == "application/x-www-form-urlencoded":
return json.loads(urllib.parse.parse_qs(decoded_body)["payload"][0])
elif strict:
raise ValueError(f"unrecognized content type: {type_!r}")
return decoded_body
def validate_event(payload: bytes, *, signature: str, secret: str) -> None:
"""Validate the signature of a webhook event."""
# https://developer.github.com/webhooks/securing/#validating-payloads-from-github
signature_prefix = "sha1="
if not signature.startswith(signature_prefix):
raise ValidationFailure("signature does not start with "
f"{repr(signature_prefix)}")
hmac_ = hmac.new(secret.encode("UTF-8"), msg=payload, digestmod="sha1")
calculated_sig = signature_prefix + hmac_.hexdigest()
if not hmac.compare_digest(signature, calculated_sig):
raise ValidationFailure("payload's signature does not align "
"with the secret")
class Event:
"""Details of a GitHub webhook event."""
def __init__(self, data: Any, *, event: str, delivery_id: str) -> None:
# https://developer.github.com/v3/activity/events/types/
# https://developer.github.com/webhooks/#delivery-headers
self.data = data
# Event is not an enum as GitHub provides the string. This allows them
# to add new events without having to mirror them here. There's also no
# direct worry of a user typing in the wrong event name and thus no need
# for an enum's typing protection.
self.event = event
self.delivery_id = delivery_id
@classmethod
def from_http(cls, headers: Mapping[str, str], body: bytes,
*, secret: Optional[str] = None) -> "Event":
"""Construct an event from HTTP headers and JSON body data.
The mapping providing the headers is expected to support lowercase keys.
Since this method assumes the body of the HTTP request is JSON, a check
is performed for a content-type of "application/json" (GitHub does
support other content-types). If the content-type does not match,
BadRequest is raised.
If the appropriate headers are provided for event validation, then it
will be performed unconditionally. Any failure in validation
(including not providing a secret) will lead to ValidationFailure being
raised.
"""
if "x-hub-signature" in headers:
if secret is None:
raise ValidationFailure("secret not provided")
validate_event(body, signature=headers["x-hub-signature"],
secret=secret)
elif secret is not None:
raise ValidationFailure("signature is missing")
try:
data = _decode_body(headers["content-type"], body, strict=True)
except (KeyError, ValueError) as exc:
raise BadRequest(http.HTTPStatus(415),
"expected a content-type of "
"'application/json' or "
"'application/x-www-form-urlencoded'") from exc
return cls(data, event=headers["x-github-event"],
delivery_id=headers["x-github-delivery"])
def create_headers(requester: str, *, accept: str = accept_format(),
oauth_token: Optional[str] = None,
jwt: Optional[str] = None) -> Dict[str, str]:
"""Create a dict representing GitHub-specific header fields.
The user agent is set according to who the requester is. GitHub asks it be
either a username or project name.
The 'accept' argument corresponds to the 'accept' field and defaults to the
default result of accept_format(). You should only need to change this value
if you are using a different version of the API -- e.g. one that is under
development -- or if you are looking for a different format return type,
e.g. wanting the rendered HTML of a Markdown file.
The 'oauth_token' allows making an authenticated request using a personal access
token. This can be important if you need the expanded rate limit provided
by an authenticated request.
The 'jwt' allows authenticating as a GitHub App by passing in the
bearer token.
You can only supply only one of oauth_token or jwt, not both.
For consistency, all keys in the returned dict will be lowercased.
"""
# user-agent: https://developer.github.com/v3/#user-agent-required
# accept: https://developer.github.com/v3/#current-version
# https://developer.github.com/v3/media/
# authorization: https://developer.github.com/v3/#authentication
# authenticating as a GitHub App: https://developer.github.com/apps/building-github-apps/authenticating-with-github-apps/#authenticating-as-a-github-app
if oauth_token is not None and jwt is not None:
raise ValueError("Cannot pass both oauth_token and jwt.")
headers = {"user-agent": requester, "accept": accept}
if oauth_token is not None:
headers["authorization"] = f"token {oauth_token}"
elif jwt is not None:
headers["authorization"] = f"bearer {jwt}"
return headers
class RateLimit:
"""The rate limit imposed upon the requester.
The 'limit' attribute specifies the rate of requests per hour the client is
limited to.
The 'remaining' attribute specifies how many requests remain within the
current rate limit that the client can make.
The reset_datetime attribute is a datetime object representing when
effectively 'left' resets to 'rate'. The datetime object is timezone-aware
and set to UTC.
The boolean value of an instance whether another request can be made. This
is determined based on whether there are any remaining requests or if the
reset datetime has passed.
"""
# https://developer.github.com/v3/#rate-limiting
def __init__(self, *, limit: int, remaining: int, reset_epoch: float) -> None:
"""Instantiate a RateLimit object.
The reset_epoch argument should be in seconds since the UTC epoch.
"""
# Instance attribute names stem from the name GitHub uses in their
# API documentation.
self.limit = limit
self.remaining = remaining
# Name specifies the type to remind users that the epoch is not stored
# as an int as the GitHub API returns.
self.reset_datetime = datetime.datetime.fromtimestamp(reset_epoch,
datetime.timezone.utc)
def __bool__(self) -> bool:
"""True if requests are remaining or the reset datetime has passed."""
if self.remaining > 0:
return True
else:
now = datetime.datetime.now(datetime.timezone.utc)
return now > self.reset_datetime
def __str__(self) -> str:
"""Provide all details in a reasonable format."""
return f"< {self.remaining:,}/{self.limit:,} until {self.reset_datetime} >"
@classmethod
def from_http(cls, headers: Mapping[str, str]) -> Optional["RateLimit"]:
"""Gather rate limit information from HTTP headers.
The mapping providing the headers is expected to support lowercase
keys. Returns ``None`` if ratelimit info is not found in the headers.
"""
try:
limit = int(headers["x-ratelimit-limit"])
remaining = int(headers["x-ratelimit-remaining"])
reset_epoch = float(headers["x-ratelimit-reset"])
except KeyError:
return None
else:
return cls(limit=limit, remaining=remaining, reset_epoch=reset_epoch)
_link_re = re.compile(r'\<(?P<uri>[^>]+)\>;\s*'
r'(?P<param_type>\w+)="(?P<param_value>\w+)"(,\s*)?')
def _next_link(link: Optional[str]) -> Optional[str]:
# https://developer.github.com/v3/#pagination
# https://tools.ietf.org/html/rfc5988
if link is None:
return None
for match in _link_re.finditer(link):
if match.group("param_type") == "rel":
if match.group("param_value") == "next":
return match.group("uri")
else:
return None
def decipher_response(status_code: int, headers: Mapping[str, str],
body: bytes) -> Tuple[Any, Optional[RateLimit], Optional[str]]:
"""Decipher an HTTP response for a GitHub API request.
The mapping providing the headers is expected to support lowercase keys.
The parameters of this function correspond to the three main parts
of an HTTP response: the status code, headers, and body. Assuming
no errors which lead to an exception being raised, a 3-item tuple
is returned. The first item is the decoded body (typically a JSON
object, but possibly None or a string depending on the content
type of the body). The second item is an instance of RateLimit
based on what the response specified.
The last item of the tuple is the URL where to request the next
part of results. If there are no more results then None is
returned. Do be aware that the URL can be a URI template and so
may need to be expanded.
If the status code is anything other than 200, 201, or 204, then
an HTTPException is raised.
"""
data = _decode_body(headers.get("content-type"), body)
if status_code in {200, 201, 204}:
return data, RateLimit.from_http(headers), _next_link(headers.get("link"))
else:
try:
message = data["message"]
except (TypeError, KeyError):
message = None
exc_type: Type[HTTPException]
if status_code >= 500:
exc_type = GitHubBroken
elif status_code >= 400:
exc_type = BadRequest
if status_code == 403:
rate_limit = RateLimit.from_http(headers)
if rate_limit and not rate_limit.remaining:
raise RateLimitExceeded(rate_limit, message)
elif status_code == 422:
errors = data.get("errors", None)
if errors:
fields = ", ".join(repr(e["field"]) for e in errors)
message = f"{message} for {fields}"
else:
message = data["message"]
raise InvalidField(errors, message)
elif status_code >= 300:
exc_type = RedirectionException
else:
exc_type = HTTPException
status_code_enum = http.HTTPStatus(status_code)
args: Union[Tuple[http.HTTPStatus, str], Tuple[http.HTTPStatus]]
if message:
args = status_code_enum, message
else:
args = status_code_enum,
raise exc_type(*args)
DOMAIN = "https://api.github.com"
def format_url(url: str, url_vars: Mapping[str, Any]) -> str:
"""Construct a URL for the GitHub API.
The URL may be absolute or relative. In the latter case the appropriate
domain will be added. This is to help when copying the relative URL directly
from the GitHub developer documentation.
The dict provided in url_vars is used in URI template formatting.
"""
url = urllib.parse.urljoin(DOMAIN, url) # Works even if 'url' is fully-qualified.
expanded_url: str = uritemplate.expand(url, var_dict=url_vars)
return expanded_url
|
brettcannon/gidgethub
|
gidgethub/sansio.py
|
create_headers
|
python
|
def create_headers(requester: str, *, accept: str = accept_format(),
oauth_token: Optional[str] = None,
jwt: Optional[str] = None) -> Dict[str, str]:
# user-agent: https://developer.github.com/v3/#user-agent-required
# accept: https://developer.github.com/v3/#current-version
# https://developer.github.com/v3/media/
# authorization: https://developer.github.com/v3/#authentication
# authenticating as a GitHub App: https://developer.github.com/apps/building-github-apps/authenticating-with-github-apps/#authenticating-as-a-github-app
if oauth_token is not None and jwt is not None:
raise ValueError("Cannot pass both oauth_token and jwt.")
headers = {"user-agent": requester, "accept": accept}
if oauth_token is not None:
headers["authorization"] = f"token {oauth_token}"
elif jwt is not None:
headers["authorization"] = f"bearer {jwt}"
return headers
|
Create a dict representing GitHub-specific header fields.
The user agent is set according to who the requester is. GitHub asks it be
either a username or project name.
The 'accept' argument corresponds to the 'accept' field and defaults to the
default result of accept_format(). You should only need to change this value
if you are using a different version of the API -- e.g. one that is under
development -- or if you are looking for a different format return type,
e.g. wanting the rendered HTML of a Markdown file.
The 'oauth_token' allows making an authenticated request using a personal access
token. This can be important if you need the expanded rate limit provided
by an authenticated request.
The 'jwt' allows authenticating as a GitHub App by passing in the
bearer token.
You can only supply only one of oauth_token or jwt, not both.
For consistency, all keys in the returned dict will be lowercased.
|
train
|
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/sansio.py#L148-L186
| null |
"""Code to help with HTTP requests, responses, and events from GitHub's developer API.
This code has been constructed to perform no I/O of its own. This allows you to
use any HTTP library you prefer while not having to implement common details
when working with GitHub's API (e.g. validating webhook events or specifying the
API version you want your request to work against).
"""
import cgi
import datetime
import hashlib
import hmac
import http
import json
import re
from typing import Any, Dict, Mapping, Optional, Tuple, Type, Union
import urllib.parse
import uritemplate
from . import (BadRequest, GitHubBroken, HTTPException, InvalidField,
RateLimitExceeded, RedirectionException, ValidationFailure)
def _parse_content_type(content_type: Optional[str]) -> Tuple[Optional[str], str]:
"""Tease out the content-type and character encoding.
A default character encoding of UTF-8 is used, so the content-type
must be used to determine if any decoding is necessary to begin
with.
"""
if not content_type:
return None, "utf-8"
else:
type_, parameters = cgi.parse_header(content_type)
encoding = parameters.get("charset", "utf-8")
return type_, encoding
def _decode_body(content_type: Optional[str], body: bytes,
*, strict: bool = False) -> Any:
"""Decode an HTTP body based on the specified content type.
If 'strict' is true, then raise ValueError if the content type
is not recognized. Otherwise simply returned the body as a decoded
string.
"""
type_, encoding = _parse_content_type(content_type)
if not len(body) or not content_type:
return None
decoded_body = body.decode(encoding)
if type_ == "application/json":
return json.loads(decoded_body)
elif type_ == "application/x-www-form-urlencoded":
return json.loads(urllib.parse.parse_qs(decoded_body)["payload"][0])
elif strict:
raise ValueError(f"unrecognized content type: {type_!r}")
return decoded_body
def validate_event(payload: bytes, *, signature: str, secret: str) -> None:
"""Validate the signature of a webhook event."""
# https://developer.github.com/webhooks/securing/#validating-payloads-from-github
signature_prefix = "sha1="
if not signature.startswith(signature_prefix):
raise ValidationFailure("signature does not start with "
f"{repr(signature_prefix)}")
hmac_ = hmac.new(secret.encode("UTF-8"), msg=payload, digestmod="sha1")
calculated_sig = signature_prefix + hmac_.hexdigest()
if not hmac.compare_digest(signature, calculated_sig):
raise ValidationFailure("payload's signature does not align "
"with the secret")
class Event:
"""Details of a GitHub webhook event."""
def __init__(self, data: Any, *, event: str, delivery_id: str) -> None:
# https://developer.github.com/v3/activity/events/types/
# https://developer.github.com/webhooks/#delivery-headers
self.data = data
# Event is not an enum as GitHub provides the string. This allows them
# to add new events without having to mirror them here. There's also no
# direct worry of a user typing in the wrong event name and thus no need
# for an enum's typing protection.
self.event = event
self.delivery_id = delivery_id
@classmethod
def from_http(cls, headers: Mapping[str, str], body: bytes,
*, secret: Optional[str] = None) -> "Event":
"""Construct an event from HTTP headers and JSON body data.
The mapping providing the headers is expected to support lowercase keys.
Since this method assumes the body of the HTTP request is JSON, a check
is performed for a content-type of "application/json" (GitHub does
support other content-types). If the content-type does not match,
BadRequest is raised.
If the appropriate headers are provided for event validation, then it
will be performed unconditionally. Any failure in validation
(including not providing a secret) will lead to ValidationFailure being
raised.
"""
if "x-hub-signature" in headers:
if secret is None:
raise ValidationFailure("secret not provided")
validate_event(body, signature=headers["x-hub-signature"],
secret=secret)
elif secret is not None:
raise ValidationFailure("signature is missing")
try:
data = _decode_body(headers["content-type"], body, strict=True)
except (KeyError, ValueError) as exc:
raise BadRequest(http.HTTPStatus(415),
"expected a content-type of "
"'application/json' or "
"'application/x-www-form-urlencoded'") from exc
return cls(data, event=headers["x-github-event"],
delivery_id=headers["x-github-delivery"])
def accept_format(*, version: str = "v3", media: Optional[str] = None,
json: bool = True) -> str:
"""Construct the specification of the format that a request should return.
The version argument defaults to v3 of the GitHub API and is applicable to
all requests. The media argument along with 'json' specifies what format
the request should return, e.g. requesting the rendered HTML of a comment.
Do note that not all of GitHub's API supports alternative formats.
The default arguments of this function will always return the latest stable
version of the GitHub API in the default format that this library is
designed to support.
"""
# https://developer.github.com/v3/media/
# https://developer.github.com/v3/#current-version
accept = f"application/vnd.github.{version}"
if media is not None:
accept += f".{media}"
if json:
accept += "+json"
return accept
class RateLimit:
"""The rate limit imposed upon the requester.
The 'limit' attribute specifies the rate of requests per hour the client is
limited to.
The 'remaining' attribute specifies how many requests remain within the
current rate limit that the client can make.
The reset_datetime attribute is a datetime object representing when
effectively 'left' resets to 'rate'. The datetime object is timezone-aware
and set to UTC.
The boolean value of an instance whether another request can be made. This
is determined based on whether there are any remaining requests or if the
reset datetime has passed.
"""
# https://developer.github.com/v3/#rate-limiting
def __init__(self, *, limit: int, remaining: int, reset_epoch: float) -> None:
"""Instantiate a RateLimit object.
The reset_epoch argument should be in seconds since the UTC epoch.
"""
# Instance attribute names stem from the name GitHub uses in their
# API documentation.
self.limit = limit
self.remaining = remaining
# Name specifies the type to remind users that the epoch is not stored
# as an int as the GitHub API returns.
self.reset_datetime = datetime.datetime.fromtimestamp(reset_epoch,
datetime.timezone.utc)
def __bool__(self) -> bool:
"""True if requests are remaining or the reset datetime has passed."""
if self.remaining > 0:
return True
else:
now = datetime.datetime.now(datetime.timezone.utc)
return now > self.reset_datetime
def __str__(self) -> str:
"""Provide all details in a reasonable format."""
return f"< {self.remaining:,}/{self.limit:,} until {self.reset_datetime} >"
@classmethod
def from_http(cls, headers: Mapping[str, str]) -> Optional["RateLimit"]:
"""Gather rate limit information from HTTP headers.
The mapping providing the headers is expected to support lowercase
keys. Returns ``None`` if ratelimit info is not found in the headers.
"""
try:
limit = int(headers["x-ratelimit-limit"])
remaining = int(headers["x-ratelimit-remaining"])
reset_epoch = float(headers["x-ratelimit-reset"])
except KeyError:
return None
else:
return cls(limit=limit, remaining=remaining, reset_epoch=reset_epoch)
_link_re = re.compile(r'\<(?P<uri>[^>]+)\>;\s*'
r'(?P<param_type>\w+)="(?P<param_value>\w+)"(,\s*)?')
def _next_link(link: Optional[str]) -> Optional[str]:
# https://developer.github.com/v3/#pagination
# https://tools.ietf.org/html/rfc5988
if link is None:
return None
for match in _link_re.finditer(link):
if match.group("param_type") == "rel":
if match.group("param_value") == "next":
return match.group("uri")
else:
return None
def decipher_response(status_code: int, headers: Mapping[str, str],
body: bytes) -> Tuple[Any, Optional[RateLimit], Optional[str]]:
"""Decipher an HTTP response for a GitHub API request.
The mapping providing the headers is expected to support lowercase keys.
The parameters of this function correspond to the three main parts
of an HTTP response: the status code, headers, and body. Assuming
no errors which lead to an exception being raised, a 3-item tuple
is returned. The first item is the decoded body (typically a JSON
object, but possibly None or a string depending on the content
type of the body). The second item is an instance of RateLimit
based on what the response specified.
The last item of the tuple is the URL where to request the next
part of results. If there are no more results then None is
returned. Do be aware that the URL can be a URI template and so
may need to be expanded.
If the status code is anything other than 200, 201, or 204, then
an HTTPException is raised.
"""
data = _decode_body(headers.get("content-type"), body)
if status_code in {200, 201, 204}:
return data, RateLimit.from_http(headers), _next_link(headers.get("link"))
else:
try:
message = data["message"]
except (TypeError, KeyError):
message = None
exc_type: Type[HTTPException]
if status_code >= 500:
exc_type = GitHubBroken
elif status_code >= 400:
exc_type = BadRequest
if status_code == 403:
rate_limit = RateLimit.from_http(headers)
if rate_limit and not rate_limit.remaining:
raise RateLimitExceeded(rate_limit, message)
elif status_code == 422:
errors = data.get("errors", None)
if errors:
fields = ", ".join(repr(e["field"]) for e in errors)
message = f"{message} for {fields}"
else:
message = data["message"]
raise InvalidField(errors, message)
elif status_code >= 300:
exc_type = RedirectionException
else:
exc_type = HTTPException
status_code_enum = http.HTTPStatus(status_code)
args: Union[Tuple[http.HTTPStatus, str], Tuple[http.HTTPStatus]]
if message:
args = status_code_enum, message
else:
args = status_code_enum,
raise exc_type(*args)
DOMAIN = "https://api.github.com"
def format_url(url: str, url_vars: Mapping[str, Any]) -> str:
"""Construct a URL for the GitHub API.
The URL may be absolute or relative. In the latter case the appropriate
domain will be added. This is to help when copying the relative URL directly
from the GitHub developer documentation.
The dict provided in url_vars is used in URI template formatting.
"""
url = urllib.parse.urljoin(DOMAIN, url) # Works even if 'url' is fully-qualified.
expanded_url: str = uritemplate.expand(url, var_dict=url_vars)
return expanded_url
|
brettcannon/gidgethub
|
gidgethub/sansio.py
|
decipher_response
|
python
|
def decipher_response(status_code: int, headers: Mapping[str, str],
body: bytes) -> Tuple[Any, Optional[RateLimit], Optional[str]]:
data = _decode_body(headers.get("content-type"), body)
if status_code in {200, 201, 204}:
return data, RateLimit.from_http(headers), _next_link(headers.get("link"))
else:
try:
message = data["message"]
except (TypeError, KeyError):
message = None
exc_type: Type[HTTPException]
if status_code >= 500:
exc_type = GitHubBroken
elif status_code >= 400:
exc_type = BadRequest
if status_code == 403:
rate_limit = RateLimit.from_http(headers)
if rate_limit and not rate_limit.remaining:
raise RateLimitExceeded(rate_limit, message)
elif status_code == 422:
errors = data.get("errors", None)
if errors:
fields = ", ".join(repr(e["field"]) for e in errors)
message = f"{message} for {fields}"
else:
message = data["message"]
raise InvalidField(errors, message)
elif status_code >= 300:
exc_type = RedirectionException
else:
exc_type = HTTPException
status_code_enum = http.HTTPStatus(status_code)
args: Union[Tuple[http.HTTPStatus, str], Tuple[http.HTTPStatus]]
if message:
args = status_code_enum, message
else:
args = status_code_enum,
raise exc_type(*args)
|
Decipher an HTTP response for a GitHub API request.
The mapping providing the headers is expected to support lowercase keys.
The parameters of this function correspond to the three main parts
of an HTTP response: the status code, headers, and body. Assuming
no errors which lead to an exception being raised, a 3-item tuple
is returned. The first item is the decoded body (typically a JSON
object, but possibly None or a string depending on the content
type of the body). The second item is an instance of RateLimit
based on what the response specified.
The last item of the tuple is the URL where to request the next
part of results. If there are no more results then None is
returned. Do be aware that the URL can be a URI template and so
may need to be expanded.
If the status code is anything other than 200, 201, or 204, then
an HTTPException is raised.
|
train
|
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/sansio.py#L269-L326
|
[
"def _decode_body(content_type: Optional[str], body: bytes,\n *, strict: bool = False) -> Any:\n \"\"\"Decode an HTTP body based on the specified content type.\n\n If 'strict' is true, then raise ValueError if the content type\n is not recognized. Otherwise simply returned the body as a decoded\n string.\n \"\"\"\n type_, encoding = _parse_content_type(content_type)\n if not len(body) or not content_type:\n return None\n decoded_body = body.decode(encoding)\n if type_ == \"application/json\":\n return json.loads(decoded_body)\n elif type_ == \"application/x-www-form-urlencoded\":\n return json.loads(urllib.parse.parse_qs(decoded_body)[\"payload\"][0])\n elif strict:\n raise ValueError(f\"unrecognized content type: {type_!r}\")\n return decoded_body\n",
"def _next_link(link: Optional[str]) -> Optional[str]:\n # https://developer.github.com/v3/#pagination\n # https://tools.ietf.org/html/rfc5988\n if link is None:\n return None\n for match in _link_re.finditer(link):\n if match.group(\"param_type\") == \"rel\":\n if match.group(\"param_value\") == \"next\":\n return match.group(\"uri\")\n",
"def from_http(cls, headers: Mapping[str, str]) -> Optional[\"RateLimit\"]:\n \"\"\"Gather rate limit information from HTTP headers.\n\n The mapping providing the headers is expected to support lowercase\n keys. Returns ``None`` if ratelimit info is not found in the headers.\n \"\"\"\n try:\n limit = int(headers[\"x-ratelimit-limit\"])\n remaining = int(headers[\"x-ratelimit-remaining\"])\n reset_epoch = float(headers[\"x-ratelimit-reset\"])\n except KeyError:\n return None\n else:\n return cls(limit=limit, remaining=remaining, reset_epoch=reset_epoch)\n"
] |
"""Code to help with HTTP requests, responses, and events from GitHub's developer API.
This code has been constructed to perform no I/O of its own. This allows you to
use any HTTP library you prefer while not having to implement common details
when working with GitHub's API (e.g. validating webhook events or specifying the
API version you want your request to work against).
"""
import cgi
import datetime
import hashlib
import hmac
import http
import json
import re
from typing import Any, Dict, Mapping, Optional, Tuple, Type, Union
import urllib.parse
import uritemplate
from . import (BadRequest, GitHubBroken, HTTPException, InvalidField,
RateLimitExceeded, RedirectionException, ValidationFailure)
def _parse_content_type(content_type: Optional[str]) -> Tuple[Optional[str], str]:
"""Tease out the content-type and character encoding.
A default character encoding of UTF-8 is used, so the content-type
must be used to determine if any decoding is necessary to begin
with.
"""
if not content_type:
return None, "utf-8"
else:
type_, parameters = cgi.parse_header(content_type)
encoding = parameters.get("charset", "utf-8")
return type_, encoding
def _decode_body(content_type: Optional[str], body: bytes,
*, strict: bool = False) -> Any:
"""Decode an HTTP body based on the specified content type.
If 'strict' is true, then raise ValueError if the content type
is not recognized. Otherwise simply returned the body as a decoded
string.
"""
type_, encoding = _parse_content_type(content_type)
if not len(body) or not content_type:
return None
decoded_body = body.decode(encoding)
if type_ == "application/json":
return json.loads(decoded_body)
elif type_ == "application/x-www-form-urlencoded":
return json.loads(urllib.parse.parse_qs(decoded_body)["payload"][0])
elif strict:
raise ValueError(f"unrecognized content type: {type_!r}")
return decoded_body
def validate_event(payload: bytes, *, signature: str, secret: str) -> None:
"""Validate the signature of a webhook event."""
# https://developer.github.com/webhooks/securing/#validating-payloads-from-github
signature_prefix = "sha1="
if not signature.startswith(signature_prefix):
raise ValidationFailure("signature does not start with "
f"{repr(signature_prefix)}")
hmac_ = hmac.new(secret.encode("UTF-8"), msg=payload, digestmod="sha1")
calculated_sig = signature_prefix + hmac_.hexdigest()
if not hmac.compare_digest(signature, calculated_sig):
raise ValidationFailure("payload's signature does not align "
"with the secret")
class Event:
"""Details of a GitHub webhook event."""
def __init__(self, data: Any, *, event: str, delivery_id: str) -> None:
# https://developer.github.com/v3/activity/events/types/
# https://developer.github.com/webhooks/#delivery-headers
self.data = data
# Event is not an enum as GitHub provides the string. This allows them
# to add new events without having to mirror them here. There's also no
# direct worry of a user typing in the wrong event name and thus no need
# for an enum's typing protection.
self.event = event
self.delivery_id = delivery_id
@classmethod
def from_http(cls, headers: Mapping[str, str], body: bytes,
*, secret: Optional[str] = None) -> "Event":
"""Construct an event from HTTP headers and JSON body data.
The mapping providing the headers is expected to support lowercase keys.
Since this method assumes the body of the HTTP request is JSON, a check
is performed for a content-type of "application/json" (GitHub does
support other content-types). If the content-type does not match,
BadRequest is raised.
If the appropriate headers are provided for event validation, then it
will be performed unconditionally. Any failure in validation
(including not providing a secret) will lead to ValidationFailure being
raised.
"""
if "x-hub-signature" in headers:
if secret is None:
raise ValidationFailure("secret not provided")
validate_event(body, signature=headers["x-hub-signature"],
secret=secret)
elif secret is not None:
raise ValidationFailure("signature is missing")
try:
data = _decode_body(headers["content-type"], body, strict=True)
except (KeyError, ValueError) as exc:
raise BadRequest(http.HTTPStatus(415),
"expected a content-type of "
"'application/json' or "
"'application/x-www-form-urlencoded'") from exc
return cls(data, event=headers["x-github-event"],
delivery_id=headers["x-github-delivery"])
def accept_format(*, version: str = "v3", media: Optional[str] = None,
json: bool = True) -> str:
"""Construct the specification of the format that a request should return.
The version argument defaults to v3 of the GitHub API and is applicable to
all requests. The media argument along with 'json' specifies what format
the request should return, e.g. requesting the rendered HTML of a comment.
Do note that not all of GitHub's API supports alternative formats.
The default arguments of this function will always return the latest stable
version of the GitHub API in the default format that this library is
designed to support.
"""
# https://developer.github.com/v3/media/
# https://developer.github.com/v3/#current-version
accept = f"application/vnd.github.{version}"
if media is not None:
accept += f".{media}"
if json:
accept += "+json"
return accept
def create_headers(requester: str, *, accept: str = accept_format(),
oauth_token: Optional[str] = None,
jwt: Optional[str] = None) -> Dict[str, str]:
"""Create a dict representing GitHub-specific header fields.
The user agent is set according to who the requester is. GitHub asks it be
either a username or project name.
The 'accept' argument corresponds to the 'accept' field and defaults to the
default result of accept_format(). You should only need to change this value
if you are using a different version of the API -- e.g. one that is under
development -- or if you are looking for a different format return type,
e.g. wanting the rendered HTML of a Markdown file.
The 'oauth_token' allows making an authenticated request using a personal access
token. This can be important if you need the expanded rate limit provided
by an authenticated request.
The 'jwt' allows authenticating as a GitHub App by passing in the
bearer token.
You can only supply only one of oauth_token or jwt, not both.
For consistency, all keys in the returned dict will be lowercased.
"""
# user-agent: https://developer.github.com/v3/#user-agent-required
# accept: https://developer.github.com/v3/#current-version
# https://developer.github.com/v3/media/
# authorization: https://developer.github.com/v3/#authentication
# authenticating as a GitHub App: https://developer.github.com/apps/building-github-apps/authenticating-with-github-apps/#authenticating-as-a-github-app
if oauth_token is not None and jwt is not None:
raise ValueError("Cannot pass both oauth_token and jwt.")
headers = {"user-agent": requester, "accept": accept}
if oauth_token is not None:
headers["authorization"] = f"token {oauth_token}"
elif jwt is not None:
headers["authorization"] = f"bearer {jwt}"
return headers
class RateLimit:
"""The rate limit imposed upon the requester.
The 'limit' attribute specifies the rate of requests per hour the client is
limited to.
The 'remaining' attribute specifies how many requests remain within the
current rate limit that the client can make.
The reset_datetime attribute is a datetime object representing when
effectively 'left' resets to 'rate'. The datetime object is timezone-aware
and set to UTC.
The boolean value of an instance whether another request can be made. This
is determined based on whether there are any remaining requests or if the
reset datetime has passed.
"""
# https://developer.github.com/v3/#rate-limiting
def __init__(self, *, limit: int, remaining: int, reset_epoch: float) -> None:
"""Instantiate a RateLimit object.
The reset_epoch argument should be in seconds since the UTC epoch.
"""
# Instance attribute names stem from the name GitHub uses in their
# API documentation.
self.limit = limit
self.remaining = remaining
# Name specifies the type to remind users that the epoch is not stored
# as an int as the GitHub API returns.
self.reset_datetime = datetime.datetime.fromtimestamp(reset_epoch,
datetime.timezone.utc)
def __bool__(self) -> bool:
"""True if requests are remaining or the reset datetime has passed."""
if self.remaining > 0:
return True
else:
now = datetime.datetime.now(datetime.timezone.utc)
return now > self.reset_datetime
def __str__(self) -> str:
"""Provide all details in a reasonable format."""
return f"< {self.remaining:,}/{self.limit:,} until {self.reset_datetime} >"
@classmethod
def from_http(cls, headers: Mapping[str, str]) -> Optional["RateLimit"]:
"""Gather rate limit information from HTTP headers.
The mapping providing the headers is expected to support lowercase
keys. Returns ``None`` if ratelimit info is not found in the headers.
"""
try:
limit = int(headers["x-ratelimit-limit"])
remaining = int(headers["x-ratelimit-remaining"])
reset_epoch = float(headers["x-ratelimit-reset"])
except KeyError:
return None
else:
return cls(limit=limit, remaining=remaining, reset_epoch=reset_epoch)
_link_re = re.compile(r'\<(?P<uri>[^>]+)\>;\s*'
r'(?P<param_type>\w+)="(?P<param_value>\w+)"(,\s*)?')
def _next_link(link: Optional[str]) -> Optional[str]:
# https://developer.github.com/v3/#pagination
# https://tools.ietf.org/html/rfc5988
if link is None:
return None
for match in _link_re.finditer(link):
if match.group("param_type") == "rel":
if match.group("param_value") == "next":
return match.group("uri")
else:
return None
DOMAIN = "https://api.github.com"
def format_url(url: str, url_vars: Mapping[str, Any]) -> str:
    """Construct a URL for the GitHub API.

    The URL may be absolute or relative; a relative one gets the GitHub API
    domain prepended, so paths can be copied straight from the GitHub
    developer documentation. *url_vars* supplies values for URI template
    expansion.
    """
    # urljoin() leaves an already fully-qualified URL untouched.
    absolute = urllib.parse.urljoin(DOMAIN, url)
    expanded: str = uritemplate.expand(absolute, var_dict=url_vars)
    return expanded
|
brettcannon/gidgethub
|
gidgethub/sansio.py
|
format_url
|
python
|
def format_url(url: str, url_vars: Mapping[str, Any]) -> str:
url = urllib.parse.urljoin(DOMAIN, url) # Works even if 'url' is fully-qualified.
expanded_url: str = uritemplate.expand(url, var_dict=url_vars)
return expanded_url
|
Construct a URL for the GitHub API.
The URL may be absolute or relative. In the latter case the appropriate
domain will be added. This is to help when copying the relative URL directly
from the GitHub developer documentation.
The dict provided in url_vars is used in URI template formatting.
|
train
|
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/sansio.py#L331-L342
| null |
"""Code to help with HTTP requests, responses, and events from GitHub's developer API.
This code has been constructed to perform no I/O of its own. This allows you to
use any HTTP library you prefer while not having to implement common details
when working with GitHub's API (e.g. validating webhook events or specifying the
API version you want your request to work against).
"""
import cgi
import datetime
import hashlib
import hmac
import http
import json
import re
from typing import Any, Dict, Mapping, Optional, Tuple, Type, Union
import urllib.parse
import uritemplate
from . import (BadRequest, GitHubBroken, HTTPException, InvalidField,
RateLimitExceeded, RedirectionException, ValidationFailure)
def _parse_content_type(content_type: Optional[str]) -> Tuple[Optional[str], str]:
"""Tease out the content-type and character encoding.
A default character encoding of UTF-8 is used, so the content-type
must be used to determine if any decoding is necessary to begin
with.
"""
if not content_type:
return None, "utf-8"
else:
type_, parameters = cgi.parse_header(content_type)
encoding = parameters.get("charset", "utf-8")
return type_, encoding
def _decode_body(content_type: Optional[str], body: bytes,
                 *, strict: bool = False) -> Any:
    """Decode an HTTP body according to its declared content type.

    Returns None for an empty body or a missing content type. JSON and
    form-encoded-JSON bodies are parsed into Python objects. With
    strict=True any other content type raises ValueError; otherwise the
    body is returned as decoded text.
    """
    media_type, encoding = _parse_content_type(content_type)
    if not content_type or not body:
        return None
    text = body.decode(encoding)
    if media_type == "application/json":
        return json.loads(text)
    if media_type == "application/x-www-form-urlencoded":
        # GitHub's form-encoded webhook deliveries carry the JSON document
        # under the "payload" field.
        return json.loads(urllib.parse.parse_qs(text)["payload"][0])
    if strict:
        raise ValueError(f"unrecognized content type: {media_type!r}")
    return text
def validate_event(payload: bytes, *, signature: str, secret: str) -> None:
    """Check a webhook payload's HMAC-SHA1 signature against *secret*.

    Raises ValidationFailure when the signature lacks the "sha1=" prefix
    or does not match the digest computed from the payload.
    """
    # https://developer.github.com/webhooks/securing/#validating-payloads-from-github
    prefix = "sha1="
    if not signature.startswith(prefix):
        raise ValidationFailure("signature does not start with "
                                f"{repr(prefix)}")
    digest = hmac.new(secret.encode("UTF-8"), msg=payload, digestmod="sha1")
    expected = prefix + digest.hexdigest()
    # Constant-time comparison guards against timing attacks.
    if not hmac.compare_digest(signature, expected):
        raise ValidationFailure("payload's signature does not align "
                                "with the secret")
class Event:
    """Details of a GitHub webhook event."""

    def __init__(self, data: Any, *, event: str, delivery_id: str) -> None:
        # https://developer.github.com/v3/activity/events/types/
        # https://developer.github.com/webhooks/#delivery-headers
        self.data = data
        # The event name is kept as the raw string GitHub sent rather than
        # an enum; this way new GitHub event types need no library change,
        # and there is no user-typed value that would benefit from enum
        # protection.
        self.event = event
        self.delivery_id = delivery_id

    @classmethod
    def from_http(cls, headers: Mapping[str, str], body: bytes,
                  *, secret: Optional[str] = None) -> "Event":
        """Build an Event from HTTP headers and the raw request body.

        The headers mapping is expected to support lowercase keys. The
        body must decode as JSON (directly or form-encoded); any other
        content type raises BadRequest with a 415 status.

        When a signature header is present, validation is performed
        unconditionally; a missing secret, a missing signature (with a
        secret supplied), or a mismatch raises ValidationFailure.
        """
        if "x-hub-signature" in headers:
            if secret is None:
                raise ValidationFailure("secret not provided")
            validate_event(body, signature=headers["x-hub-signature"],
                           secret=secret)
        elif secret is not None:
            raise ValidationFailure("signature is missing")
        try:
            data = _decode_body(headers["content-type"], body, strict=True)
        except (KeyError, ValueError) as exc:
            raise BadRequest(http.HTTPStatus(415),
                             "expected a content-type of "
                             "'application/json' or "
                             "'application/x-www-form-urlencoded'") from exc
        return cls(data, event=headers["x-github-event"],
                   delivery_id=headers["x-github-delivery"])
def accept_format(*, version: str = "v3", media: Optional[str] = None,
                  json: bool = True) -> str:
    """Build the "accept" header value describing the desired response format.

    With no arguments this selects the latest stable GitHub API version in
    the JSON format this library is designed for. *media* requests an
    alternative rendering (e.g. "html"), and json=False drops the "+json"
    suffix; note that not all of GitHub's API supports alternative formats.
    """
    # https://developer.github.com/v3/media/
    # https://developer.github.com/v3/#current-version
    parts = [f"application/vnd.github.{version}"]
    if media is not None:
        parts.append(f".{media}")
    if json:
        parts.append("+json")
    return "".join(parts)
def create_headers(requester: str, *, accept: str = accept_format(),
                   oauth_token: Optional[str] = None,
                   jwt: Optional[str] = None) -> Dict[str, str]:
    """Create a dict of GitHub-specific header fields (keys all lowercase).

    *requester* becomes the user agent, which GitHub asks to be a username
    or project name. *accept* defaults to accept_format() and only needs
    changing for a different API version or response rendering.

    Authentication is optional: *oauth_token* sends a personal access
    token, *jwt* authenticates as a GitHub App via a bearer token.
    Supplying both raises ValueError.
    """
    # user-agent: https://developer.github.com/v3/#user-agent-required
    # accept: https://developer.github.com/v3/#current-version
    #         https://developer.github.com/v3/media/
    # authorization: https://developer.github.com/v3/#authentication
    # GitHub App auth: https://developer.github.com/apps/building-github-apps/authenticating-with-github-apps/#authenticating-as-a-github-app
    if oauth_token is not None and jwt is not None:
        raise ValueError("Cannot pass both oauth_token and jwt.")
    fields = {"user-agent": requester, "accept": accept}
    if oauth_token is not None:
        fields["authorization"] = f"token {oauth_token}"
    elif jwt is not None:
        fields["authorization"] = f"bearer {jwt}"
    return fields
class RateLimit:
    """The rate limit imposed upon the requester.

    'limit' is the number of requests per hour the client may make, and
    'remaining' how many of those are still available in the current
    window. 'reset_datetime' is a timezone-aware UTC datetime at which
    'remaining' resets to 'limit'.

    Truth-testing an instance answers "can another request be made?":
    true while requests remain or once the reset time has passed.
    """
    # https://developer.github.com/v3/#rate-limiting

    def __init__(self, *, limit: int, remaining: int, reset_epoch: float) -> None:
        """Instantiate a RateLimit; *reset_epoch* is seconds since the UTC epoch.

        Attribute names mirror the names GitHub uses in its API docs.
        """
        self.limit = limit
        self.remaining = remaining
        # Stored as an aware datetime (the attribute name flags that this is
        # not the raw epoch int GitHub returns) so comparisons against "now"
        # are direct.
        self.reset_datetime = datetime.datetime.fromtimestamp(
            reset_epoch, datetime.timezone.utc)

    def __bool__(self) -> bool:
        """True if requests are remaining or the reset datetime has passed."""
        if self.remaining > 0:
            return True
        return datetime.datetime.now(datetime.timezone.utc) > self.reset_datetime

    def __str__(self) -> str:
        """Provide all details in a reasonable format."""
        return f"< {self.remaining:,}/{self.limit:,} until {self.reset_datetime} >"

    @classmethod
    def from_http(cls, headers: Mapping[str, str]) -> Optional["RateLimit"]:
        """Gather rate limit information from HTTP headers.

        The mapping is expected to support lowercase keys. Returns ``None``
        when any of the x-ratelimit-* headers is absent.
        """
        try:
            details = dict(
                limit=int(headers["x-ratelimit-limit"]),
                remaining=int(headers["x-ratelimit-remaining"]),
                reset_epoch=float(headers["x-ratelimit-reset"]),
            )
        except KeyError:
            return None
        return cls(**details)
_link_re = re.compile(r'\<(?P<uri>[^>]+)\>;\s*'
r'(?P<param_type>\w+)="(?P<param_value>\w+)"(,\s*)?')
def _next_link(link: Optional[str]) -> Optional[str]:
# https://developer.github.com/v3/#pagination
# https://tools.ietf.org/html/rfc5988
if link is None:
return None
for match in _link_re.finditer(link):
if match.group("param_type") == "rel":
if match.group("param_value") == "next":
return match.group("uri")
else:
return None
def decipher_response(status_code: int, headers: Mapping[str, str],
                      body: bytes) -> Tuple[Any, Optional[RateLimit], Optional[str]]:
    """Decipher an HTTP response for a GitHub API request.

    The mapping providing the headers is expected to support lowercase keys.

    The parameters of this function correspond to the three main parts
    of an HTTP response: the status code, headers, and body. Assuming
    no errors which lead to an exception being raised, a 3-item tuple
    is returned. The first item is the decoded body (typically a JSON
    object, but possibly None or a string depending on the content
    type of the body). The second item is an instance of RateLimit
    based on what the response specified.

    The last item of the tuple is the URL where to request the next
    part of results. If there are no more results then None is
    returned. Do be aware that the URL can be a URI template and so
    may need to be expanded.

    If the status code is anything other than 200, 201, or 204, then
    an HTTPException is raised.
    """
    data = _decode_body(headers.get("content-type"), body)
    if status_code in {200, 201, 204}:
        # Success: hand back body, rate-limit info, and pagination link.
        return data, RateLimit.from_http(headers), _next_link(headers.get("link"))
    else:
        try:
            message = data["message"]
        except (TypeError, KeyError):
            # TypeError covers bodies that decoded to None or a plain string.
            message = None
        exc_type: Type[HTTPException]
        if status_code >= 500:
            exc_type = GitHubBroken
        elif status_code >= 400:
            exc_type = BadRequest
            if status_code == 403:
                # Distinguish "rate limit exhausted" from other 403s by
                # inspecting the rate-limit headers.
                rate_limit = RateLimit.from_http(headers)
                if rate_limit and not rate_limit.remaining:
                    raise RateLimitExceeded(rate_limit, message)
            elif status_code == 422:
                # Validation failure: surface which fields were rejected
                # when GitHub provides an "errors" list.
                errors = data.get("errors", None)
                if errors:
                    fields = ", ".join(repr(e["field"]) for e in errors)
                    message = f"{message} for {fields}"
                else:
                    message = data["message"]
                raise InvalidField(errors, message)
        elif status_code >= 300:
            exc_type = RedirectionException
        else:
            exc_type = HTTPException
        status_code_enum = http.HTTPStatus(status_code)
        args: Union[Tuple[http.HTTPStatus, str], Tuple[http.HTTPStatus]]
        if message:
            args = status_code_enum, message
        else:
            # No message available: raise with only the status enum.
            args = status_code_enum,
        raise exc_type(*args)
DOMAIN = "https://api.github.com"
|
brettcannon/gidgethub
|
gidgethub/sansio.py
|
Event.from_http
|
python
|
def from_http(cls, headers: Mapping[str, str], body: bytes,
*, secret: Optional[str] = None) -> "Event":
if "x-hub-signature" in headers:
if secret is None:
raise ValidationFailure("secret not provided")
validate_event(body, signature=headers["x-hub-signature"],
secret=secret)
elif secret is not None:
raise ValidationFailure("signature is missing")
try:
data = _decode_body(headers["content-type"], body, strict=True)
except (KeyError, ValueError) as exc:
raise BadRequest(http.HTTPStatus(415),
"expected a content-type of "
"'application/json' or "
"'application/x-www-form-urlencoded'") from exc
return cls(data, event=headers["x-github-event"],
delivery_id=headers["x-github-delivery"])
|
Construct an event from HTTP headers and JSON body data.
The mapping providing the headers is expected to support lowercase keys.
Since this method assumes the body of the HTTP request is JSON, a check
is performed for a content-type of "application/json" (GitHub does
support other content-types). If the content-type does not match,
BadRequest is raised.
If the appropriate headers are provided for event validation, then it
will be performed unconditionally. Any failure in validation
(including not providing a secret) will lead to ValidationFailure being
raised.
|
train
|
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/sansio.py#L90-L122
|
[
"def _decode_body(content_type: Optional[str], body: bytes,\n *, strict: bool = False) -> Any:\n \"\"\"Decode an HTTP body based on the specified content type.\n\n If 'strict' is true, then raise ValueError if the content type\n is not recognized. Otherwise simply returned the body as a decoded\n string.\n \"\"\"\n type_, encoding = _parse_content_type(content_type)\n if not len(body) or not content_type:\n return None\n decoded_body = body.decode(encoding)\n if type_ == \"application/json\":\n return json.loads(decoded_body)\n elif type_ == \"application/x-www-form-urlencoded\":\n return json.loads(urllib.parse.parse_qs(decoded_body)[\"payload\"][0])\n elif strict:\n raise ValueError(f\"unrecognized content type: {type_!r}\")\n return decoded_body\n",
"def validate_event(payload: bytes, *, signature: str, secret: str) -> None:\n \"\"\"Validate the signature of a webhook event.\"\"\"\n # https://developer.github.com/webhooks/securing/#validating-payloads-from-github\n signature_prefix = \"sha1=\"\n if not signature.startswith(signature_prefix):\n raise ValidationFailure(\"signature does not start with \"\n f\"{repr(signature_prefix)}\")\n hmac_ = hmac.new(secret.encode(\"UTF-8\"), msg=payload, digestmod=\"sha1\")\n calculated_sig = signature_prefix + hmac_.hexdigest()\n if not hmac.compare_digest(signature, calculated_sig):\n raise ValidationFailure(\"payload's signature does not align \"\n \"with the secret\")\n"
] |
class Event:
"""Details of a GitHub webhook event."""
def __init__(self, data: Any, *, event: str, delivery_id: str) -> None:
# https://developer.github.com/v3/activity/events/types/
# https://developer.github.com/webhooks/#delivery-headers
self.data = data
# Event is not an enum as GitHub provides the string. This allows them
# to add new events without having to mirror them here. There's also no
# direct worry of a user typing in the wrong event name and thus no need
# for an enum's typing protection.
self.event = event
self.delivery_id = delivery_id
@classmethod
|
brettcannon/gidgethub
|
gidgethub/sansio.py
|
RateLimit.from_http
|
python
|
def from_http(cls, headers: Mapping[str, str]) -> Optional["RateLimit"]:
try:
limit = int(headers["x-ratelimit-limit"])
remaining = int(headers["x-ratelimit-remaining"])
reset_epoch = float(headers["x-ratelimit-reset"])
except KeyError:
return None
else:
return cls(limit=limit, remaining=remaining, reset_epoch=reset_epoch)
|
Gather rate limit information from HTTP headers.
The mapping providing the headers is expected to support lowercase
keys. Returns ``None`` if ratelimit info is not found in the headers.
|
train
|
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/sansio.py#L237-L250
| null |
class RateLimit:
"""The rate limit imposed upon the requester.
The 'limit' attribute specifies the rate of requests per hour the client is
limited to.
The 'remaining' attribute specifies how many requests remain within the
current rate limit that the client can make.
The reset_datetime attribute is a datetime object representing when
effectively 'left' resets to 'rate'. The datetime object is timezone-aware
and set to UTC.
The boolean value of an instance whether another request can be made. This
is determined based on whether there are any remaining requests or if the
reset datetime has passed.
"""
# https://developer.github.com/v3/#rate-limiting
def __init__(self, *, limit: int, remaining: int, reset_epoch: float) -> None:
"""Instantiate a RateLimit object.
The reset_epoch argument should be in seconds since the UTC epoch.
"""
# Instance attribute names stem from the name GitHub uses in their
# API documentation.
self.limit = limit
self.remaining = remaining
# Name specifies the type to remind users that the epoch is not stored
# as an int as the GitHub API returns.
self.reset_datetime = datetime.datetime.fromtimestamp(reset_epoch,
datetime.timezone.utc)
def __bool__(self) -> bool:
"""True if requests are remaining or the reset datetime has passed."""
if self.remaining > 0:
return True
else:
now = datetime.datetime.now(datetime.timezone.utc)
return now > self.reset_datetime
def __str__(self) -> str:
"""Provide all details in a reasonable format."""
return f"< {self.remaining:,}/{self.limit:,} until {self.reset_datetime} >"
@classmethod
|
brettcannon/gidgethub
|
gidgethub/routing.py
|
Router.add
|
python
|
def add(self, func: AsyncCallback, event_type: str, **data_detail: Any) -> None:
if len(data_detail) > 1:
msg = ()
raise TypeError("dispatching based on data details is only "
"supported up to one level deep; "
f"{len(data_detail)} levels specified")
elif not data_detail:
callbacks = self._shallow_routes.setdefault(event_type, [])
callbacks.append(func)
else:
data_key, data_value = data_detail.popitem()
data_details = self._deep_routes.setdefault(event_type, {})
specific_detail = data_details.setdefault(data_key, {})
callbacks = specific_detail.setdefault(data_value, [])
callbacks.append(func)
|
Add a new route.
After registering 'func' for the specified event_type, an
optional data_detail may be provided. By providing an extra
keyword argument, dispatching can occur based on a top-level
key of the data in the event being dispatched.
|
train
|
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/routing.py#L29-L50
| null |
class Router:
"""Route webhook events to registered functions."""
def __init__(self, *other_routers: "Router") -> None:
"""Instantiate a new router (possibly from other routers)."""
self._shallow_routes: Dict[str, List[AsyncCallback]] = {}
# event type -> data key -> data value -> callbacks
self._deep_routes: Dict[str, Dict[str, Dict[Any, List[AsyncCallback]]]] = {}
for other_router in other_routers:
for event_type, callbacks in other_router._shallow_routes.items():
for callback in callbacks:
self.add(callback, event_type)
for event_type, data_details in other_router._deep_routes.items():
for data_key, data_specifics in data_details.items():
for data_value, callbacks in data_specifics.items():
detail = {data_key: data_value}
for callback in callbacks:
self.add(callback, event_type, **detail)
def register(self, event_type: str,
**data_detail: Any) -> Callable[[AsyncCallback], AsyncCallback]:
"""Decorator to apply the add() method to a function."""
def decorator(func: AsyncCallback) -> AsyncCallback:
self.add(func, event_type, **data_detail)
return func
return decorator
async def dispatch(self, event: sansio.Event, *args: Any,
**kwargs: Any) -> None:
"""Dispatch an event to all registered function(s)."""
found_callbacks = []
try:
found_callbacks.extend(self._shallow_routes[event.event])
except KeyError:
pass
try:
details = self._deep_routes[event.event]
except KeyError:
pass
else:
for data_key, data_values in details.items():
if data_key in event.data:
event_value = event.data[data_key]
if event_value in data_values:
found_callbacks.extend(data_values[event_value])
for callback in found_callbacks:
await callback(event, *args, **kwargs)
|
brettcannon/gidgethub
|
gidgethub/routing.py
|
Router.register
|
python
|
def register(self, event_type: str,
**data_detail: Any) -> Callable[[AsyncCallback], AsyncCallback]:
def decorator(func: AsyncCallback) -> AsyncCallback:
self.add(func, event_type, **data_detail)
return func
return decorator
|
Decorator to apply the add() method to a function.
|
train
|
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/routing.py#L52-L58
| null |
class Router:
"""Route webhook events to registered functions."""
def __init__(self, *other_routers: "Router") -> None:
"""Instantiate a new router (possibly from other routers)."""
self._shallow_routes: Dict[str, List[AsyncCallback]] = {}
# event type -> data key -> data value -> callbacks
self._deep_routes: Dict[str, Dict[str, Dict[Any, List[AsyncCallback]]]] = {}
for other_router in other_routers:
for event_type, callbacks in other_router._shallow_routes.items():
for callback in callbacks:
self.add(callback, event_type)
for event_type, data_details in other_router._deep_routes.items():
for data_key, data_specifics in data_details.items():
for data_value, callbacks in data_specifics.items():
detail = {data_key: data_value}
for callback in callbacks:
self.add(callback, event_type, **detail)
def add(self, func: AsyncCallback, event_type: str, **data_detail: Any) -> None:
"""Add a new route.
After registering 'func' for the specified event_type, an
optional data_detail may be provided. By providing an extra
keyword argument, dispatching can occur based on a top-level
key of the data in the event being dispatched.
"""
if len(data_detail) > 1:
msg = ()
raise TypeError("dispatching based on data details is only "
"supported up to one level deep; "
f"{len(data_detail)} levels specified")
elif not data_detail:
callbacks = self._shallow_routes.setdefault(event_type, [])
callbacks.append(func)
else:
data_key, data_value = data_detail.popitem()
data_details = self._deep_routes.setdefault(event_type, {})
specific_detail = data_details.setdefault(data_key, {})
callbacks = specific_detail.setdefault(data_value, [])
callbacks.append(func)
async def dispatch(self, event: sansio.Event, *args: Any,
**kwargs: Any) -> None:
"""Dispatch an event to all registered function(s)."""
found_callbacks = []
try:
found_callbacks.extend(self._shallow_routes[event.event])
except KeyError:
pass
try:
details = self._deep_routes[event.event]
except KeyError:
pass
else:
for data_key, data_values in details.items():
if data_key in event.data:
event_value = event.data[data_key]
if event_value in data_values:
found_callbacks.extend(data_values[event_value])
for callback in found_callbacks:
await callback(event, *args, **kwargs)
|
brettcannon/gidgethub
|
gidgethub/routing.py
|
Router.dispatch
|
python
|
async def dispatch(self, event: sansio.Event, *args: Any,
**kwargs: Any) -> None:
found_callbacks = []
try:
found_callbacks.extend(self._shallow_routes[event.event])
except KeyError:
pass
try:
details = self._deep_routes[event.event]
except KeyError:
pass
else:
for data_key, data_values in details.items():
if data_key in event.data:
event_value = event.data[data_key]
if event_value in data_values:
found_callbacks.extend(data_values[event_value])
for callback in found_callbacks:
await callback(event, *args, **kwargs)
|
Dispatch an event to all registered function(s).
|
train
|
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/routing.py#L60-L80
| null |
class Router:
"""Route webhook events to registered functions."""
def __init__(self, *other_routers: "Router") -> None:
"""Instantiate a new router (possibly from other routers)."""
self._shallow_routes: Dict[str, List[AsyncCallback]] = {}
# event type -> data key -> data value -> callbacks
self._deep_routes: Dict[str, Dict[str, Dict[Any, List[AsyncCallback]]]] = {}
for other_router in other_routers:
for event_type, callbacks in other_router._shallow_routes.items():
for callback in callbacks:
self.add(callback, event_type)
for event_type, data_details in other_router._deep_routes.items():
for data_key, data_specifics in data_details.items():
for data_value, callbacks in data_specifics.items():
detail = {data_key: data_value}
for callback in callbacks:
self.add(callback, event_type, **detail)
def add(self, func: AsyncCallback, event_type: str, **data_detail: Any) -> None:
"""Add a new route.
After registering 'func' for the specified event_type, an
optional data_detail may be provided. By providing an extra
keyword argument, dispatching can occur based on a top-level
key of the data in the event being dispatched.
"""
if len(data_detail) > 1:
msg = ()
raise TypeError("dispatching based on data details is only "
"supported up to one level deep; "
f"{len(data_detail)} levels specified")
elif not data_detail:
callbacks = self._shallow_routes.setdefault(event_type, [])
callbacks.append(func)
else:
data_key, data_value = data_detail.popitem()
data_details = self._deep_routes.setdefault(event_type, {})
specific_detail = data_details.setdefault(data_key, {})
callbacks = specific_detail.setdefault(data_value, [])
callbacks.append(func)
def register(self, event_type: str,
**data_detail: Any) -> Callable[[AsyncCallback], AsyncCallback]:
"""Decorator to apply the add() method to a function."""
def decorator(func: AsyncCallback) -> AsyncCallback:
self.add(func, event_type, **data_detail)
return func
return decorator
|
brettcannon/gidgethub
|
gidgethub/abc.py
|
GitHubAPI._request
|
python
|
async def _request(self, method: str, url: str, headers: Mapping[str, str],
body: bytes = b'') -> Tuple[int, Mapping[str, str], bytes]:
"""Make an HTTP request."""
|
Make an HTTP request.
|
train
|
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/abc.py#L26-L28
| null |
class GitHubAPI(abc.ABC):
"""Provide an idiomatic API for making calls to GitHub's API."""
def __init__(self, requester: str, *, oauth_token: Opt[str] = None,
cache: Opt[CACHE_TYPE] = None) -> None:
self.requester = requester
self.oauth_token = oauth_token
self._cache = cache
self.rate_limit: Opt[sansio.RateLimit] = None
@abc.abstractmethod
@abc.abstractmethod
async def sleep(self, seconds: float) -> None:
"""Sleep for the specified number of seconds."""
async def _make_request(self, method: str, url: str, url_vars: Dict[str, str],
data: Any, accept: str,
jwt: Opt[str] = None,
oauth_token: Opt[str] = None,
) -> Tuple[bytes, Opt[str]]:
"""Construct and make an HTTP request."""
if oauth_token is not None and jwt is not None:
raise ValueError("Cannot pass both oauth_token and jwt.")
filled_url = sansio.format_url(url, url_vars)
if jwt is not None:
request_headers = sansio.create_headers(
self.requester, accept=accept,
jwt=jwt)
elif oauth_token is not None:
request_headers = sansio.create_headers(
self.requester, accept=accept,
oauth_token=oauth_token)
else:
# fallback to using oauth_token
request_headers = sansio.create_headers(
self.requester, accept=accept,
oauth_token=self.oauth_token)
cached = cacheable = False
# Can't use None as a "no body" sentinel as it's a legitimate JSON type.
if data == b"":
body = b""
request_headers["content-length"] = "0"
if method == "GET" and self._cache is not None:
cacheable = True
try:
etag, last_modified, data, more = self._cache[filled_url]
cached = True
except KeyError:
pass
else:
if etag is not None:
request_headers["if-none-match"] = etag
if last_modified is not None:
request_headers["if-modified-since"] = last_modified
else:
charset = "utf-8"
body = json.dumps(data).encode(charset)
request_headers['content-type'] = f"application/json; charset={charset}"
request_headers['content-length'] = str(len(body))
if self.rate_limit is not None:
self.rate_limit.remaining -= 1
response = await self._request(method, filled_url, request_headers, body)
if not (response[0] == 304 and cached):
data, self.rate_limit, more = sansio.decipher_response(*response)
has_cache_details = ("etag" in response[1]
or "last-modified" in response[1])
if self._cache is not None and cacheable and has_cache_details:
etag = response[1].get("etag")
last_modified = response[1].get("last-modified")
self._cache[filled_url] = etag, last_modified, data, more
return data, more
async def getitem(self, url: str, url_vars: Dict[str, str] = {},
*, accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> Any:
"""Send a GET request for a single item to the specified endpoint."""
data, _ = await self._make_request("GET", url, url_vars, b"", accept,
jwt=jwt, oauth_token=oauth_token)
return data
async def getiter(self, url: str, url_vars: Dict[str, str] = {},
*, accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> AsyncGenerator[Any, None]:
"""Return an async iterable for all the items at a specified endpoint."""
data, more = await self._make_request("GET", url, url_vars, b"", accept,
jwt=jwt, oauth_token=oauth_token)
if isinstance(data, dict) and "items" in data:
data = data["items"]
for item in data:
yield item
if more:
# `yield from` is not supported in coroutines.
async for item in self.getiter(more, url_vars, accept=accept,
jwt=jwt, oauth_token=oauth_token):
yield item
async def post(self, url: str, url_vars: Dict[str, str] = {}, *, data: Any,
accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> Any:
data, _ = await self._make_request("POST", url, url_vars, data, accept,
jwt=jwt, oauth_token=oauth_token)
return data
async def patch(self, url: str, url_vars: Dict[str, str] = {}, *, data: Any,
accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> Any:
data, _ = await self._make_request("PATCH", url, url_vars, data, accept,
jwt=jwt, oauth_token=oauth_token)
return data
async def put(self, url: str, url_vars: Dict[str, str] = {}, *, data: Any = b"",
accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> Any:
data, _ = await self._make_request("PUT", url, url_vars, data, accept,
jwt=jwt, oauth_token=oauth_token)
return data
async def delete(self, url: str, url_vars: Dict[str, str] = {}, *, data: Any = b"",
accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> None:
await self._make_request("DELETE", url, url_vars, data, accept,
jwt=jwt, oauth_token=oauth_token)
|
brettcannon/gidgethub
|
gidgethub/abc.py
|
GitHubAPI._make_request
|
python
|
async def _make_request(self, method: str, url: str, url_vars: Dict[str, str],
data: Any, accept: str,
jwt: Opt[str] = None,
oauth_token: Opt[str] = None,
) -> Tuple[bytes, Opt[str]]:
if oauth_token is not None and jwt is not None:
raise ValueError("Cannot pass both oauth_token and jwt.")
filled_url = sansio.format_url(url, url_vars)
if jwt is not None:
request_headers = sansio.create_headers(
self.requester, accept=accept,
jwt=jwt)
elif oauth_token is not None:
request_headers = sansio.create_headers(
self.requester, accept=accept,
oauth_token=oauth_token)
else:
# fallback to using oauth_token
request_headers = sansio.create_headers(
self.requester, accept=accept,
oauth_token=self.oauth_token)
cached = cacheable = False
# Can't use None as a "no body" sentinel as it's a legitimate JSON type.
if data == b"":
body = b""
request_headers["content-length"] = "0"
if method == "GET" and self._cache is not None:
cacheable = True
try:
etag, last_modified, data, more = self._cache[filled_url]
cached = True
except KeyError:
pass
else:
if etag is not None:
request_headers["if-none-match"] = etag
if last_modified is not None:
request_headers["if-modified-since"] = last_modified
else:
charset = "utf-8"
body = json.dumps(data).encode(charset)
request_headers['content-type'] = f"application/json; charset={charset}"
request_headers['content-length'] = str(len(body))
if self.rate_limit is not None:
self.rate_limit.remaining -= 1
response = await self._request(method, filled_url, request_headers, body)
if not (response[0] == 304 and cached):
data, self.rate_limit, more = sansio.decipher_response(*response)
has_cache_details = ("etag" in response[1]
or "last-modified" in response[1])
if self._cache is not None and cacheable and has_cache_details:
etag = response[1].get("etag")
last_modified = response[1].get("last-modified")
self._cache[filled_url] = etag, last_modified, data, more
return data, more
|
Construct and make an HTTP request.
|
train
|
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/abc.py#L34-L89
|
[
"def format_url(url: str, url_vars: Mapping[str, Any]) -> str:\n \"\"\"Construct a URL for the GitHub API.\n\n The URL may be absolute or relative. In the latter case the appropriate\n domain will be added. This is to help when copying the relative URL directly\n from the GitHub developer documentation.\n\n The dict provided in url_vars is used in URI template formatting.\n \"\"\"\n url = urllib.parse.urljoin(DOMAIN, url) # Works even if 'url' is fully-qualified.\n expanded_url: str = uritemplate.expand(url, var_dict=url_vars)\n return expanded_url\n",
"def create_headers(requester: str, *, accept: str = accept_format(),\n oauth_token: Optional[str] = None,\n jwt: Optional[str] = None) -> Dict[str, str]:\n \"\"\"Create a dict representing GitHub-specific header fields.\n\n The user agent is set according to who the requester is. GitHub asks it be\n either a username or project name.\n\n The 'accept' argument corresponds to the 'accept' field and defaults to the\n default result of accept_format(). You should only need to change this value\n if you are using a different version of the API -- e.g. one that is under\n development -- or if you are looking for a different format return type,\n e.g. wanting the rendered HTML of a Markdown file.\n\n The 'oauth_token' allows making an authenticated request using a personal access\n token. This can be important if you need the expanded rate limit provided\n by an authenticated request.\n\n The 'jwt' allows authenticating as a GitHub App by passing in the\n bearer token.\n\n You can only supply only one of oauth_token or jwt, not both.\n\n For consistency, all keys in the returned dict will be lowercased.\n \"\"\"\n # user-agent: https://developer.github.com/v3/#user-agent-required\n # accept: https://developer.github.com/v3/#current-version\n # https://developer.github.com/v3/media/\n # authorization: https://developer.github.com/v3/#authentication\n # authenticating as a GitHub App: https://developer.github.com/apps/building-github-apps/authenticating-with-github-apps/#authenticating-as-a-github-app\n\n if oauth_token is not None and jwt is not None:\n raise ValueError(\"Cannot pass both oauth_token and jwt.\")\n headers = {\"user-agent\": requester, \"accept\": accept}\n if oauth_token is not None:\n headers[\"authorization\"] = f\"token {oauth_token}\"\n elif jwt is not None:\n headers[\"authorization\"] = f\"bearer {jwt}\"\n return headers\n",
"def decipher_response(status_code: int, headers: Mapping[str, str],\n body: bytes) -> Tuple[Any, Optional[RateLimit], Optional[str]]:\n \"\"\"Decipher an HTTP response for a GitHub API request.\n\n The mapping providing the headers is expected to support lowercase keys.\n\n The parameters of this function correspond to the three main parts\n of an HTTP response: the status code, headers, and body. Assuming\n no errors which lead to an exception being raised, a 3-item tuple\n is returned. The first item is the decoded body (typically a JSON\n object, but possibly None or a string depending on the content\n type of the body). The second item is an instance of RateLimit\n based on what the response specified.\n\n The last item of the tuple is the URL where to request the next\n part of results. If there are no more results then None is\n returned. Do be aware that the URL can be a URI template and so\n may need to be expanded.\n\n If the status code is anything other than 200, 201, or 204, then\n an HTTPException is raised.\n \"\"\"\n data = _decode_body(headers.get(\"content-type\"), body)\n if status_code in {200, 201, 204}:\n return data, RateLimit.from_http(headers), _next_link(headers.get(\"link\"))\n else:\n try:\n message = data[\"message\"]\n except (TypeError, KeyError):\n message = None\n exc_type: Type[HTTPException]\n if status_code >= 500:\n exc_type = GitHubBroken\n elif status_code >= 400:\n exc_type = BadRequest\n if status_code == 403:\n rate_limit = RateLimit.from_http(headers)\n if rate_limit and not rate_limit.remaining:\n raise RateLimitExceeded(rate_limit, message)\n elif status_code == 422:\n errors = data.get(\"errors\", None)\n if errors:\n fields = \", \".join(repr(e[\"field\"]) for e in errors)\n message = f\"{message} for {fields}\"\n else:\n message = data[\"message\"]\n raise InvalidField(errors, message)\n elif status_code >= 300:\n exc_type = RedirectionException\n else:\n exc_type = HTTPException\n status_code_enum = 
http.HTTPStatus(status_code)\n args: Union[Tuple[http.HTTPStatus, str], Tuple[http.HTTPStatus]]\n if message:\n args = status_code_enum, message\n else:\n args = status_code_enum,\n raise exc_type(*args)\n",
"async def _request(self, method: str, url: str, headers: Mapping[str, str],\n body: bytes = b'') -> Tuple[int, Mapping[str, str], bytes]:\n \"\"\"Make an HTTP request.\"\"\"\n",
"async def _request(self, method, url, headers, body=b''):\n \"\"\"Make an HTTP request.\"\"\"\n self.method = method\n self.url = url\n self.headers = headers\n self.body = body\n response_headers = self.response_headers.copy()\n try:\n # Don't loop forever.\n del self.response_headers[\"link\"]\n except KeyError:\n pass\n return self.response_code, response_headers, self.response_body\n"
] |
class GitHubAPI(abc.ABC):
"""Provide an idiomatic API for making calls to GitHub's API."""
def __init__(self, requester: str, *, oauth_token: Opt[str] = None,
cache: Opt[CACHE_TYPE] = None) -> None:
self.requester = requester
self.oauth_token = oauth_token
self._cache = cache
self.rate_limit: Opt[sansio.RateLimit] = None
@abc.abstractmethod
async def _request(self, method: str, url: str, headers: Mapping[str, str],
body: bytes = b'') -> Tuple[int, Mapping[str, str], bytes]:
"""Make an HTTP request."""
@abc.abstractmethod
async def sleep(self, seconds: float) -> None:
"""Sleep for the specified number of seconds."""
async def getitem(self, url: str, url_vars: Dict[str, str] = {},
*, accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> Any:
"""Send a GET request for a single item to the specified endpoint."""
data, _ = await self._make_request("GET", url, url_vars, b"", accept,
jwt=jwt, oauth_token=oauth_token)
return data
async def getiter(self, url: str, url_vars: Dict[str, str] = {},
*, accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> AsyncGenerator[Any, None]:
"""Return an async iterable for all the items at a specified endpoint."""
data, more = await self._make_request("GET", url, url_vars, b"", accept,
jwt=jwt, oauth_token=oauth_token)
if isinstance(data, dict) and "items" in data:
data = data["items"]
for item in data:
yield item
if more:
# `yield from` is not supported in coroutines.
async for item in self.getiter(more, url_vars, accept=accept,
jwt=jwt, oauth_token=oauth_token):
yield item
async def post(self, url: str, url_vars: Dict[str, str] = {}, *, data: Any,
accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> Any:
data, _ = await self._make_request("POST", url, url_vars, data, accept,
jwt=jwt, oauth_token=oauth_token)
return data
async def patch(self, url: str, url_vars: Dict[str, str] = {}, *, data: Any,
accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> Any:
data, _ = await self._make_request("PATCH", url, url_vars, data, accept,
jwt=jwt, oauth_token=oauth_token)
return data
async def put(self, url: str, url_vars: Dict[str, str] = {}, *, data: Any = b"",
accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> Any:
data, _ = await self._make_request("PUT", url, url_vars, data, accept,
jwt=jwt, oauth_token=oauth_token)
return data
async def delete(self, url: str, url_vars: Dict[str, str] = {}, *, data: Any = b"",
accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> None:
await self._make_request("DELETE", url, url_vars, data, accept,
jwt=jwt, oauth_token=oauth_token)
|
brettcannon/gidgethub
|
gidgethub/abc.py
|
GitHubAPI.getitem
|
python
|
async def getitem(self, url: str, url_vars: Dict[str, str] = {},
*, accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> Any:
data, _ = await self._make_request("GET", url, url_vars, b"", accept,
jwt=jwt, oauth_token=oauth_token)
return data
|
Send a GET request for a single item to the specified endpoint.
|
train
|
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/abc.py#L91-L100
|
[
"async def _make_request(self, method: str, url: str, url_vars: Dict[str, str],\n data: Any, accept: str,\n jwt: Opt[str] = None,\n oauth_token: Opt[str] = None,\n ) -> Tuple[bytes, Opt[str]]:\n \"\"\"Construct and make an HTTP request.\"\"\"\n if oauth_token is not None and jwt is not None:\n raise ValueError(\"Cannot pass both oauth_token and jwt.\")\n filled_url = sansio.format_url(url, url_vars)\n if jwt is not None:\n request_headers = sansio.create_headers(\n self.requester, accept=accept,\n jwt=jwt)\n elif oauth_token is not None:\n request_headers = sansio.create_headers(\n self.requester, accept=accept,\n oauth_token=oauth_token)\n else:\n # fallback to using oauth_token\n request_headers = sansio.create_headers(\n self.requester, accept=accept,\n oauth_token=self.oauth_token)\n cached = cacheable = False\n # Can't use None as a \"no body\" sentinel as it's a legitimate JSON type.\n if data == b\"\":\n body = b\"\"\n request_headers[\"content-length\"] = \"0\"\n if method == \"GET\" and self._cache is not None:\n cacheable = True\n try:\n etag, last_modified, data, more = self._cache[filled_url]\n cached = True\n except KeyError:\n pass\n else:\n if etag is not None:\n request_headers[\"if-none-match\"] = etag\n if last_modified is not None:\n request_headers[\"if-modified-since\"] = last_modified\n else:\n charset = \"utf-8\"\n body = json.dumps(data).encode(charset)\n request_headers['content-type'] = f\"application/json; charset={charset}\"\n request_headers['content-length'] = str(len(body))\n if self.rate_limit is not None:\n self.rate_limit.remaining -= 1\n response = await self._request(method, filled_url, request_headers, body)\n if not (response[0] == 304 and cached):\n data, self.rate_limit, more = sansio.decipher_response(*response)\n has_cache_details = (\"etag\" in response[1]\n or \"last-modified\" in response[1])\n if self._cache is not None and cacheable and has_cache_details:\n etag = response[1].get(\"etag\")\n last_modified = 
response[1].get(\"last-modified\")\n self._cache[filled_url] = etag, last_modified, data, more\n return data, more\n"
] |
class GitHubAPI(abc.ABC):
"""Provide an idiomatic API for making calls to GitHub's API."""
def __init__(self, requester: str, *, oauth_token: Opt[str] = None,
cache: Opt[CACHE_TYPE] = None) -> None:
self.requester = requester
self.oauth_token = oauth_token
self._cache = cache
self.rate_limit: Opt[sansio.RateLimit] = None
@abc.abstractmethod
async def _request(self, method: str, url: str, headers: Mapping[str, str],
body: bytes = b'') -> Tuple[int, Mapping[str, str], bytes]:
"""Make an HTTP request."""
@abc.abstractmethod
async def sleep(self, seconds: float) -> None:
"""Sleep for the specified number of seconds."""
async def _make_request(self, method: str, url: str, url_vars: Dict[str, str],
data: Any, accept: str,
jwt: Opt[str] = None,
oauth_token: Opt[str] = None,
) -> Tuple[bytes, Opt[str]]:
"""Construct and make an HTTP request."""
if oauth_token is not None and jwt is not None:
raise ValueError("Cannot pass both oauth_token and jwt.")
filled_url = sansio.format_url(url, url_vars)
if jwt is not None:
request_headers = sansio.create_headers(
self.requester, accept=accept,
jwt=jwt)
elif oauth_token is not None:
request_headers = sansio.create_headers(
self.requester, accept=accept,
oauth_token=oauth_token)
else:
# fallback to using oauth_token
request_headers = sansio.create_headers(
self.requester, accept=accept,
oauth_token=self.oauth_token)
cached = cacheable = False
# Can't use None as a "no body" sentinel as it's a legitimate JSON type.
if data == b"":
body = b""
request_headers["content-length"] = "0"
if method == "GET" and self._cache is not None:
cacheable = True
try:
etag, last_modified, data, more = self._cache[filled_url]
cached = True
except KeyError:
pass
else:
if etag is not None:
request_headers["if-none-match"] = etag
if last_modified is not None:
request_headers["if-modified-since"] = last_modified
else:
charset = "utf-8"
body = json.dumps(data).encode(charset)
request_headers['content-type'] = f"application/json; charset={charset}"
request_headers['content-length'] = str(len(body))
if self.rate_limit is not None:
self.rate_limit.remaining -= 1
response = await self._request(method, filled_url, request_headers, body)
if not (response[0] == 304 and cached):
data, self.rate_limit, more = sansio.decipher_response(*response)
has_cache_details = ("etag" in response[1]
or "last-modified" in response[1])
if self._cache is not None and cacheable and has_cache_details:
etag = response[1].get("etag")
last_modified = response[1].get("last-modified")
self._cache[filled_url] = etag, last_modified, data, more
return data, more
async def getiter(self, url: str, url_vars: Dict[str, str] = {},
*, accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> AsyncGenerator[Any, None]:
"""Return an async iterable for all the items at a specified endpoint."""
data, more = await self._make_request("GET", url, url_vars, b"", accept,
jwt=jwt, oauth_token=oauth_token)
if isinstance(data, dict) and "items" in data:
data = data["items"]
for item in data:
yield item
if more:
# `yield from` is not supported in coroutines.
async for item in self.getiter(more, url_vars, accept=accept,
jwt=jwt, oauth_token=oauth_token):
yield item
async def post(self, url: str, url_vars: Dict[str, str] = {}, *, data: Any,
accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> Any:
data, _ = await self._make_request("POST", url, url_vars, data, accept,
jwt=jwt, oauth_token=oauth_token)
return data
async def patch(self, url: str, url_vars: Dict[str, str] = {}, *, data: Any,
accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> Any:
data, _ = await self._make_request("PATCH", url, url_vars, data, accept,
jwt=jwt, oauth_token=oauth_token)
return data
async def put(self, url: str, url_vars: Dict[str, str] = {}, *, data: Any = b"",
accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> Any:
data, _ = await self._make_request("PUT", url, url_vars, data, accept,
jwt=jwt, oauth_token=oauth_token)
return data
async def delete(self, url: str, url_vars: Dict[str, str] = {}, *, data: Any = b"",
accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> None:
await self._make_request("DELETE", url, url_vars, data, accept,
jwt=jwt, oauth_token=oauth_token)
|
brettcannon/gidgethub
|
gidgethub/abc.py
|
GitHubAPI.getiter
|
python
|
async def getiter(self, url: str, url_vars: Dict[str, str] = {},
*, accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> AsyncGenerator[Any, None]:
data, more = await self._make_request("GET", url, url_vars, b"", accept,
jwt=jwt, oauth_token=oauth_token)
if isinstance(data, dict) and "items" in data:
data = data["items"]
for item in data:
yield item
if more:
# `yield from` is not supported in coroutines.
async for item in self.getiter(more, url_vars, accept=accept,
jwt=jwt, oauth_token=oauth_token):
yield item
|
Return an async iterable for all the items at a specified endpoint.
|
train
|
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/abc.py#L102-L120
|
[
"async def _make_request(self, method: str, url: str, url_vars: Dict[str, str],\n data: Any, accept: str,\n jwt: Opt[str] = None,\n oauth_token: Opt[str] = None,\n ) -> Tuple[bytes, Opt[str]]:\n \"\"\"Construct and make an HTTP request.\"\"\"\n if oauth_token is not None and jwt is not None:\n raise ValueError(\"Cannot pass both oauth_token and jwt.\")\n filled_url = sansio.format_url(url, url_vars)\n if jwt is not None:\n request_headers = sansio.create_headers(\n self.requester, accept=accept,\n jwt=jwt)\n elif oauth_token is not None:\n request_headers = sansio.create_headers(\n self.requester, accept=accept,\n oauth_token=oauth_token)\n else:\n # fallback to using oauth_token\n request_headers = sansio.create_headers(\n self.requester, accept=accept,\n oauth_token=self.oauth_token)\n cached = cacheable = False\n # Can't use None as a \"no body\" sentinel as it's a legitimate JSON type.\n if data == b\"\":\n body = b\"\"\n request_headers[\"content-length\"] = \"0\"\n if method == \"GET\" and self._cache is not None:\n cacheable = True\n try:\n etag, last_modified, data, more = self._cache[filled_url]\n cached = True\n except KeyError:\n pass\n else:\n if etag is not None:\n request_headers[\"if-none-match\"] = etag\n if last_modified is not None:\n request_headers[\"if-modified-since\"] = last_modified\n else:\n charset = \"utf-8\"\n body = json.dumps(data).encode(charset)\n request_headers['content-type'] = f\"application/json; charset={charset}\"\n request_headers['content-length'] = str(len(body))\n if self.rate_limit is not None:\n self.rate_limit.remaining -= 1\n response = await self._request(method, filled_url, request_headers, body)\n if not (response[0] == 304 and cached):\n data, self.rate_limit, more = sansio.decipher_response(*response)\n has_cache_details = (\"etag\" in response[1]\n or \"last-modified\" in response[1])\n if self._cache is not None and cacheable and has_cache_details:\n etag = response[1].get(\"etag\")\n last_modified = 
response[1].get(\"last-modified\")\n self._cache[filled_url] = etag, last_modified, data, more\n return data, more\n"
] |
class GitHubAPI(abc.ABC):
"""Provide an idiomatic API for making calls to GitHub's API."""
def __init__(self, requester: str, *, oauth_token: Opt[str] = None,
cache: Opt[CACHE_TYPE] = None) -> None:
self.requester = requester
self.oauth_token = oauth_token
self._cache = cache
self.rate_limit: Opt[sansio.RateLimit] = None
@abc.abstractmethod
async def _request(self, method: str, url: str, headers: Mapping[str, str],
body: bytes = b'') -> Tuple[int, Mapping[str, str], bytes]:
"""Make an HTTP request."""
@abc.abstractmethod
async def sleep(self, seconds: float) -> None:
"""Sleep for the specified number of seconds."""
async def _make_request(self, method: str, url: str, url_vars: Dict[str, str],
data: Any, accept: str,
jwt: Opt[str] = None,
oauth_token: Opt[str] = None,
) -> Tuple[bytes, Opt[str]]:
"""Construct and make an HTTP request."""
if oauth_token is not None and jwt is not None:
raise ValueError("Cannot pass both oauth_token and jwt.")
filled_url = sansio.format_url(url, url_vars)
if jwt is not None:
request_headers = sansio.create_headers(
self.requester, accept=accept,
jwt=jwt)
elif oauth_token is not None:
request_headers = sansio.create_headers(
self.requester, accept=accept,
oauth_token=oauth_token)
else:
# fallback to using oauth_token
request_headers = sansio.create_headers(
self.requester, accept=accept,
oauth_token=self.oauth_token)
cached = cacheable = False
# Can't use None as a "no body" sentinel as it's a legitimate JSON type.
if data == b"":
body = b""
request_headers["content-length"] = "0"
if method == "GET" and self._cache is not None:
cacheable = True
try:
etag, last_modified, data, more = self._cache[filled_url]
cached = True
except KeyError:
pass
else:
if etag is not None:
request_headers["if-none-match"] = etag
if last_modified is not None:
request_headers["if-modified-since"] = last_modified
else:
charset = "utf-8"
body = json.dumps(data).encode(charset)
request_headers['content-type'] = f"application/json; charset={charset}"
request_headers['content-length'] = str(len(body))
if self.rate_limit is not None:
self.rate_limit.remaining -= 1
response = await self._request(method, filled_url, request_headers, body)
if not (response[0] == 304 and cached):
data, self.rate_limit, more = sansio.decipher_response(*response)
has_cache_details = ("etag" in response[1]
or "last-modified" in response[1])
if self._cache is not None and cacheable and has_cache_details:
etag = response[1].get("etag")
last_modified = response[1].get("last-modified")
self._cache[filled_url] = etag, last_modified, data, more
return data, more
async def getitem(self, url: str, url_vars: Dict[str, str] = {},
*, accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> Any:
"""Send a GET request for a single item to the specified endpoint."""
data, _ = await self._make_request("GET", url, url_vars, b"", accept,
jwt=jwt, oauth_token=oauth_token)
return data
async def post(self, url: str, url_vars: Dict[str, str] = {}, *, data: Any,
accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> Any:
data, _ = await self._make_request("POST", url, url_vars, data, accept,
jwt=jwt, oauth_token=oauth_token)
return data
async def patch(self, url: str, url_vars: Dict[str, str] = {}, *, data: Any,
accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> Any:
data, _ = await self._make_request("PATCH", url, url_vars, data, accept,
jwt=jwt, oauth_token=oauth_token)
return data
async def put(self, url: str, url_vars: Dict[str, str] = {}, *, data: Any = b"",
accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> Any:
data, _ = await self._make_request("PUT", url, url_vars, data, accept,
jwt=jwt, oauth_token=oauth_token)
return data
async def delete(self, url: str, url_vars: Dict[str, str] = {}, *, data: Any = b"",
accept: str = sansio.accept_format(),
jwt: Opt[str] = None,
oauth_token: Opt[str] = None
) -> None:
await self._make_request("DELETE", url, url_vars, data, accept,
jwt=jwt, oauth_token=oauth_token)
|
brettcannon/gidgethub
|
gidgethub/tornado.py
|
GitHubAPI._request
|
python
|
async def _request(self, method: str, url: str, headers: Mapping[str, str],
body: bytes = b'') -> Tuple[int, Mapping[str, str], bytes]:
if method == "GET" and not body:
real_body = None
else:
real_body = body
request = httpclient.HTTPRequest(url, method, headers, real_body)
# Since Tornado has designed AsyncHTTPClient to be a singleton, there's
# no reason not to simply instantiate it every time.
client = httpclient.AsyncHTTPClient()
response = await client.fetch(request, raise_error=False)
return response.code, response.headers, response.body
|
Make an HTTP request.
|
train
|
https://github.com/brettcannon/gidgethub/blob/24feb6c35bba3966c6cc9ec2896729578f6d7ccc/gidgethub/tornado.py#L11-L23
| null |
class GitHubAPI(gh_abc.GitHubAPI):
async def sleep(self, seconds: float) -> None:
"""Sleep for the specified number of seconds."""
await gen.sleep(seconds)
|
sarugaku/vistir
|
src/vistir/compat.py
|
is_bytes
|
python
|
def is_bytes(string):
if six.PY3 and isinstance(string, (bytes, memoryview, bytearray)): # noqa
return True
elif six.PY2 and isinstance(string, (buffer, bytearray)): # noqa
return True
return False
|
Check if a string is a bytes instance
:param Union[str, bytes] string: A string that may be string or bytes like
:return: Whether the provided string is a bytes type or not
:rtype: bool
|
train
|
https://github.com/sarugaku/vistir/blob/96c008ee62a43608d1e70797f74634cb66a004c1/src/vistir/compat.py#L205-L216
| null |
# -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import codecs
import errno
import os
import sys
import warnings
from tempfile import mkdtemp
import six
from .backports.tempfile import NamedTemporaryFile as _NamedTemporaryFile
__all__ = [
"Path",
"get_terminal_size",
"finalize",
"partialmethod",
"JSONDecodeError",
"FileNotFoundError",
"ResourceWarning",
"PermissionError",
"is_type_checking",
"IS_TYPE_CHECKING",
"IsADirectoryError",
"fs_str",
"lru_cache",
"TemporaryDirectory",
"NamedTemporaryFile",
"to_native_string",
"Iterable",
"Mapping",
"Sequence",
"Set",
"ItemsView",
"fs_encode",
"fs_decode",
"_fs_encode_errors",
"_fs_decode_errors",
]
if sys.version_info >= (3, 5):
from pathlib import Path
else:
from pathlib2 import Path
if six.PY3:
# Only Python 3.4+ is supported
from functools import lru_cache, partialmethod
from tempfile import NamedTemporaryFile
from shutil import get_terminal_size
from weakref import finalize
else:
# Only Python 2.7 is supported
from backports.functools_lru_cache import lru_cache
from .backports.functools import partialmethod # type: ignore
from backports.shutil_get_terminal_size import get_terminal_size
from .backports.surrogateescape import register_surrogateescape
register_surrogateescape()
NamedTemporaryFile = _NamedTemporaryFile
from backports.weakref import finalize # type: ignore
try:
# Introduced Python 3.5
from json import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError # type: ignore
if six.PY2:
from io import BytesIO as StringIO
class ResourceWarning(Warning):
pass
class FileNotFoundError(IOError):
"""No such file or directory"""
def __init__(self, *args, **kwargs):
self.errno = errno.ENOENT
super(FileNotFoundError, self).__init__(*args, **kwargs)
class PermissionError(OSError):
def __init__(self, *args, **kwargs):
self.errno = errno.EACCES
super(PermissionError, self).__init__(*args, **kwargs)
class IsADirectoryError(OSError):
"""The command does not work on directories"""
pass
class FileExistsError(OSError):
def __init__(self, *args, **kwargs):
self.errno = errno.EEXIST
super(FileExistsError, self).__init__(*args, **kwargs)
else:
from builtins import (
ResourceWarning,
FileNotFoundError,
PermissionError,
IsADirectoryError,
FileExistsError,
)
from io import StringIO
six.add_move(
six.MovedAttribute("Iterable", "collections", "collections.abc")
) # type: ignore
six.add_move(
six.MovedAttribute("Mapping", "collections", "collections.abc")
) # type: ignore
six.add_move(
six.MovedAttribute("Sequence", "collections", "collections.abc")
) # type: ignore
six.add_move(six.MovedAttribute("Set", "collections", "collections.abc")) # type: ignore
six.add_move(
six.MovedAttribute("ItemsView", "collections", "collections.abc")
) # type: ignore
# fmt: off
from six.moves import ItemsView, Iterable, Mapping, Sequence, Set # type: ignore # noqa # isort:skip
# fmt: on
if not sys.warnoptions:
warnings.simplefilter("default", ResourceWarning)
def is_type_checking():
try:
from typing import TYPE_CHECKING
except ImportError:
return False
return TYPE_CHECKING
IS_TYPE_CHECKING = is_type_checking()
class TemporaryDirectory(object):
"""
Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix="", prefix=None, dir=None):
if "RAM_DISK" in os.environ:
import uuid
name = uuid.uuid4().hex
dir_name = os.path.join(os.environ["RAM_DISK"].strip(), name)
os.mkdir(dir_name)
self.name = dir_name
else:
suffix = suffix if suffix else ""
if not prefix:
self.name = mkdtemp(suffix=suffix, dir=dir)
else:
self.name = mkdtemp(suffix, prefix, dir)
self._finalizer = finalize(
self,
self._cleanup,
self.name,
warn_message="Implicitly cleaning up {!r}".format(self),
)
@classmethod
def _rmtree(cls, name):
from .path import rmtree
rmtree(name)
@classmethod
def _cleanup(cls, name, warn_message):
cls._rmtree(name)
warnings.warn(warn_message, ResourceWarning)
def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
def __enter__(self):
return self
def __exit__(self, exc, value, tb):
self.cleanup()
def cleanup(self):
if self._finalizer.detach():
self._rmtree(self.name)
def fs_str(string):
"""Encodes a string into the proper filesystem encoding
Borrowed from pip-tools
"""
if isinstance(string, str):
return string
assert not isinstance(string, bytes)
return string.encode(_fs_encoding)
def _get_path(path):
"""
Fetch the string value from a path-like object
Returns **None** if there is no string value.
"""
if isinstance(path, (six.string_types, bytes)):
return path
path_type = type(path)
try:
path_repr = path_type.__fspath__(path)
except AttributeError:
return
if isinstance(path_repr, (six.string_types, bytes)):
return path_repr
return
# copied from the os backport which in turn copied this from
# the pyutf8 package --
# URL: https://github.com/etrepum/pyutf8/blob/master/pyutf8/ref.py
#
def _invalid_utf8_indexes(bytes):
skips = []
i = 0
len_bytes = len(bytes)
while i < len_bytes:
c1 = bytes[i]
if c1 < 0x80:
# U+0000 - U+007F - 7 bits
i += 1
continue
try:
c2 = bytes[i + 1]
if (c1 & 0xE0 == 0xC0) and (c2 & 0xC0 == 0x80):
# U+0080 - U+07FF - 11 bits
c = ((c1 & 0x1F) << 6) | (c2 & 0x3F)
if c < 0x80: # pragma: no cover
# Overlong encoding
skips.extend([i, i + 1]) # pragma: no cover
i += 2
continue
c3 = bytes[i + 2]
if (c1 & 0xF0 == 0xE0) and (c2 & 0xC0 == 0x80) and (c3 & 0xC0 == 0x80):
# U+0800 - U+FFFF - 16 bits
c = ((((c1 & 0x0F) << 6) | (c2 & 0x3F)) << 6) | (c3 & 0x3F)
if (c < 0x800) or (0xD800 <= c <= 0xDFFF):
# Overlong encoding or surrogate.
skips.extend([i, i + 1, i + 2])
i += 3
continue
c4 = bytes[i + 3]
if (
(c1 & 0xF8 == 0xF0)
and (c2 & 0xC0 == 0x80)
and (c3 & 0xC0 == 0x80)
and (c4 & 0xC0 == 0x80)
):
# U+10000 - U+10FFFF - 21 bits
c = ((((((c1 & 0x0F) << 6) | (c2 & 0x3F)) << 6) | (c3 & 0x3F)) << 6) | (
c4 & 0x3F
)
if (c < 0x10000) or (c > 0x10FFFF): # pragma: no cover
# Overlong encoding or invalid code point.
skips.extend([i, i + 1, i + 2, i + 3])
i += 4
continue
except IndexError:
pass
skips.append(i)
i += 1
return skips
# XXX backport: Another helper to support the Python 2 UTF-8 decoding hack.
def _chunks(b, indexes):
i = 0
for j in indexes:
yield b[i:j]
yield b[j : j + 1]
i = j + 1
yield b[i:]
def fs_encode(path):
"""
Encode a filesystem path to the proper filesystem encoding
:param Union[str, bytes] path: A string-like path
:returns: A bytes-encoded filesystem path representation
"""
path = _get_path(path)
if path is None:
raise TypeError("expected a valid path to encode")
if isinstance(path, six.text_type):
if six.PY2:
return b"".join(
(
_byte(ord(c) - 0xDC00)
if 0xDC00 <= ord(c) <= 0xDCFF
else c.encode(_fs_encoding, _fs_encode_errors)
)
for c in path
)
return path.encode(_fs_encoding, _fs_encode_errors)
return path
def fs_decode(path):
"""
Decode a filesystem path using the proper filesystem encoding
:param path: The filesystem path to decode from bytes or string
:return: The filesystem path, decoded with the determined encoding
:rtype: Text
"""
path = _get_path(path)
if path is None:
raise TypeError("expected a valid path to decode")
if isinstance(path, six.binary_type):
if six.PY2:
from array import array
indexes = _invalid_utf8_indexes(array(str("B"), path))
return "".join(
chunk.decode(_fs_encoding, _fs_decode_errors)
for chunk in _chunks(path, indexes)
)
return path.decode(_fs_encoding, _fs_decode_errors)
return path
if sys.version_info[0] < 3:
_fs_encode_errors = "surrogateescape"
_fs_decode_errors = "surrogateescape"
_fs_encoding = "utf-8"
else:
_fs_encoding = "utf-8"
if sys.platform.startswith("win"):
_fs_error_fn = None
if sys.version_info[:2] > (3, 4):
alt_strategy = "surrogatepass"
else:
alt_strategy = "surrogateescape"
else:
if sys.version_info >= (3, 3):
_fs_encoding = sys.getfilesystemencoding()
if not _fs_encoding:
_fs_encoding = sys.getdefaultencoding()
alt_strategy = "surrogateescape"
_fs_error_fn = getattr(sys, "getfilesystemencodeerrors", None)
_fs_encode_errors = _fs_error_fn() if _fs_error_fn else alt_strategy
_fs_decode_errors = _fs_error_fn() if _fs_error_fn else alt_strategy
_byte = chr if sys.version_info < (3,) else lambda i: bytes([i])
def to_native_string(string):
from .misc import to_text, to_bytes
if six.PY2:
return to_bytes(string)
return to_text(string)
|
sarugaku/vistir
|
src/vistir/compat.py
|
fs_encode
|
python
|
def fs_encode(path):
path = _get_path(path)
if path is None:
raise TypeError("expected a valid path to encode")
if isinstance(path, six.text_type):
if six.PY2:
return b"".join(
(
_byte(ord(c) - 0xDC00)
if 0xDC00 <= ord(c) <= 0xDCFF
else c.encode(_fs_encoding, _fs_encode_errors)
)
for c in path
)
return path.encode(_fs_encoding, _fs_encode_errors)
return path
|
Encode a filesystem path to the proper filesystem encoding
:param Union[str, bytes] path: A string-like path
:returns: A bytes-encoded filesystem path representation
|
train
|
https://github.com/sarugaku/vistir/blob/96c008ee62a43608d1e70797f74634cb66a004c1/src/vistir/compat.py#L316-L338
|
[
"def _get_path(path):\n \"\"\"\n Fetch the string value from a path-like object\n\n Returns **None** if there is no string value.\n \"\"\"\n\n if isinstance(path, (six.string_types, bytes)):\n return path\n path_type = type(path)\n try:\n path_repr = path_type.__fspath__(path)\n except AttributeError:\n return\n if isinstance(path_repr, (six.string_types, bytes)):\n return path_repr\n return\n"
] |
# -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import codecs
import errno
import os
import sys
import warnings
from tempfile import mkdtemp
import six
from .backports.tempfile import NamedTemporaryFile as _NamedTemporaryFile
__all__ = [
"Path",
"get_terminal_size",
"finalize",
"partialmethod",
"JSONDecodeError",
"FileNotFoundError",
"ResourceWarning",
"PermissionError",
"is_type_checking",
"IS_TYPE_CHECKING",
"IsADirectoryError",
"fs_str",
"lru_cache",
"TemporaryDirectory",
"NamedTemporaryFile",
"to_native_string",
"Iterable",
"Mapping",
"Sequence",
"Set",
"ItemsView",
"fs_encode",
"fs_decode",
"_fs_encode_errors",
"_fs_decode_errors",
]
if sys.version_info >= (3, 5):
from pathlib import Path
else:
from pathlib2 import Path
if six.PY3:
# Only Python 3.4+ is supported
from functools import lru_cache, partialmethod
from tempfile import NamedTemporaryFile
from shutil import get_terminal_size
from weakref import finalize
else:
# Only Python 2.7 is supported
from backports.functools_lru_cache import lru_cache
from .backports.functools import partialmethod # type: ignore
from backports.shutil_get_terminal_size import get_terminal_size
from .backports.surrogateescape import register_surrogateescape
register_surrogateescape()
NamedTemporaryFile = _NamedTemporaryFile
from backports.weakref import finalize # type: ignore
try:
# Introduced Python 3.5
from json import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError # type: ignore
if six.PY2:
from io import BytesIO as StringIO
class ResourceWarning(Warning):
pass
class FileNotFoundError(IOError):
"""No such file or directory"""
def __init__(self, *args, **kwargs):
self.errno = errno.ENOENT
super(FileNotFoundError, self).__init__(*args, **kwargs)
class PermissionError(OSError):
def __init__(self, *args, **kwargs):
self.errno = errno.EACCES
super(PermissionError, self).__init__(*args, **kwargs)
class IsADirectoryError(OSError):
"""The command does not work on directories"""
pass
class FileExistsError(OSError):
def __init__(self, *args, **kwargs):
self.errno = errno.EEXIST
super(FileExistsError, self).__init__(*args, **kwargs)
else:
from builtins import (
ResourceWarning,
FileNotFoundError,
PermissionError,
IsADirectoryError,
FileExistsError,
)
from io import StringIO
six.add_move(
six.MovedAttribute("Iterable", "collections", "collections.abc")
) # type: ignore
six.add_move(
six.MovedAttribute("Mapping", "collections", "collections.abc")
) # type: ignore
six.add_move(
six.MovedAttribute("Sequence", "collections", "collections.abc")
) # type: ignore
six.add_move(six.MovedAttribute("Set", "collections", "collections.abc")) # type: ignore
six.add_move(
six.MovedAttribute("ItemsView", "collections", "collections.abc")
) # type: ignore
# fmt: off
from six.moves import ItemsView, Iterable, Mapping, Sequence, Set # type: ignore # noqa # isort:skip
# fmt: on
if not sys.warnoptions:
warnings.simplefilter("default", ResourceWarning)
def is_type_checking():
try:
from typing import TYPE_CHECKING
except ImportError:
return False
return TYPE_CHECKING
IS_TYPE_CHECKING = is_type_checking()
class TemporaryDirectory(object):
"""
Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix="", prefix=None, dir=None):
if "RAM_DISK" in os.environ:
import uuid
name = uuid.uuid4().hex
dir_name = os.path.join(os.environ["RAM_DISK"].strip(), name)
os.mkdir(dir_name)
self.name = dir_name
else:
suffix = suffix if suffix else ""
if not prefix:
self.name = mkdtemp(suffix=suffix, dir=dir)
else:
self.name = mkdtemp(suffix, prefix, dir)
self._finalizer = finalize(
self,
self._cleanup,
self.name,
warn_message="Implicitly cleaning up {!r}".format(self),
)
@classmethod
def _rmtree(cls, name):
from .path import rmtree
rmtree(name)
@classmethod
def _cleanup(cls, name, warn_message):
cls._rmtree(name)
warnings.warn(warn_message, ResourceWarning)
def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
def __enter__(self):
return self
def __exit__(self, exc, value, tb):
self.cleanup()
def cleanup(self):
if self._finalizer.detach():
self._rmtree(self.name)
def is_bytes(string):
"""Check if a string is a bytes instance
:param Union[str, bytes] string: A string that may be string or bytes like
:return: Whether the provided string is a bytes type or not
:rtype: bool
"""
if six.PY3 and isinstance(string, (bytes, memoryview, bytearray)): # noqa
return True
elif six.PY2 and isinstance(string, (buffer, bytearray)): # noqa
return True
return False
def fs_str(string):
"""Encodes a string into the proper filesystem encoding
Borrowed from pip-tools
"""
if isinstance(string, str):
return string
assert not isinstance(string, bytes)
return string.encode(_fs_encoding)
def _get_path(path):
"""
Fetch the string value from a path-like object
Returns **None** if there is no string value.
"""
if isinstance(path, (six.string_types, bytes)):
return path
path_type = type(path)
try:
path_repr = path_type.__fspath__(path)
except AttributeError:
return
if isinstance(path_repr, (six.string_types, bytes)):
return path_repr
return
# copied from the os backport which in turn copied this from
# the pyutf8 package --
# URL: https://github.com/etrepum/pyutf8/blob/master/pyutf8/ref.py
#
def _invalid_utf8_indexes(bytes):
skips = []
i = 0
len_bytes = len(bytes)
while i < len_bytes:
c1 = bytes[i]
if c1 < 0x80:
# U+0000 - U+007F - 7 bits
i += 1
continue
try:
c2 = bytes[i + 1]
if (c1 & 0xE0 == 0xC0) and (c2 & 0xC0 == 0x80):
# U+0080 - U+07FF - 11 bits
c = ((c1 & 0x1F) << 6) | (c2 & 0x3F)
if c < 0x80: # pragma: no cover
# Overlong encoding
skips.extend([i, i + 1]) # pragma: no cover
i += 2
continue
c3 = bytes[i + 2]
if (c1 & 0xF0 == 0xE0) and (c2 & 0xC0 == 0x80) and (c3 & 0xC0 == 0x80):
# U+0800 - U+FFFF - 16 bits
c = ((((c1 & 0x0F) << 6) | (c2 & 0x3F)) << 6) | (c3 & 0x3F)
if (c < 0x800) or (0xD800 <= c <= 0xDFFF):
# Overlong encoding or surrogate.
skips.extend([i, i + 1, i + 2])
i += 3
continue
c4 = bytes[i + 3]
if (
(c1 & 0xF8 == 0xF0)
and (c2 & 0xC0 == 0x80)
and (c3 & 0xC0 == 0x80)
and (c4 & 0xC0 == 0x80)
):
# U+10000 - U+10FFFF - 21 bits
c = ((((((c1 & 0x0F) << 6) | (c2 & 0x3F)) << 6) | (c3 & 0x3F)) << 6) | (
c4 & 0x3F
)
if (c < 0x10000) or (c > 0x10FFFF): # pragma: no cover
# Overlong encoding or invalid code point.
skips.extend([i, i + 1, i + 2, i + 3])
i += 4
continue
except IndexError:
pass
skips.append(i)
i += 1
return skips
# XXX backport: Another helper to support the Python 2 UTF-8 decoding hack.
def _chunks(b, indexes):
i = 0
for j in indexes:
yield b[i:j]
yield b[j : j + 1]
i = j + 1
yield b[i:]
def fs_decode(path):
"""
Decode a filesystem path using the proper filesystem encoding
:param path: The filesystem path to decode from bytes or string
:return: The filesystem path, decoded with the determined encoding
:rtype: Text
"""
path = _get_path(path)
if path is None:
raise TypeError("expected a valid path to decode")
if isinstance(path, six.binary_type):
if six.PY2:
from array import array
indexes = _invalid_utf8_indexes(array(str("B"), path))
return "".join(
chunk.decode(_fs_encoding, _fs_decode_errors)
for chunk in _chunks(path, indexes)
)
return path.decode(_fs_encoding, _fs_decode_errors)
return path
if sys.version_info[0] < 3:
_fs_encode_errors = "surrogateescape"
_fs_decode_errors = "surrogateescape"
_fs_encoding = "utf-8"
else:
_fs_encoding = "utf-8"
if sys.platform.startswith("win"):
_fs_error_fn = None
if sys.version_info[:2] > (3, 4):
alt_strategy = "surrogatepass"
else:
alt_strategy = "surrogateescape"
else:
if sys.version_info >= (3, 3):
_fs_encoding = sys.getfilesystemencoding()
if not _fs_encoding:
_fs_encoding = sys.getdefaultencoding()
alt_strategy = "surrogateescape"
_fs_error_fn = getattr(sys, "getfilesystemencodeerrors", None)
_fs_encode_errors = _fs_error_fn() if _fs_error_fn else alt_strategy
_fs_decode_errors = _fs_error_fn() if _fs_error_fn else alt_strategy
_byte = chr if sys.version_info < (3,) else lambda i: bytes([i])
def to_native_string(string):
from .misc import to_text, to_bytes
if six.PY2:
return to_bytes(string)
return to_text(string)
|
sarugaku/vistir
|
src/vistir/compat.py
|
fs_decode
|
python
|
def fs_decode(path):
path = _get_path(path)
if path is None:
raise TypeError("expected a valid path to decode")
if isinstance(path, six.binary_type):
if six.PY2:
from array import array
indexes = _invalid_utf8_indexes(array(str("B"), path))
return "".join(
chunk.decode(_fs_encoding, _fs_decode_errors)
for chunk in _chunks(path, indexes)
)
return path.decode(_fs_encoding, _fs_decode_errors)
return path
|
Decode a filesystem path using the proper filesystem encoding
:param path: The filesystem path to decode from bytes or string
:return: The filesystem path, decoded with the determined encoding
:rtype: Text
|
train
|
https://github.com/sarugaku/vistir/blob/96c008ee62a43608d1e70797f74634cb66a004c1/src/vistir/compat.py#L341-L363
|
[
"def _get_path(path):\n \"\"\"\n Fetch the string value from a path-like object\n\n Returns **None** if there is no string value.\n \"\"\"\n\n if isinstance(path, (six.string_types, bytes)):\n return path\n path_type = type(path)\n try:\n path_repr = path_type.__fspath__(path)\n except AttributeError:\n return\n if isinstance(path_repr, (six.string_types, bytes)):\n return path_repr\n return\n"
] |
# -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import codecs
import errno
import os
import sys
import warnings
from tempfile import mkdtemp
import six
from .backports.tempfile import NamedTemporaryFile as _NamedTemporaryFile
__all__ = [
"Path",
"get_terminal_size",
"finalize",
"partialmethod",
"JSONDecodeError",
"FileNotFoundError",
"ResourceWarning",
"PermissionError",
"is_type_checking",
"IS_TYPE_CHECKING",
"IsADirectoryError",
"fs_str",
"lru_cache",
"TemporaryDirectory",
"NamedTemporaryFile",
"to_native_string",
"Iterable",
"Mapping",
"Sequence",
"Set",
"ItemsView",
"fs_encode",
"fs_decode",
"_fs_encode_errors",
"_fs_decode_errors",
]
if sys.version_info >= (3, 5):
from pathlib import Path
else:
from pathlib2 import Path
if six.PY3:
# Only Python 3.4+ is supported
from functools import lru_cache, partialmethod
from tempfile import NamedTemporaryFile
from shutil import get_terminal_size
from weakref import finalize
else:
# Only Python 2.7 is supported
from backports.functools_lru_cache import lru_cache
from .backports.functools import partialmethod # type: ignore
from backports.shutil_get_terminal_size import get_terminal_size
from .backports.surrogateescape import register_surrogateescape
register_surrogateescape()
NamedTemporaryFile = _NamedTemporaryFile
from backports.weakref import finalize # type: ignore
try:
# Introduced Python 3.5
from json import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError # type: ignore
# Backport the Python 3 OSError subclasses (and StringIO) on Python 2 so the
# rest of the package can catch/raise them uniformly on both versions.
if six.PY2:
    from io import BytesIO as StringIO

    class ResourceWarning(Warning):
        pass

    class FileNotFoundError(IOError):
        """No such file or directory"""

        def __init__(self, *args, **kwargs):
            # Mirror Python 3's FileNotFoundError errno.
            self.errno = errno.ENOENT
            super(FileNotFoundError, self).__init__(*args, **kwargs)

    class PermissionError(OSError):
        def __init__(self, *args, **kwargs):
            self.errno = errno.EACCES
            super(PermissionError, self).__init__(*args, **kwargs)

    class IsADirectoryError(OSError):
        """The command does not work on directories"""

        pass

    class FileExistsError(OSError):
        def __init__(self, *args, **kwargs):
            self.errno = errno.EEXIST
            super(FileExistsError, self).__init__(*args, **kwargs)

else:
    # Python 3: the real builtins already exist; just re-export them.
    from builtins import (
        ResourceWarning,
        FileNotFoundError,
        PermissionError,
        IsADirectoryError,
        FileExistsError,
    )
    from io import StringIO
six.add_move(
six.MovedAttribute("Iterable", "collections", "collections.abc")
) # type: ignore
six.add_move(
six.MovedAttribute("Mapping", "collections", "collections.abc")
) # type: ignore
six.add_move(
six.MovedAttribute("Sequence", "collections", "collections.abc")
) # type: ignore
six.add_move(six.MovedAttribute("Set", "collections", "collections.abc")) # type: ignore
six.add_move(
six.MovedAttribute("ItemsView", "collections", "collections.abc")
) # type: ignore
# fmt: off
from six.moves import ItemsView, Iterable, Mapping, Sequence, Set # type: ignore # noqa # isort:skip
# fmt: on
if not sys.warnoptions:
warnings.simplefilter("default", ResourceWarning)
def is_type_checking():
    """Report the value of ``typing.TYPE_CHECKING``, or ``False`` when the
    ``typing`` module is unavailable."""
    try:
        from typing import TYPE_CHECKING as type_checking_flag
    except ImportError:
        return False
    else:
        return type_checking_flag
IS_TYPE_CHECKING = is_type_checking()
class TemporaryDirectory(object):
    """
    Create and return a temporary directory.  This has the same
    behavior as mkdtemp but can be used as a context manager.  For
    example:

        with TemporaryDirectory() as tmpdir:
            ...

    Upon exiting the context, the directory and everything contained
    in it are removed.
    """

    def __init__(self, suffix="", prefix=None, dir=None):
        # When a RAM_DISK path is provided via the environment, create the
        # directory there (named by a random uuid) instead of using mkdtemp.
        if "RAM_DISK" in os.environ:
            import uuid

            name = uuid.uuid4().hex
            dir_name = os.path.join(os.environ["RAM_DISK"].strip(), name)
            os.mkdir(dir_name)
            self.name = dir_name
        else:
            suffix = suffix if suffix else ""
            if not prefix:
                self.name = mkdtemp(suffix=suffix, dir=dir)
            else:
                self.name = mkdtemp(suffix, prefix, dir)
        # Register a weakref finalizer so the directory is removed (with a
        # ResourceWarning) even if cleanup() is never called explicitly.
        self._finalizer = finalize(
            self,
            self._cleanup,
            self.name,
            warn_message="Implicitly cleaning up {!r}".format(self),
        )

    @classmethod
    def _rmtree(cls, name):
        # Delegated to vistir's rmtree, which handles read-only files.
        from .path import rmtree

        rmtree(name)

    @classmethod
    def _cleanup(cls, name, warn_message):
        # Finalizer target: remove the tree and warn that it was implicit.
        cls._rmtree(name)
        warnings.warn(warn_message, ResourceWarning)

    def __repr__(self):
        return "<{} {!r}>".format(self.__class__.__name__, self.name)

    def __enter__(self):
        return self

    def __exit__(self, exc, value, tb):
        self.cleanup()

    def cleanup(self):
        # detach() returns the finalizer args only if it had not already run;
        # this guarantees the tree is removed at most once.
        if self._finalizer.detach():
            self._rmtree(self.name)
def is_bytes(string):
    """Check if a string is a bytes instance

    :param Union[str, bytes] string: A string that may be string or bytes like
    :return: Whether the provided string is a bytes type or not
    :rtype: bool
    """
    if six.PY3:
        byte_types = (bytes, memoryview, bytearray)
    else:
        byte_types = (buffer, bytearray)  # noqa: F821
    return isinstance(string, byte_types)
def fs_str(string):
    """Encodes a string into the proper filesystem encoding

    Borrowed from pip-tools
    """
    if not isinstance(string, str):
        # Only Python 2 unicode reaches here; bytes input is a caller bug.
        assert not isinstance(string, bytes)
        string = string.encode(_fs_encoding)
    return string
def _get_path(path):
    """
    Fetch the string value from a path-like object

    Returns **None** if there is no string value.
    """
    string_like = (six.string_types, bytes)
    if isinstance(path, string_like):
        return path
    # Look up __fspath__ on the type (not the instance) so instance
    # attributes cannot shadow the protocol, mirroring os.fspath().
    try:
        fspath_result = type(path).__fspath__(path)
    except AttributeError:
        return None
    if isinstance(fspath_result, string_like):
        return fspath_result
    return None
# copied from the os backport which in turn copied this from
# the pyutf8 package --
# URL: https://github.com/etrepum/pyutf8/blob/master/pyutf8/ref.py
#
def _invalid_utf8_indexes(bytes):
    """Return the indexes of bytes that do not form valid UTF-8 sequences.

    Walks the buffer byte by byte; for each position it tries to match a
    1-4 byte UTF-8 sequence, rejecting overlong encodings, surrogates and
    out-of-range code points.  Any byte that cannot start (or complete) a
    valid sequence has its index appended to the result.
    """
    skips = []
    i = 0
    len_bytes = len(bytes)
    while i < len_bytes:
        c1 = bytes[i]
        if c1 < 0x80:
            # U+0000 - U+007F - 7 bits
            i += 1
            continue
        # The continuation-byte reads below may run off the end of the
        # buffer; IndexError means a truncated sequence, handled at the
        # bottom by marking the lead byte invalid.
        try:
            c2 = bytes[i + 1]
            if (c1 & 0xE0 == 0xC0) and (c2 & 0xC0 == 0x80):
                # U+0080 - U+07FF - 11 bits
                c = ((c1 & 0x1F) << 6) | (c2 & 0x3F)
                if c < 0x80:  # pragma: no cover
                    # Overlong encoding
                    skips.extend([i, i + 1])  # pragma: no cover
                i += 2
                continue
            c3 = bytes[i + 2]
            if (c1 & 0xF0 == 0xE0) and (c2 & 0xC0 == 0x80) and (c3 & 0xC0 == 0x80):
                # U+0800 - U+FFFF - 16 bits
                c = ((((c1 & 0x0F) << 6) | (c2 & 0x3F)) << 6) | (c3 & 0x3F)
                if (c < 0x800) or (0xD800 <= c <= 0xDFFF):
                    # Overlong encoding or surrogate.
                    skips.extend([i, i + 1, i + 2])
                i += 3
                continue
            c4 = bytes[i + 3]
            if (
                (c1 & 0xF8 == 0xF0)
                and (c2 & 0xC0 == 0x80)
                and (c3 & 0xC0 == 0x80)
                and (c4 & 0xC0 == 0x80)
            ):
                # U+10000 - U+10FFFF - 21 bits
                c = ((((((c1 & 0x0F) << 6) | (c2 & 0x3F)) << 6) | (c3 & 0x3F)) << 6) | (
                    c4 & 0x3F
                )
                if (c < 0x10000) or (c > 0x10FFFF):  # pragma: no cover
                    # Overlong encoding or invalid code point.
                    skips.extend([i, i + 1, i + 2, i + 3])
                i += 4
                continue
        except IndexError:
            pass
        # Not a valid lead byte, or the sequence was truncated/malformed.
        skips.append(i)
        i += 1
    return skips
# XXX backport: Another helper to support the Python 2 UTF-8 decoding hack.
def _chunks(b, indexes):
i = 0
for j in indexes:
yield b[i:j]
yield b[j : j + 1]
i = j + 1
yield b[i:]
def fs_encode(path):
    """
    Encode a filesystem path to the proper filesystem encoding

    :param Union[str, bytes] path: A string-like path
    :returns: A bytes-encoded filesystem path representation
    :raises TypeError: if *path* is not a valid path-like object
    """
    path = _get_path(path)
    if path is None:
        raise TypeError("expected a valid path to encode")
    if isinstance(path, six.text_type):
        if six.PY2:
            # Python 2: encode char by char.  Code points in the low
            # surrogate range U+DC00-U+DCFF are the surrogateescape markers
            # for undecodable bytes, so map them back to the raw byte value;
            # everything else is encoded normally.
            return b"".join(
                (
                    _byte(ord(c) - 0xDC00)
                    if 0xDC00 <= ord(c) <= 0xDCFF
                    else c.encode(_fs_encoding, _fs_encode_errors)
                )
                for c in path
            )
        return path.encode(_fs_encoding, _fs_encode_errors)
    # Already bytes: pass through unchanged.
    return path
if sys.version_info[0] < 3:
_fs_encode_errors = "surrogateescape"
_fs_decode_errors = "surrogateescape"
_fs_encoding = "utf-8"
else:
_fs_encoding = "utf-8"
if sys.platform.startswith("win"):
_fs_error_fn = None
if sys.version_info[:2] > (3, 4):
alt_strategy = "surrogatepass"
else:
alt_strategy = "surrogateescape"
else:
if sys.version_info >= (3, 3):
_fs_encoding = sys.getfilesystemencoding()
if not _fs_encoding:
_fs_encoding = sys.getdefaultencoding()
alt_strategy = "surrogateescape"
_fs_error_fn = getattr(sys, "getfilesystemencodeerrors", None)
_fs_encode_errors = _fs_error_fn() if _fs_error_fn else alt_strategy
_fs_decode_errors = _fs_error_fn() if _fs_error_fn else alt_strategy
_byte = chr if sys.version_info < (3,) else lambda i: bytes([i])
def to_native_string(string):
    """Return *string* as the native ``str`` type for this interpreter
    (bytes on Python 2, text on Python 3)."""
    from .misc import to_text, to_bytes

    coerce = to_bytes if six.PY2 else to_text
    return coerce(string)
|
sarugaku/vistir
|
src/vistir/termcolors.py
|
colored
|
python
|
def colored(text, color=None, on_color=None, attrs=None):
    """Colorize *text* for terminal output.

    Thin termcolor-compatible wrapper that maps ``color``/``on_color``
    onto :func:`colorize`'s ``fg``/``bg`` parameters.

    :param str text: Text to colorize
    :param str color: Foreground color name (e.g. ``"red"``), optional
    :param str on_color: Background color name (e.g. ``"on_grey"``), optional
    :param attrs: Attribute name(s) such as ``"bold"``, optional
    :return: The colorized text
    """
    return colorize(text, fg=color, bg=on_color, attrs=attrs)
|
Colorize text using a reimplementation of the colorizer from
https://github.com/pavdmyt/yaspin so that it works on windows.
Available text colors:
red, green, yellow, blue, magenta, cyan, white.
Available text highlights:
on_red, on_green, on_yellow, on_blue, on_magenta, on_cyan, on_white.
Available attributes:
bold, dark, underline, blink, reverse, concealed.
Example:
colored('Hello, World!', 'red', 'on_grey', ['blue', 'blink'])
colored('Hello, World!', 'green')
|
train
|
https://github.com/sarugaku/vistir/blob/96c008ee62a43608d1e70797f74634cb66a004c1/src/vistir/termcolors.py#L57-L74
|
[
"def colorize(text, fg=None, bg=None, attrs=None):\n if os.getenv(\"ANSI_COLORS_DISABLED\") is None:\n style = \"NORMAL\"\n if attrs is not None and not isinstance(attrs, list):\n _attrs = []\n if isinstance(attrs, six.string_types):\n _attrs.append(attrs)\n else:\n _attrs = list(attrs)\n attrs = _attrs\n if attrs and \"bold\" in attrs:\n style = \"BRIGHT\"\n attrs.remove(\"bold\")\n if fg is not None:\n fg = fg.upper()\n text = to_native_string(\"%s%s%s%s%s\") % (\n to_native_string(getattr(colorama.Fore, fg)),\n to_native_string(getattr(colorama.Style, style)),\n to_native_string(text),\n to_native_string(colorama.Fore.RESET),\n to_native_string(colorama.Style.NORMAL),\n )\n\n if bg is not None:\n bg = bg.upper()\n text = to_native_string(\"%s%s%s%s\") % (\n to_native_string(getattr(colorama.Back, bg)),\n to_native_string(text),\n to_native_string(colorama.Back.RESET),\n to_native_string(colorama.Style.NORMAL),\n )\n\n if attrs is not None:\n fmt_str = to_native_string(\"%s[%%dm%%s%s[9m\") % (chr(27), chr(27))\n for attr in attrs:\n text = fmt_str % (ATTRIBUTES[attr], text)\n\n text += RESET\n else:\n text = ANSI_REMOVAL_RE.sub(\"\", text)\n return text\n"
] |
# -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import colorama
import six
from .compat import to_native_string
DISABLE_COLORS = os.getenv("CI", False) or os.getenv(
"ANSI_COLORS_DISABLED", os.getenv("VISTIR_DISABLE_COLORS", False)
)
ATTRIBUTE_NAMES = ["bold", "dark", "", "underline", "blink", "", "reverse", "concealed"]
ATTRIBUTES = dict(zip(ATTRIBUTE_NAMES, range(1, 9)))
del ATTRIBUTES[""]
colors = ["grey", "red", "green", "yellow", "blue", "magenta", "cyan", "white"]
COLORS = dict(zip(colors, range(30, 38)))
HIGHLIGHTS = dict(zip(["on_{0}".format(c) for c in colors], range(40, 48)))
ANSI_REMOVAL_RE = re.compile(r"\033\[((?:\d|;)*)([a-zA-Z])")
COLOR_MAP = {
# name: type
"blink": "attrs",
"bold": "attrs",
"concealed": "attrs",
"dark": "attrs",
"reverse": "attrs",
"underline": "attrs",
"blue": "color",
"cyan": "color",
"green": "color",
"magenta": "color",
"red": "color",
"white": "color",
"yellow": "color",
"on_blue": "on_color",
"on_cyan": "on_color",
"on_green": "on_color",
"on_grey": "on_color",
"on_magenta": "on_color",
"on_red": "on_color",
"on_white": "on_color",
"on_yellow": "on_color",
}
COLOR_ATTRS = COLOR_MAP.keys()
RESET = colorama.Style.RESET_ALL
def colorize(text, fg=None, bg=None, attrs=None):
    """Wrap *text* in ANSI escape sequences via colorama.

    :param str text: Text to decorate
    :param str fg: Foreground color name (a ``COLORS`` key), optional
    :param str bg: Background color name, optional
    :param attrs: Attribute name(s) such as ``"bold"``; string or iterable
    :return: *text* with escape codes applied, or with existing escape
        codes stripped when ``ANSI_COLORS_DISABLED`` is set
    """
    if os.getenv("ANSI_COLORS_DISABLED") is None:
        style = "NORMAL"
        # Normalize attrs into a list so membership tests and remove() work.
        if attrs is not None and not isinstance(attrs, list):
            _attrs = []
            if isinstance(attrs, six.string_types):
                _attrs.append(attrs)
            else:
                _attrs = list(attrs)
            attrs = _attrs
        # "bold" is expressed through colorama's BRIGHT style rather than a
        # raw ANSI attribute, so pull it out of the attrs list.
        if attrs and "bold" in attrs:
            style = "BRIGHT"
            attrs.remove("bold")
        if fg is not None:
            fg = fg.upper()
            text = to_native_string("%s%s%s%s%s") % (
                to_native_string(getattr(colorama.Fore, fg)),
                to_native_string(getattr(colorama.Style, style)),
                to_native_string(text),
                to_native_string(colorama.Fore.RESET),
                to_native_string(colorama.Style.NORMAL),
            )

        if bg is not None:
            bg = bg.upper()
            text = to_native_string("%s%s%s%s") % (
                to_native_string(getattr(colorama.Back, bg)),
                to_native_string(text),
                to_native_string(colorama.Back.RESET),
                to_native_string(colorama.Style.NORMAL),
            )

        if attrs is not None:
            # ESC[<n>m ... ESC[9m — each remaining attribute wraps the text.
            fmt_str = to_native_string("%s[%%dm%%s%s[9m") % (chr(27), chr(27))
            for attr in attrs:
                text = fmt_str % (ATTRIBUTES[attr], text)

        text += RESET
    else:
        # Colors disabled: strip any ANSI sequences already present instead.
        text = ANSI_REMOVAL_RE.sub("", text)
    return text
def cprint(text, color=None, on_color=None, attrs=None, **kwargs):
    """Print colorized text.

    Accepts the keyword arguments of the built-in ``print`` function.
    """
    rendered = colored(text, color, on_color, attrs)
    print(rendered, **kwargs)
|
sarugaku/vistir
|
src/vistir/misc.py
|
partialclass
|
python
|
def partialclass(cls, *args, **kwargs):
    """Return a partially instantiated class.

    Builds a subclass of *cls* whose ``__init__`` is pre-filled with the
    supplied positional and keyword arguments via
    :class:`functools.partialmethod`.

    :return: A partial class instance
    :rtype: cls
    """
    candidate_names = (
        getattr(cls, attr, str(cls)) for attr in ("__name__", "__qualname__")
    )
    class_name = [name for name in candidate_names if name is not None][0]
    namespace = {"__init__": partialmethod(cls.__init__, *args, **kwargs)}
    new_class = type(class_name, (cls,), namespace)
    # Swiped from attrs.make_class: attribute the class to the caller's
    # module rather than this one.
    try:
        new_class.__module__ = sys._getframe(1).f_globals.get("__name__", "__main__")
    except (AttributeError, ValueError):  # pragma: no cover
        pass  # pragma: no cover
    return new_class
|
Returns a partially instantiated class
:return: A partial class instance
:rtype: cls
>>> source = partialclass(Source, url="https://pypi.org/simple")
>>> source
<class '__main__.Source'>
>>> source(name="pypi")
>>> source.__dict__
mappingproxy({'__module__': '__main__', '__dict__': <attribute '__dict__' of 'Source' objects>, '__weakref__': <attribute '__weakref__' of 'Source' objects>, '__doc__': None, '__init__': functools.partialmethod(<function Source.__init__ at 0x7f23af429bf8>, , url='https://pypi.org/simple')})
>>> new_source = source(name="pypi")
>>> new_source
<__main__.Source object at 0x7f23af189b38>
>>> new_source.__dict__
{'url': 'https://pypi.org/simple', 'verify_ssl': True, 'name': 'pypi'}
|
train
|
https://github.com/sarugaku/vistir/blob/96c008ee62a43608d1e70797f74634cb66a004c1/src/vistir/misc.py#L371-L404
| null |
# -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import io
import json
import locale
import logging
import os
import subprocess
import sys
from collections import OrderedDict
from functools import partial
from itertools import islice, tee
from weakref import WeakKeyDictionary
import six
from .cmdparse import Script
from .compat import (
Iterable,
Path,
StringIO,
fs_str,
is_bytes,
partialmethod,
to_native_string,
)
from .contextmanagers import spinner as spinner
from .termcolors import ANSI_REMOVAL_RE, colorize
if os.name != "nt":
class WindowsError(OSError):
pass
__all__ = [
"shell_escape",
"unnest",
"dedup",
"run",
"load_path",
"partialclass",
"to_text",
"to_bytes",
"locale_encoding",
"chunked",
"take",
"divide",
"getpreferredencoding",
"decode_for_output",
"get_canonical_encoding_name",
"get_wrapped_stream",
"StreamWrapper",
]
def _get_logger(name=None, level="ERROR"):
    """Return a logger that writes formatted records to stderr.

    :param Optional[str] name: Logger name; defaults to this module's name.
    :param Union[str, int] level: A level name such as ``"DEBUG"`` or a
        numeric logging level.
    :return: The configured logger.
    :rtype: :class:`logging.Logger`
    """
    if not name:
        name = __name__
    if isinstance(level, six.string_types):
        level = getattr(logging, level.upper())
    logger = logging.getLogger(name)
    logger.setLevel(level)
    formatter = logging.Formatter(
        "%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s"
    )
    handler = logging.StreamHandler(stream=sys.stderr)
    handler.setFormatter(formatter)
    # logging.getLogger returns the same object for the same name, so attach
    # the handler only once; previously every call added another handler,
    # duplicating each emitted record.
    if not logger.handlers:
        logger.addHandler(handler)
    return logger
def shell_escape(cmd):
    """Escape strings for use in :func:`~subprocess.Popen` and :func:`run`.

    This is a passthrough method for instantiating a :class:`~vistir.cmdparse.Script`
    object which can be used to escape commands to output as a single string.
    """
    return Script.parse(cmd).cmdify()
def unnest(elem):
    """Flatten an arbitrarily nested iterable

    :param elem: An iterable to flatten
    :type elem: :class:`~collections.Iterable`

    >>> nested_iterable = (1234, (3456, 4398345, (234234)), (2396, (23895750, 9283798, 29384, (289375983275, 293759, 2347, (2098, 7987, 27599)))))
    >>> list(vistir.misc.unnest(nested_iterable))
    [1234, 3456, 4398345, 234234, 2396, 23895750, 9283798, 29384, 289375983275, 293759, 2347, 2098, 7987, 27599]
    """
    # Strings are iterable but are treated as scalars throughout.
    if isinstance(elem, Iterable) and not isinstance(elem, six.string_types):
        # tee() so the caller's iterator is not consumed by our traversal.
        elem, target = tee(elem, 2)
    else:
        target = elem
    for el in target:
        if isinstance(el, Iterable) and not isinstance(el, six.string_types):
            # Recurse into the copy so nested one-shot iterators survive.
            el, el_copy = tee(el, 2)
            for sub in unnest(el_copy):
                yield sub
        else:
            yield el
def _is_iterable(elem):
if getattr(elem, "__iter__", False):
return True
return False
def dedup(iterable):
    """Deduplicate an iterable object like iter(set(iterable)) but
    order-preserving.
    """
    seen = OrderedDict()
    for item in iterable:
        seen.setdefault(item, None)
    return iter(seen)
def _spawn_subprocess(script, env=None, block=True, cwd=None, combine_stderr=True):
    """Launch *script* (a parsed Script) via subprocess.Popen.

    Resolves the executable with distutils' find_executable; falls back to
    ``shell=True`` when the command cannot be resolved (possibly a shell
    built-in) or when Windows refuses to execute it directly.
    """
    from distutils.spawn import find_executable

    if not env:
        env = os.environ.copy()
    command = find_executable(script.command)
    options = {
        "env": env,
        "universal_newlines": True,
        "stdout": subprocess.PIPE,
        "stderr": subprocess.PIPE if not combine_stderr else subprocess.STDOUT,
        "shell": False,
    }
    # Non-blocking callers get a stdin pipe (closed later by the caller).
    if not block:
        options["stdin"] = subprocess.PIPE
    if cwd:
        options["cwd"] = cwd
    # Command not found, maybe this is a shell built-in?
    cmd = [command] + script.args
    if not command:  # Try to use CreateProcess directly if possible.
        cmd = script.cmdify()
        options["shell"] = True
    # Try to use CreateProcess directly if possible. Specifically catch
    # Windows error 193 "Command is not a valid Win32 application" to handle
    # a "command" that is non-executable. See pypa/pipenv#2727.
    try:
        return subprocess.Popen(cmd, **options)
    except WindowsError as e:
        if getattr(e, "winerror", 9999) != 193:
            raise
        options["shell"] = True
        # Try shell mode to use Windows's file association for file launch.
        return subprocess.Popen(script.cmdify(), **options)
def _read_streams(stream_dict):
    """Read one line from each stream in *stream_dict*.

    :param dict stream_dict: Mapping of stream name to file-like stream
        (values may be ``None``).
    :return: Mapping of stream name to the rstripped text of the line read,
        or ``None`` when the stream is missing or exhausted.
    :rtype: dict
    """
    results = {}
    for name, stream in stream_dict.items():
        line = to_text(stream.readline()) if stream else None
        if line:
            results[name] = to_text("{0}".format(line.rstrip()))
        else:
            results[name] = None
    return results
def get_stream_results(cmd_instance, verbose, maxlen, spinner=None, stdout_allowed=False):
    """Drain stdout/stderr of a running Popen, collecting lines per stream.

    :param cmd_instance: A :class:`subprocess.Popen`-like object with
        ``stdout``/``stderr`` attributes.
    :param bool verbose: When true, echo each line as it is read.
    :param int maxlen: Maximum display width; longer lines are truncated
        with ``...`` for display only (stored lines are untruncated).
    :param spinner: Optional spinner whose text mirrors the latest output.
    :param bool stdout_allowed: When true, stdout lines may be echoed to
        stdout rather than stderr.
    :return: ``{"stdout": [...], "stderr": [...]}`` of collected lines.
    :rtype: dict
    """
    stream_results = {"stdout": [], "stderr": []}
    streams = {"stderr": cmd_instance.stderr, "stdout": cmd_instance.stdout}
    while True:
        stream_contents = _read_streams(streams)
        stdout_line = stream_contents["stdout"]
        stderr_line = stream_contents["stderr"]
        # Both streams exhausted -> done.
        if not (stdout_line or stderr_line):
            break
        for stream_name in stream_contents.keys():
            if stream_contents[stream_name] and stream_name in stream_results:
                line = stream_contents[stream_name]
                stream_results[stream_name].append(line)
                display_line = fs_str("{0}".format(line))
                if len(display_line) > maxlen:
                    display_line = "{0}...".format(display_line[:maxlen])
                if verbose:
                    # stdout is only echoed to stdout when explicitly allowed.
                    use_stderr = not stdout_allowed or stream_name != "stdout"
                    if spinner:
                        target = spinner.stderr if use_stderr else spinner.stdout
                        spinner.hide_and_write(display_line, target=target)
                    else:
                        target = sys.stderr if use_stderr else sys.stdout
                        target.write(display_line)
                        target.flush()
                if spinner:
                    # Keep the spinner caption in sync with the latest line.
                    spinner.text = to_native_string(
                        "{0} {1}".format(spinner.text, display_line)
                    )
                continue
    return stream_results
def _create_subprocess(
    cmd,
    env=None,
    block=True,
    return_object=False,
    cwd=os.curdir,
    verbose=False,
    spinner=None,
    combine_stderr=False,
    display_limit=200,
    start_text="",
    write_to_stdout=True,
):
    """Spawn *cmd* and collect its output, optionally streaming it.

    Non-blocking mode drains the pipes incrementally via
    :func:`get_stream_results`; blocking mode uses ``communicate()``.
    Returns either ``(out, err)`` text or the Popen object itself when
    *return_object* is true.
    """
    if not env:
        env = os.environ.copy()
    try:
        c = _spawn_subprocess(
            cmd, env=env, block=block, cwd=cwd, combine_stderr=combine_stderr
        )
    except Exception as exc:
        import traceback

        formatted_tb = "".join(traceback.format_exception(*sys.exc_info()))  # pragma: no cover
        sys.stderr.write(  # pragma: no cover
            "Error while executing command %s:" % to_native_string(" ".join(cmd._parts))  # pragma: no cover
        )  # pragma: no cover
        sys.stderr.write(formatted_tb)  # pragma: no cover
        raise exc  # pragma: no cover
    if not block:
        # We never feed stdin; close it so the child sees EOF immediately.
        c.stdin.close()
        spinner_orig_text = ""
        if spinner and getattr(spinner, "text", None) is not None:
            spinner_orig_text = spinner.text
        if not spinner_orig_text and start_text is not None:
            spinner_orig_text = start_text
        stream_results = get_stream_results(
            c,
            verbose=verbose,
            maxlen=display_limit,
            spinner=spinner,
            stdout_allowed=write_to_stdout,
        )
        try:
            c.wait()
        finally:
            # Always release the pipe handles, even if wait() raises.
            if c.stdout:
                c.stdout.close()
            if c.stderr:
                c.stderr.close()
        if spinner:
            if c.returncode > 0:
                spinner.fail(to_native_string("Failed...cleaning up..."))
            if not os.name == "nt":
                spinner.ok(to_native_string("✔ Complete"))
            else:
                spinner.ok(to_native_string("Complete"))
        output = stream_results["stdout"]
        err = stream_results["stderr"]
        c.out = "\n".join(output) if output else ""
        c.err = "\n".join(err) if err else ""
    else:
        c.out, c.err = c.communicate()
    if not block:
        c.wait()
    # Normalize captured output to text; empty string when nothing captured.
    c.out = to_text("{0}".format(c.out)) if c.out else fs_str("")
    c.err = to_text("{0}".format(c.err)) if c.err else fs_str("")
    if not return_object:
        return c.out.strip(), c.err.strip()
    return c
def run(
    cmd,
    env=None,
    return_object=False,
    block=True,
    cwd=None,
    verbose=False,
    nospin=False,
    spinner_name=None,
    combine_stderr=True,
    display_limit=200,
    write_to_stdout=True,
):
    """Use `subprocess.Popen` to get the output of a command and decode it.

    :param list cmd: A list representing the command you want to run.
    :param dict env: Additional environment settings to pass through to the subprocess.
    :param bool return_object: When True, returns the whole subprocess instance
    :param bool block: When False, returns a potentially still-running :class:`subprocess.Popen` instance
    :param str cwd: Current working directory context to use for spawning the subprocess.
    :param bool verbose: Whether to print stdout in real time when non-blocking.
    :param bool nospin: Whether to disable the cli spinner.
    :param str spinner_name: The name of the spinner to use if enabled, defaults to bouncingBar
    :param bool combine_stderr: Optionally merge stdout and stderr in the subprocess, false if nonblocking.
    :param int display_limit: The max width of output lines to display when using a spinner.
    :param bool write_to_stdout: Whether to write to stdout when using a spinner, default True.
    :returns: A 2-tuple of (output, error) or a :class:`subprocess.Popen` object.

    .. Warning:: Merging standard out and standard error in a nonblocking subprocess
        can cause errors in some cases and may not be ideal. Consider disabling
        this functionality.
    """
    _env = os.environ.copy()
    if env:
        _env.update(env)
    # Environment values must be native filesystem strings on each version.
    if six.PY2:
        fs_encode = partial(to_bytes, encoding=locale_encoding)
        _env = {fs_encode(k): fs_encode(v) for k, v in _env.items()}
    else:
        _env = {k: fs_str(v) for k, v in _env.items()}
    if not spinner_name:
        spinner_name = "bouncingBar"
    if six.PY2:
        if isinstance(cmd, six.string_types):
            cmd = cmd.encode("utf-8")
        elif isinstance(cmd, (list, tuple)):
            cmd = [c.encode("utf-8") for c in cmd]
    if not isinstance(cmd, Script):
        cmd = Script.parse(cmd)
    # Merging streams is only supported for the non-blocking object case.
    if block or not return_object:
        combine_stderr = False
    start_text = ""
    with spinner(
        spinner_name=spinner_name,
        start_text=start_text,
        nospin=nospin,
        write_to_stdout=write_to_stdout,
    ) as sp:
        return _create_subprocess(
            cmd,
            env=_env,
            return_object=return_object,
            block=block,
            cwd=cwd,
            verbose=verbose,
            spinner=sp,
            combine_stderr=combine_stderr,
            start_text=start_text,
            write_to_stdout=True,
        )
def load_path(python):
    """Load the :mod:`sys.path` from the given python executable's environment as json

    :param str python: Path to a valid python executable
    :return: A python representation of the `sys.path` value of the given python executable.
    :rtype: list

    >>> load_path("/home/user/.virtualenvs/requirementslib-5MhGuG3C/bin/python")
    ['', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python37.zip', ...]
    """
    interpreter = Path(python).as_posix()
    out, err = run(
        [interpreter, "-c", "import json, sys; print(json.dumps(sys.path))"], nospin=True
    )
    return json.loads(out) if out else []
# Borrowed from django -- force bytes and decode -- see link for details:
# https://github.com/django/django/blob/fc6b90b/django/utils/encoding.py#L112
def to_bytes(string, encoding="utf-8", errors=None):
    """Force a value to bytes.

    :param string: Some input that can be converted to a bytes.
    :type string: str or bytes unicode or a memoryview subclass
    :param encoding: The encoding to use for conversions, defaults to "utf-8"
    :param encoding: str, optional
    :return: Corresponding byte representation (for use in filesystem operations)
    :rtype: bytes
    """
    unicode_name = get_canonical_encoding_name("utf-8")
    # Choose a default error handler: surrogate handling for utf-8 targets
    # (platform dependent), strict for anything else.
    if not errors:
        if get_canonical_encoding_name(encoding) == unicode_name:
            if six.PY3 and os.name == "nt":
                errors = "surrogatepass"
            else:
                errors = "surrogateescape" if six.PY3 else "ignore"
        else:
            errors = "strict"
    if isinstance(string, bytes):
        if get_canonical_encoding_name(encoding) == unicode_name:
            return string
        else:
            # Re-encode bytes through utf-8 text into the target encoding.
            return string.decode(unicode_name).encode(encoding, errors)
    elif isinstance(string, memoryview):
        return bytes(string)
    elif not isinstance(string, six.string_types):
        try:
            if six.PY3:
                return six.text_type(string).encode(encoding, errors)
            else:
                return bytes(string)
        except UnicodeEncodeError:
            # NOTE(review): iterating an exception instance only works on
            # Python 2 (via args indexing) — this branch appears py2-only.
            if isinstance(string, Exception):
                return b" ".join(to_bytes(arg, encoding, errors) for arg in string)
            return six.text_type(string).encode(encoding, errors)
    else:
        return string.encode(encoding, errors)
def to_text(string, encoding="utf-8", errors=None):
    """Force a value to a text-type.

    :param string: Some input that can be converted to a unicode representation.
    :type string: str or bytes unicode
    :param encoding: The encoding to use for conversions, defaults to "utf-8"
    :param encoding: str, optional
    :return: The unicode representation of the string
    :rtype: str
    """
    unicode_name = get_canonical_encoding_name("utf-8")
    # Default error handler mirrors to_bytes: surrogate handling for utf-8,
    # strict otherwise.
    if not errors:
        if get_canonical_encoding_name(encoding) == unicode_name:
            if six.PY3 and os.name == "nt":
                errors = "surrogatepass"
            else:
                errors = "surrogateescape" if six.PY3 else "ignore"
        else:
            errors = "strict"
    if issubclass(type(string), six.text_type):
        return string
    try:
        if not issubclass(type(string), six.string_types):
            if six.PY3:
                if isinstance(string, bytes):
                    string = six.text_type(string, encoding, errors)
                else:
                    string = six.text_type(string)
            elif hasattr(string, "__unicode__"):
                string = six.text_type(string)
            else:
                string = six.text_type(bytes(string), encoding, errors)
        else:
            string = string.decode(encoding, errors)
    except UnicodeDecodeError:
        # Fall back to converting element by element (e.g. exception args).
        string = " ".join(to_text(arg, encoding, errors) for arg in string)
    return string
def divide(n, iterable):
    """
    split an iterable into n groups, per https://more-itertools.readthedocs.io/en/latest/api.html#grouping

    :param int n: Number of unique groups
    :param iter iterable: An iterable to split up
    :return: a list of new iterables derived from the original iterable
    :rtype: list
    """
    items = tuple(iterable)
    base, remainder = divmod(len(items), n)
    groups = []
    boundary = 0
    for index in range(n):
        # The first `remainder` groups each take one extra element.
        size = base + (1 if index < remainder else 0)
        groups.append(iter(items[boundary:boundary + size]))
        boundary += size
    return groups
def take(n, iterable):
    """Take the first *n* elements from *iterable*, consuming only those.

    :param int n: Number of elements to take
    :param iter iterable: An iterable to take from

    from https://github.com/erikrose/more-itertools/blob/master/more_itertools/recipes.py
    """
    prefix = islice(iterable, n)
    return list(prefix)
def chunked(n, iterable):
    """Split an iterable into lists of length *n*.

    The final chunk may be shorter than *n*; iteration stops when ``take``
    returns an empty list (the two-argument ``iter(callable, sentinel)``
    form).

    :param int n: Number of elements per chunk
    :param iter iterable: An iterable to split up

    from https://github.com/erikrose/more-itertools/blob/master/more_itertools/more.py
    """
    return iter(partial(take, n, iter(iterable)), [])
try:
locale_encoding = locale.getdefaultlocale()[1] or "ascii"
except Exception:
locale_encoding = "ascii"
def getpreferredencoding():
    """Determine the proper output encoding for terminal rendering"""

    # Borrowed from Invoke
    # (see https://github.com/pyinvoke/invoke/blob/93af29d/invoke/runners.py#L881)
    _encoding = locale.getpreferredencoding(False)
    if six.PY2 and not sys.platform == "win32":
        # On POSIX Python 2, prefer the locale's default encoding when set.
        _default_encoding = locale.getdefaultlocale()[1]
        if _default_encoding is not None:
            _encoding = _default_encoding
    return _encoding
PREFERRED_ENCODING = getpreferredencoding()
def get_output_encoding(source_encoding):
    """
    Given a source encoding, determine the preferred output encoding.

    :param str source_encoding: The encoding of the source material.
    :returns: The output encoding to decode to.
    :rtype: str
    """
    if source_encoding is None:
        return get_canonical_encoding_name(PREFERRED_ENCODING)
    canonical = get_canonical_encoding_name(source_encoding)
    # ASCII is promoted to utf-8 so non-ASCII output does not blow up.
    return "utf-8" if canonical == "ascii" else canonical
def _encode(output, encoding=None, errors=None, translation_map=None):
    """Best-effort encode of *output* to *encoding*.

    On encode/decode failure, fall back to a character translation map if
    provided, otherwise coerce to text; objects without ``encode`` pass
    through unchanged.
    """
    if encoding is None:
        encoding = PREFERRED_ENCODING
    try:
        output = output.encode(encoding)
    except (UnicodeDecodeError, UnicodeEncodeError):
        if translation_map is not None:
            if six.PY2:
                # Python 2: unbound unicode.translate over the text form.
                output = unicode.translate(  # noqa: F821
                    to_text(output, encoding=encoding, errors=errors), translation_map
                )
            else:
                output = output.translate(translation_map)
        else:
            output = to_text(output, encoding=encoding, errors=errors)
    except AttributeError:
        # Not a string-like object; leave it as-is.
        pass
    return output
def decode_for_output(output, target_stream=None, translation_map=None):
    """Given a string, decode it for output to a terminal

    :param str output: A string to print to a terminal
    :param target_stream: A stream to write to, we will encode to target this stream if possible.
    :param dict translation_map: A mapping of unicode character ordinals to replacement strings.
    :return: A re-encoded string using the preferred encoding
    :rtype: str
    """
    if not isinstance(output, six.string_types):
        return output
    encoding = None
    # Prefer the destination stream's own encoding when it declares one.
    if target_stream is not None:
        encoding = getattr(target_stream, "encoding", None)
    encoding = get_output_encoding(encoding)
    try:
        output = _encode(output, encoding=encoding, translation_map=translation_map)
    except (UnicodeDecodeError, UnicodeEncodeError):
        # Last resort: coerce to the native string type and encode with
        # replacement characters.
        output = to_native_string(output)
        output = _encode(
            output, encoding=encoding, errors="replace", translation_map=translation_map
        )
    return to_text(output, encoding=encoding, errors="replace")
def get_canonical_encoding_name(name):
    # type: (str) -> str
    """Resolve *name* to its canonical codec name via a codec lookup.

    :param str name: The name of the codec to look up
    :return: The canonical codec name, or *name* unchanged when no codec
        by that name exists
    :rtype: str
    """
    import codecs

    try:
        return codecs.lookup(name).name
    except LookupError:
        return name
def _is_binary_buffer(stream):
try:
stream.write(b"")
except Exception:
try:
stream.write("")
except Exception:
pass
return False
return True
def _get_binary_buffer(stream):
    # On Python 3 a text stream usually exposes its underlying binary
    # buffer as ``.buffer``; return that when the stream itself does not
    # accept bytes.  May return None when no binary buffer exists.
    if six.PY3 and not _is_binary_buffer(stream):
        stream = getattr(stream, "buffer", None)
        if stream is not None and _is_binary_buffer(stream):
            return stream
    return stream
def get_wrapped_stream(stream, encoding=None, errors="replace"):
    """
    Given a stream, wrap it in a `StreamWrapper` instance and return the wrapped stream.

    :param stream: A stream instance to wrap
    :param str encoding: The encoding to use for the stream
    :param str errors: The error handler to use, default "replace"
    :returns: A new, wrapped stream
    :rtype: :class:`StreamWrapper`
    :raises TypeError: if *stream* is ``None``
    """
    if stream is None:
        raise TypeError("must provide a stream to wrap")
    # Prefer the stream's binary buffer so we control the text decoding.
    stream = _get_binary_buffer(stream)
    if stream is not None and encoding is None:
        encoding = "utf-8"
    if not encoding:
        encoding = get_output_encoding(stream)
    else:
        encoding = get_canonical_encoding_name(encoding)
    return StreamWrapper(stream, encoding, errors, line_buffering=True)
class StreamWrapper(io.TextIOWrapper):
    """
    This wrapper class will wrap a provided stream and supply an interface
    for compatibility.
    """

    def __init__(self, stream, encoding, errors, line_buffering=True, **kwargs):
        # Wrap in _StreamProvider so partially-implemented streams still
        # satisfy TextIOWrapper's expected buffer protocol.
        self._stream = stream = _StreamProvider(stream)
        io.TextIOWrapper.__init__(
            self, stream, encoding, errors, line_buffering=line_buffering, **kwargs
        )

    # borrowed from click's implementation of stream wrappers, see
    # https://github.com/pallets/click/blob/6cafd32/click/_compat.py#L64
    if six.PY2:

        def write(self, x):
            if isinstance(x, (str, buffer, bytearray)):  # noqa: F821
                try:
                    self.flush()
                except Exception:
                    pass
                # This is modified from the initial implementation to rely on
                # our own decoding functionality to preserve unicode strings where
                # possible
                return self.buffer.write(str(x))
            return io.TextIOWrapper.write(self, x)

    else:

        def write(self, x):
            # try to use backslash and surrogate escape strategies before failing
            old_errors = getattr(self, "_errors", self.errors)
            self._errors = (
                "backslashescape" if self.encoding != "mbcs" else "surrogateescape"
            )
            try:
                return io.TextIOWrapper.write(self, to_text(x, errors=self._errors))
            except UnicodeDecodeError:
                # Retry with the previous error handler.
                self._errors = old_errors
                return io.TextIOWrapper.write(self, to_text(x, errors=self._errors))

    def writelines(self, lines):
        for line in lines:
            self.write(line)

    def __del__(self):
        # Detach so the underlying stream is not closed when the wrapper
        # is garbage collected.
        try:
            self.detach()
        except Exception:
            pass

    def isatty(self):
        return self._stream.isatty()
# More things borrowed from click, this is because we are using `TextIOWrapper` instead of
# just a normal StringIO
class _StreamProvider(object):
    """Adapter supplying the buffer-protocol methods ``io.TextIOWrapper``
    expects (``read1``/``readable``/``writable``/``seekable``), delegating
    all other attribute access to the wrapped stream.
    """
    def __init__(self, stream):
        self._stream = stream
        super(_StreamProvider, self).__init__()
    def __getattr__(self, name):
        # Delegate anything not defined here to the wrapped stream.
        return getattr(self._stream, name)
    def read1(self, size):
        fn = getattr(self._stream, "read1", None)
        if fn is not None:
            return fn(size)
        # No native read1: approximate with readline (PY2) or read (PY3).
        if six.PY2:
            return self._stream.readline(size)
        return self._stream.read(size)
    def readable(self):
        fn = getattr(self._stream, "readable", None)
        if fn is not None:
            return fn()
        # No ``readable`` attribute: probe with a zero-byte read.
        try:
            self._stream.read(0)
        except Exception:
            return False
        return True
    def writable(self):
        fn = getattr(self._stream, "writable", None)
        if fn is not None:
            return fn()
        # Probe with an empty binary write.
        try:
            self._stream.write(b"")
        except Exception:
            return False
        return True
    def seekable(self):
        fn = getattr(self._stream, "seekable", None)
        if fn is not None:
            return fn()
        # Probe by seeking to the current position.
        try:
            self._stream.seek(self._stream.tell())
        except Exception:
            return False
        return True
# XXX: The approach here is inspired somewhat by click with details taken from various
# XXX: other sources. Specifically we are using a stream cache and stream wrapping
# XXX: techniques from click (loosely inspired for the most part, with many details)
# XXX: heavily modified to suit our needs
def _isatty(stream):
    """Best-effort ``stream.isatty()`` that treats any failure as "not a tty"."""
    try:
        return stream.isatty()
    except Exception:
        return False
# ``_wrap_for_color`` stays ``None`` except on Windows with colorama
# available, where a real wrapper is defined below.
_wrap_for_color = None
try:
    import colorama
except ImportError:
    colorama = None
# Cache of already-wrapped streams; weak keys let dropped streams be collected.
_color_stream_cache = WeakKeyDictionary()
if os.name == "nt" or sys.platform.startswith("win"):
    if colorama is not None:
        def _wrap_for_color(stream, color=None):
            # Wrap *stream* in colorama's ANSI-to-Win32 translator, stripping
            # escape codes entirely when color output is not usable.
            try:
                cached = _color_stream_cache.get(stream)
            except KeyError:
                cached = None
            if cached is not None:
                return cached
            strip = not _can_use_color(stream, color)
            _color_wrapper = colorama.AnsiToWin32(stream, strip=strip)
            result = _color_wrapper.stream
            _write = result.write
            def _write_with_color(s):
                # Reset terminal state if a write blows up mid-escape.
                try:
                    return _write(s)
                except Exception:
                    _color_wrapper.reset_all()
                    raise
            result.write = _write_with_color
            try:
                _color_stream_cache[stream] = result
            except Exception:
                pass
            return result
def _cached_stream_lookup(stream_lookup_func, stream_resolution_func):
    """Build a zero-argument memoizing resolver for a stream.

    ``stream_lookup_func`` fetches the current raw stream (e.g. ``sys.stdout``)
    and ``stream_resolution_func`` produces its wrapped replacement.  Results
    are cached weakly per raw stream, so a swapped-out stream gets a fresh
    wrapper while the old one can be garbage collected.
    """
    cache = WeakKeyDictionary()

    def lookup():
        key = stream_lookup_func()
        cached = cache.get(key, None) if key in cache else None
        if cached is not None:
            return cached
        resolved = stream_resolution_func()
        try:
            # Re-fetch in case resolving replaced the stream object.
            cache[stream_lookup_func()] = resolved
        except Exception:
            pass
        return resolved

    return lookup
def get_text_stream(stream="stdout", encoding=None):
    """Retrieve a unicode stream wrapper around **sys.stdout** or **sys.stderr**.

    :param str stream: The name of the stream to wrap from the :mod:`sys` module;
        one of ``"stdin"``, ``"stdout"`` or ``"stderr"``.
    :param str encoding: An optional encoding to use.
    :return: A new :class:`~vistir.misc.StreamWrapper` instance around the stream
    :rtype: `vistir.misc.StreamWrapper`
    """
    # Capture the current sys streams before any wrapping can mutate them.
    stream_map = {"stdin": sys.stdin, "stdout": sys.stdout, "stderr": sys.stderr}
    if os.name == "nt" or sys.platform.startswith("win"):
        from ._winconsole import _get_windows_console_stream, _wrap_std_stream
    else:
        # No-op shims outside Windows.
        _get_windows_console_stream = lambda *args: None  # noqa
        _wrap_std_stream = lambda *args: None  # noqa
    if six.PY2 and stream != "stdin":
        _wrap_std_stream(stream)
    sys_stream = stream_map[stream]
    # Prefer a dedicated Windows console stream when one is available.
    windows_console = _get_windows_console_stream(sys_stream, encoding, None)
    if windows_console is not None:
        return windows_console
    return get_wrapped_stream(sys_stream, encoding)
def get_text_stdout():
    """Return a text-wrapped stream around the current ``sys.stdout``."""
    return get_text_stream("stdout")
def get_text_stderr():
    """Return a text-wrapped stream around the current ``sys.stderr``."""
    return get_text_stream("stderr")
def get_text_stdin():
    """Return a text-wrapped stream around the current ``sys.stdin``."""
    return get_text_stream("stdin")
# Factories for text-wrapped standard streams, keyed by stream name.
TEXT_STREAMS = {
    "stdin": get_text_stdin,
    "stdout": get_text_stdout,
    "stderr": get_text_stderr,
}
# Memoized accessors: only re-wrap when the underlying sys stream changes.
_text_stdin = _cached_stream_lookup(lambda: sys.stdin, get_text_stdin)
_text_stdout = _cached_stream_lookup(lambda: sys.stdout, get_text_stdout)
_text_stderr = _cached_stream_lookup(lambda: sys.stderr, get_text_stderr)
def replace_with_text_stream(stream_name):
    """Given a stream name, replace the target stream with a text-converted equivalent

    :param str stream_name: The name of a target stream, such as **stdout** or **stderr**
    :return: None
    """
    new_stream = TEXT_STREAMS.get(stream_name)
    if new_stream is not None:
        # Instantiate the wrapper and install it on the ``sys`` module.
        new_stream = new_stream()
        setattr(sys, stream_name, new_stream)
    return None
def _can_use_color(stream=None, color=None):
    """Return True when colorized output should be produced.

    An explicit truthy ``color`` wins outright; otherwise color is tied to
    the stream being a tty.  Globally disabled colors always return False.
    """
    from .termcolors import DISABLE_COLORS
    if DISABLE_COLORS:
        return False
    if not color:
        if not stream:
            # NOTE(review): falls back to sys.stdin for an *output* color
            # decision — possibly meant to be stdout; confirm upstream intent.
            stream = sys.stdin
        return _isatty(stream)
    return bool(color)
def echo(text, fg=None, bg=None, style=None, file=None, err=False, color=None):
    """Write the given text to the provided stream or **sys.stdout** by default.

    Provides optional foreground and background colors from the ansi defaults:
    **grey**, **red**, **green**, **yellow**, **blue**, **magenta**, **cyan**
    or **white**.

    Available styles include **bold**, **dark**, **underline**, **blink**, **reverse**,
    **concealed**

    :param str text: Text to write
    :param str fg: Foreground color to use (default: None)
    :param str bg: Background color to use (default: None)
    :param str style: Style to use (default: None)
    :param stream file: File to write to (default: None)
    :param bool err: Write to **sys.stderr** instead of **sys.stdout** when no
        file is given (default: False)
    :param bool color: Whether to force color (i.e. ANSI codes are in the text)
    :raises TypeError: if ``file`` is given but lacks a ``write`` method
    """
    if file and not hasattr(file, "write"):
        raise TypeError("Expected a writable stream, received {0!r}".format(file))
    if not file:
        if err:
            file = _text_stderr()
        else:
            file = _text_stdout()
    # Coerce arbitrary objects to text before appending the newline.
    if text and not isinstance(text, (six.string_types, bytes, bytearray)):
        text = six.text_type(text)
    text = "" if not text else text
    if isinstance(text, six.text_type):
        text += "\n"
    else:
        text += b"\n"
    # Bytes bypass the text layer entirely and go to the binary buffer.
    if text and six.PY3 and is_bytes(text):
        buffer = _get_binary_buffer(file)
        if buffer is not None:
            file.flush()
            buffer.write(text)
            buffer.flush()
            return
    if text and not is_bytes(text):
        can_use_color = _can_use_color(file, color=color)
        if any([fg, bg, style]):
            text = colorize(text, fg=fg, bg=bg, attrs=style)
        if not can_use_color or (os.name == "nt" and not _wrap_for_color):
            # Strip ANSI sequences when color can't (or shouldn't) be shown.
            text = ANSI_REMOVAL_RE.sub("", text)
        elif os.name == "nt" and _wrap_for_color:
            file = _wrap_for_color(file, color=color)
    if text:
        file.write(text)
        file.flush()
|
sarugaku/vistir
|
src/vistir/misc.py
|
to_bytes
|
python
|
def to_bytes(string, encoding="utf-8", errors=None):
    # Canonical name for UTF-8, used to detect "already in target encoding".
    unicode_name = get_canonical_encoding_name("utf-8")
    if not errors:
        if get_canonical_encoding_name(encoding) == unicode_name:
            # Surrogate handlers let undecodable filesystem bytes round-trip.
            if six.PY3 and os.name == "nt":
                errors = "surrogatepass"
            else:
                errors = "surrogateescape" if six.PY3 else "ignore"
        else:
            errors = "strict"
    if isinstance(string, bytes):
        if get_canonical_encoding_name(encoding) == unicode_name:
            # Already bytes in the requested (UTF-8) encoding.
            return string
        else:
            # Re-encode: input bytes are assumed to be UTF-8.
            return string.decode(unicode_name).encode(encoding, errors)
    elif isinstance(string, memoryview):
        return bytes(string)
    elif not isinstance(string, six.string_types):
        try:
            if six.PY3:
                return six.text_type(string).encode(encoding, errors)
            else:
                return bytes(string)
        except UnicodeEncodeError:
            if isinstance(string, Exception):
                # NOTE(review): iterating an Exception instance raises
                # TypeError on Python 3 — presumably ``string.args`` was
                # intended; confirm upstream.
                return b" ".join(to_bytes(arg, encoding, errors) for arg in string)
            return six.text_type(string).encode(encoding, errors)
    else:
        return string.encode(encoding, errors)
|
Force a value to bytes.
:param string: Some input that can be converted to bytes.
:type string: str, bytes, unicode, or a memoryview subclass
:param encoding: The encoding to use for conversions, defaults to "utf-8"
:type encoding: str, optional
:return: Corresponding byte representation (for use in filesystem operations)
:rtype: bytes
|
train
|
https://github.com/sarugaku/vistir/blob/96c008ee62a43608d1e70797f74634cb66a004c1/src/vistir/misc.py#L409-L447
|
[
"def get_canonical_encoding_name(name):\n # type: (str) -> str\n \"\"\"\n Given an encoding name, get the canonical name from a codec lookup.\n\n :param str name: The name of the codec to lookup\n :return: The canonical version of the codec name\n :rtype: str\n \"\"\"\n\n import codecs\n\n try:\n codec = codecs.lookup(name)\n except LookupError:\n return name\n else:\n return codec.name\n"
] |
# -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import io
import json
import locale
import logging
import os
import subprocess
import sys
from collections import OrderedDict
from functools import partial
from itertools import islice, tee
from weakref import WeakKeyDictionary
import six
from .cmdparse import Script
from .compat import (
Iterable,
Path,
StringIO,
fs_str,
is_bytes,
partialmethod,
to_native_string,
)
from .contextmanagers import spinner as spinner
from .termcolors import ANSI_REMOVAL_RE, colorize
if os.name != "nt":
    # Shim so the Windows-specific ``except WindowsError`` clauses below
    # work on POSIX, where the builtin WindowsError does not exist.
    class WindowsError(OSError):
        pass
# Public API of this module.
__all__ = [
    "shell_escape",
    "unnest",
    "dedup",
    "run",
    "load_path",
    "partialclass",
    "to_text",
    "to_bytes",
    "locale_encoding",
    "chunked",
    "take",
    "divide",
    "getpreferredencoding",
    "decode_for_output",
    "get_canonical_encoding_name",
    "get_wrapped_stream",
    "StreamWrapper",
]
def _get_logger(name=None, level="ERROR"):
    """Return a stderr-backed logger configured at *level*.

    :param str name: Logger name; defaults to this module's ``__name__``.
    :param level: A :mod:`logging` level constant or its name (e.g. ``"ERROR"``).
    :return: The configured :class:`logging.Logger`.
    """
    if not name:
        name = __name__
    if isinstance(level, six.string_types):
        level = getattr(logging, level.upper())
    logger = logging.getLogger(name)
    logger.setLevel(level)
    # Fix: ``logging.getLogger`` returns a shared instance, so repeated calls
    # used to stack duplicate StreamHandlers and emit every record once per
    # call.  Only attach a handler the first time.
    if not logger.handlers:
        formatter = logging.Formatter(
            "%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s"
        )
        handler = logging.StreamHandler(stream=sys.stderr)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger
def shell_escape(cmd):
    """Escape strings for use in :func:`~subprocess.Popen` and :func:`run`.

    This is a passthrough method for instantiating a :class:`~vistir.cmdparse.Script`
    object which can be used to escape commands to output as a single string.
    """
    return Script.parse(cmd).cmdify()
def unnest(elem):
    """Yield every leaf value from an arbitrarily nested iterable.

    Strings count as leaves rather than iterables of characters.

    :param elem: An iterable to flatten
    :type elem: :class:`~collections.Iterable`

    >>> nested_iterable = (1234, (3456, 4398345, (234234)), (2396, (23895750, 9283798, 29384, (289375983275, 293759, 2347, (2098, 7987, 27599)))))
    >>> list(vistir.misc.unnest(nested_iterable))
    [1234, 3456, 4398345, 234234, 2396, 23895750, 9283798, 29384, 289375983275, 293759, 2347, 2098, 7987, 27599]
    """
    def _branches(candidate):
        # Non-string iterables are descended into; everything else is a leaf.
        return isinstance(candidate, Iterable) and not isinstance(
            candidate, six.string_types
        )

    if _branches(elem):
        # Duplicate so the caller's iterator is not consumed while walking.
        elem, source = tee(elem, 2)
    else:
        source = elem
    for child in source:
        if _branches(child):
            child, duplicate = tee(child, 2)
            for leaf in unnest(duplicate):
                yield leaf
        else:
            yield child
def _is_iterable(elem):
    """Return ``True`` when *elem* exposes an ``__iter__`` attribute."""
    return bool(getattr(elem, "__iter__", False))
def dedup(iterable):
    """Deduplicate an iterable like ``iter(set(iterable))`` but
    order-preserving (first occurrence wins).
    """
    unique = OrderedDict.fromkeys(iterable)
    return iter(unique)
def _spawn_subprocess(script, env=None, block=True, cwd=None, combine_stderr=True):
    """Launch *script* via :class:`subprocess.Popen`.

    :param script: A parsed command exposing ``.command``, ``.args`` and ``.cmdify()``.
    :param dict env: Environment for the child; defaults to a copy of ``os.environ``.
    :param bool block: When False, the child's stdin becomes a pipe.
    :param str cwd: Optional working directory for the child.
    :param bool combine_stderr: Merge stderr into stdout when True.
    :return: The started :class:`subprocess.Popen` instance.
    """
    from distutils.spawn import find_executable
    if not env:
        env = os.environ.copy()
    command = find_executable(script.command)
    options = {
        "env": env,
        "universal_newlines": True,
        "stdout": subprocess.PIPE,
        "stderr": subprocess.PIPE if not combine_stderr else subprocess.STDOUT,
        "shell": False,
    }
    if not block:
        options["stdin"] = subprocess.PIPE
    if cwd:
        options["cwd"] = cwd
    # Command not found, maybe this is a shell built-in?
    cmd = [command] + script.args
    if not command:  # Try to use CreateProcess directly if possible.
        cmd = script.cmdify()
        options["shell"] = True
    # Try to use CreateProcess directly if possible. Specifically catch
    # Windows error 193 "Command is not a valid Win32 application" to handle
    # a "command" that is non-executable. See pypa/pipenv#2727.
    try:
        return subprocess.Popen(cmd, **options)
    except WindowsError as e:
        if getattr(e, "winerror", 9999) != 193:
            raise
        options["shell"] = True
        # Try shell mode to use Windows's file association for file launch.
        return subprocess.Popen(script.cmdify(), **options)
def _read_streams(stream_dict):
    """Read one line from each stream in *stream_dict*.

    :param dict stream_dict: Mapping of stream name -> file-like object (or a
        falsy placeholder when the stream is absent).
    :return: Mapping of stream name -> rstripped text line, or ``None`` when
        the stream is missing or exhausted.
    :rtype: dict
    """
    results = {}
    for name, stream in stream_dict.items():
        if not stream:
            results[name] = None
            continue
        raw_line = to_text(stream.readline())
        if not raw_line:
            results[name] = None
            continue
        results[name] = to_text("{0}".format(raw_line.rstrip()))
    return results
def get_stream_results(cmd_instance, verbose, maxlen, spinner=None, stdout_allowed=False):
    """Drain stdout/stderr of a running subprocess line by line.

    :param cmd_instance: A :class:`subprocess.Popen` with piped streams.
    :param bool verbose: Echo each line as it is read.
    :param int maxlen: Truncate displayed lines beyond this width.
    :param spinner: Optional spinner used for display instead of raw streams.
    :param bool stdout_allowed: Permit echoing stdout lines to stdout.
    :return: ``{"stdout": [...], "stderr": [...]}`` of captured lines.
    :rtype: dict
    """
    stream_results = {"stdout": [], "stderr": []}
    streams = {"stderr": cmd_instance.stderr, "stdout": cmd_instance.stdout}
    while True:
        stream_contents = _read_streams(streams)
        stdout_line = stream_contents["stdout"]
        stderr_line = stream_contents["stderr"]
        # Both streams exhausted: stop polling.
        if not (stdout_line or stderr_line):
            break
        for stream_name in stream_contents.keys():
            if stream_contents[stream_name] and stream_name in stream_results:
                line = stream_contents[stream_name]
                stream_results[stream_name].append(line)
                # Truncate long lines for display only; capture keeps the full line.
                display_line = fs_str("{0}".format(line))
                if len(display_line) > maxlen:
                    display_line = "{0}...".format(display_line[:maxlen])
                if verbose:
                    use_stderr = not stdout_allowed or stream_name != "stdout"
                    if spinner:
                        target = spinner.stderr if use_stderr else spinner.stdout
                        spinner.hide_and_write(display_line, target=target)
                    else:
                        target = sys.stderr if use_stderr else sys.stdout
                        target.write(display_line)
                        target.flush()
                if spinner:
                    spinner.text = to_native_string(
                        "{0} {1}".format(spinner.text, display_line)
                    )
                continue
    return stream_results
def _create_subprocess(
    cmd,
    env=None,
    block=True,
    return_object=False,
    cwd=os.curdir,
    verbose=False,
    spinner=None,
    combine_stderr=False,
    display_limit=200,
    start_text="",
    write_to_stdout=True,
):
    """Spawn *cmd* and collect its output, optionally streaming it live.

    Non-blocking mode reads the child's streams incrementally (driving the
    spinner, if any); blocking mode simply uses ``communicate``.  Captured
    output is attached to the Popen instance as ``c.out`` / ``c.err``.

    :return: ``(out, err)`` strings, or the Popen instance when
        ``return_object`` is True.
    """
    if not env:
        env = os.environ.copy()
    try:
        c = _spawn_subprocess(
            cmd, env=env, block=block, cwd=cwd, combine_stderr=combine_stderr
        )
    except Exception as exc:
        import traceback
        formatted_tb = "".join(traceback.format_exception(*sys.exc_info()))  # pragma: no cover
        sys.stderr.write(  # pragma: no cover
            "Error while executing command %s:" % to_native_string(" ".join(cmd._parts))  # pragma: no cover
        )  # pragma: no cover
        sys.stderr.write(formatted_tb)  # pragma: no cover
        raise exc  # pragma: no cover
    if not block:
        # Non-blocking: close stdin and stream the output as it arrives.
        c.stdin.close()
        spinner_orig_text = ""
        if spinner and getattr(spinner, "text", None) is not None:
            spinner_orig_text = spinner.text
        if not spinner_orig_text and start_text is not None:
            spinner_orig_text = start_text
        stream_results = get_stream_results(
            c,
            verbose=verbose,
            maxlen=display_limit,
            spinner=spinner,
            stdout_allowed=write_to_stdout,
        )
        try:
            c.wait()
        finally:
            if c.stdout:
                c.stdout.close()
            if c.stderr:
                c.stderr.close()
        if spinner:
            if c.returncode > 0:
                spinner.fail(to_native_string("Failed...cleaning up..."))
            if not os.name == "nt":
                spinner.ok(to_native_string("✔ Complete"))
            else:
                spinner.ok(to_native_string("Complete"))
        output = stream_results["stdout"]
        err = stream_results["stderr"]
        c.out = "\n".join(output) if output else ""
        c.err = "\n".join(err) if err else ""
    else:
        c.out, c.err = c.communicate()
        # NOTE(review): this branch only runs when ``block`` is truthy, so the
        # ``if not block`` guard below can never fire — dead code; confirm.
        if not block:
            c.wait()
        c.out = to_text("{0}".format(c.out)) if c.out else fs_str("")
        c.err = to_text("{0}".format(c.err)) if c.err else fs_str("")
    if not return_object:
        return c.out.strip(), c.err.strip()
    return c
def run(
    cmd,
    env=None,
    return_object=False,
    block=True,
    cwd=None,
    verbose=False,
    nospin=False,
    spinner_name=None,
    combine_stderr=True,
    display_limit=200,
    write_to_stdout=True,
):
    """Use `subprocess.Popen` to get the output of a command and decode it.

    :param list cmd: A list representing the command you want to run.
    :param dict env: Additional environment settings to pass through to the subprocess.
    :param bool return_object: When True, returns the whole subprocess instance
    :param bool block: When False, returns a potentially still-running :class:`subprocess.Popen` instance
    :param str cwd: Current working directory context to use for spawning the subprocess.
    :param bool verbose: Whether to print stdout in real time when non-blocking.
    :param bool nospin: Whether to disable the cli spinner.
    :param str spinner_name: The name of the spinner to use if enabled, defaults to bouncingBar
    :param bool combine_stderr: Optionally merge stdout and stderr in the subprocess, false if nonblocking.
    :param int display_limit: The max width of output lines to display when using a spinner.
    :param bool write_to_stdout: Whether to write to stdout when using a spinner, default True.
    :returns: A 2-tuple of (output, error) or a :class:`subprocess.Popen` object.

    .. Warning:: Merging standard out and standard error in a nonblocking subprocess
        can cause errors in some cases and may not be ideal. Consider disabling
        this functionality.
    """
    _env = os.environ.copy()
    if env:
        _env.update(env)
    # Environment values must be filesystem-encoded for the child process.
    if six.PY2:
        fs_encode = partial(to_bytes, encoding=locale_encoding)
        _env = {fs_encode(k): fs_encode(v) for k, v in _env.items()}
    else:
        _env = {k: fs_str(v) for k, v in _env.items()}
    if not spinner_name:
        spinner_name = "bouncingBar"
    if six.PY2:
        if isinstance(cmd, six.string_types):
            cmd = cmd.encode("utf-8")
        elif isinstance(cmd, (list, tuple)):
            cmd = [c.encode("utf-8") for c in cmd]
    if not isinstance(cmd, Script):
        cmd = Script.parse(cmd)
    # NOTE(review): this forces combine_stderr off for every blocking call
    # despite the documented default of True — confirm intent.
    if block or not return_object:
        combine_stderr = False
    start_text = ""
    with spinner(
        spinner_name=spinner_name,
        start_text=start_text,
        nospin=nospin,
        write_to_stdout=write_to_stdout,
    ) as sp:
        return _create_subprocess(
            cmd,
            env=_env,
            return_object=return_object,
            block=block,
            cwd=cwd,
            verbose=verbose,
            spinner=sp,
            combine_stderr=combine_stderr,
            start_text=start_text,
            write_to_stdout=True,
        )
def load_path(python):
    """Load the :mod:`sys.path` from the given python executable's environment as json

    :param str python: Path to a valid python executable
    :return: A python representation of the `sys.path` value of the given python executable.
    :rtype: list

    >>> load_path("/home/user/.virtualenvs/requirementslib-5MhGuG3C/bin/python")
    ['', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python37.zip', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python3.7', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python3.7/lib-dynload', '/home/user/.pyenv/versions/3.7.0/lib/python3.7', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python3.7/site-packages', '/home/user/git/requirementslib/src']
    """
    interpreter = Path(python).as_posix()
    out, _ = run(
        [interpreter, "-c", "import json, sys; print(json.dumps(sys.path))"],
        nospin=True,
    )
    return json.loads(out) if out else []
def partialclass(cls, *args, **kwargs):
    """Returns a partially instantiated class

    :return: A partial class instance
    :rtype: cls

    >>> source = partialclass(Source, url="https://pypi.org/simple")
    >>> source
    <class '__main__.Source'>
    >>> source(name="pypi")
    >>> source.__dict__
    mappingproxy({'__module__': '__main__', '__dict__': <attribute '__dict__' of 'Source' objects>, '__weakref__': <attribute '__weakref__' of 'Source' objects>, '__doc__': None, '__init__': functools.partialmethod(<function Source.__init__ at 0x7f23af429bf8>, , url='https://pypi.org/simple')})
    >>> new_source = source(name="pypi")
    >>> new_source
    <__main__.Source object at 0x7f23af189b38>
    >>> new_source.__dict__
    {'url': 'https://pypi.org/simple', 'verify_ssl': True, 'name': 'pypi'}
    """
    # Prefer __name__/__qualname__ for the derived class's name, falling back
    # to str(cls) when the attribute is missing; first non-None wins.
    name_attrs = [
        n
        for n in (getattr(cls, name, str(cls)) for name in ("__name__", "__qualname__"))
        if n is not None
    ]
    name_attrs = name_attrs[0]
    # Subclass with __init__ pre-bound to the given args via partialmethod.
    type_ = type(
        name_attrs, (cls,), {"__init__": partialmethod(cls.__init__, *args, **kwargs)}
    )
    # Swiped from attrs.make_class
    # Attribute the new class to the *caller's* module (one frame up).
    try:
        type_.__module__ = sys._getframe(1).f_globals.get("__name__", "__main__")
    except (AttributeError, ValueError):  # pragma: no cover
        pass  # pragma: no cover
    return type_
# Borrowed from django -- force bytes and decode -- see link for details:
# https://github.com/django/django/blob/fc6b90b/django/utils/encoding.py#L112
def to_text(string, encoding="utf-8", errors=None):
    """Force a value to a text-type.

    :param string: Some input that can be converted to a unicode representation.
    :type string: str or bytes unicode
    :param encoding: The encoding to use for conversions, defaults to "utf-8"
    :type encoding: str, optional
    :param errors: Codec error handler; a platform-appropriate default is
        chosen when omitted
    :return: The unicode representation of the string
    :rtype: str
    """
    unicode_name = get_canonical_encoding_name("utf-8")
    # Pick a default error handler: surrogate handlers let undecodable
    # filesystem bytes round-trip on Python 3.
    if not errors:
        if get_canonical_encoding_name(encoding) == unicode_name:
            if six.PY3 and os.name == "nt":
                errors = "surrogatepass"
            else:
                errors = "surrogateescape" if six.PY3 else "ignore"
        else:
            errors = "strict"
    if issubclass(type(string), six.text_type):
        return string
    try:
        if not issubclass(type(string), six.string_types):
            if six.PY3:
                if isinstance(string, bytes):
                    string = six.text_type(string, encoding, errors)
                else:
                    string = six.text_type(string)
            elif hasattr(string, "__unicode__"):
                string = six.text_type(string)
            else:
                string = six.text_type(bytes(string), encoding, errors)
        else:
            string = string.decode(encoding, errors)
    except UnicodeDecodeError:
        # NOTE(review): on Python 3 iterating ``bytes`` yields ints, so this
        # fallback joins the str() of each byte value — confirm intent.
        string = " ".join(to_text(arg, encoding, errors) for arg in string)
    return string
def divide(n, iterable):
    """
    split an iterable into n groups, per https://more-itertools.readthedocs.io/en/latest/api.html#grouping

    Earlier groups receive the extra elements when the length is not evenly
    divisible by *n*.

    :param int n: Number of unique groups
    :param iter iterable: An iterable to split up
    :return: a list of new iterables derived from the original iterable
    :rtype: list
    """
    items = tuple(iterable)
    size, remainder = divmod(len(items), n)
    # bounds[i] is where group i starts; the first ``remainder`` groups are
    # one element longer.
    bounds = [index * size + min(index, remainder) for index in range(n + 1)]
    return [iter(items[bounds[i]:bounds[i + 1]]) for i in range(n)]
def take(n, iterable):
    """Take the first *n* elements from *iterable* as a list, consuming only
    those elements from the underlying iterator.

    :param int n: Number of elements to take
    :param iter iterable: An iterable to take from

    from https://github.com/erikrose/more-itertools/blob/master/more_itertools/recipes.py
    """
    prefix = islice(iterable, n)
    return list(prefix)
def chunked(n, iterable):
    """Split an iterable into lists of length *n*; the last chunk may be shorter.

    :param int n: Maximum length of each chunk
    :param iter iterable: An iterable to split up

    from https://github.com/erikrose/more-itertools/blob/master/more_itertools/more.py
    """
    source = iter(iterable)
    next_chunk = partial(take, n, source)
    # ``iter`` with a sentinel stops once ``take`` yields an empty list.
    return iter(next_chunk, [])
# Best-effort locale encoding used for Python 2 environment encoding; falls
# back to "ascii" when the locale cannot be determined.
try:
    locale_encoding = locale.getdefaultlocale()[1] or "ascii"
except Exception:
    locale_encoding = "ascii"
def getpreferredencoding():
    """Determine the proper output encoding for terminal rendering"""
    # Borrowed from Invoke
    # (see https://github.com/pyinvoke/invoke/blob/93af29d/invoke/runners.py#L881)
    _encoding = locale.getpreferredencoding(False)
    # On non-Windows Python 2, the default locale's encoding is more reliable.
    if six.PY2 and not sys.platform == "win32":
        _default_encoding = locale.getdefaultlocale()[1]
        if _default_encoding is not None:
            _encoding = _default_encoding
    return _encoding
# Cached at import time; the terminal encoding rarely changes mid-process.
PREFERRED_ENCODING = getpreferredencoding()
def get_output_encoding(source_encoding):
    """
    Given a source encoding, determine the preferred output encoding.

    ASCII sources are upgraded to UTF-8; ``None`` falls back to the
    platform's preferred encoding.

    :param str source_encoding: The encoding of the source material.
    :returns: The output encoding to decode to.
    :rtype: str
    """
    if source_encoding is None:
        return get_canonical_encoding_name(PREFERRED_ENCODING)
    canonical = get_canonical_encoding_name(source_encoding)
    return "utf-8" if canonical == "ascii" else canonical
def _encode(output, encoding=None, errors=None, translation_map=None):
    """Best-effort encode *output*, with translation/decoding fallbacks.

    :param output: Text (or bytes) destined for a terminal.
    :param str encoding: Target encoding; defaults to the platform preference.
    :param str errors: Error handler passed to the text-conversion fallback.
    :param dict translation_map: Optional ordinal -> replacement mapping used
        when a straight encode fails.
    :return: Encoded bytes, or translated/decoded text on failure.
    """
    if encoding is None:
        encoding = PREFERRED_ENCODING
    try:
        output = output.encode(encoding)
    except (UnicodeDecodeError, UnicodeEncodeError):
        if translation_map is not None:
            if six.PY2:
                # Python 2 needs the unbound unicode.translate form here.
                output = unicode.translate(  # noqa: F821
                    to_text(output, encoding=encoding, errors=errors), translation_map
                )
            else:
                output = output.translate(translation_map)
        else:
            output = to_text(output, encoding=encoding, errors=errors)
    except AttributeError:
        # Input is already bytes (no ``.encode``); pass through unchanged.
        pass
    return output
def decode_for_output(output, target_stream=None, translation_map=None):
    """Given a string, decode it for output to a terminal

    :param str output: A string to print to a terminal
    :param target_stream: A stream to write to, we will encode to target this stream if possible.
    :param dict translation_map: A mapping of unicode character ordinals to replacement strings.
    :return: A re-encoded string using the preferred encoding
    :rtype: str
    """
    if not isinstance(output, six.string_types):
        return output
    encoding = None
    if target_stream is not None:
        encoding = getattr(target_stream, "encoding", None)
    # Normalize (and upgrade plain ASCII) to a usable output encoding.
    encoding = get_output_encoding(encoding)
    try:
        output = _encode(output, encoding=encoding, translation_map=translation_map)
    except (UnicodeDecodeError, UnicodeEncodeError):
        # Last resort: force a native string, then encode with replacement.
        output = to_native_string(output)
        output = _encode(
            output, encoding=encoding, errors="replace", translation_map=translation_map
        )
    return to_text(output, encoding=encoding, errors="replace")
def get_canonical_encoding_name(name):
    # type: (str) -> str
    """
    Given an encoding name, get the canonical name from a codec lookup.

    Names with no matching codec are returned unchanged.

    :param str name: The name of the codec to lookup
    :return: The canonical version of the codec name
    :rtype: str
    """
    import codecs

    try:
        found = codecs.lookup(name)
    except LookupError:
        return name
    return found.name
def _is_binary_buffer(stream):
    """Report whether *stream* accepts binary (``bytes``) writes."""
    try:
        stream.write(b"")
        return True
    except Exception:
        pass
    # Not binary: probe the text interface as well before reporting failure.
    try:
        stream.write("")
    except Exception:
        pass
    return False
def _get_binary_buffer(stream):
    # On Python 3, text streams hide the raw byte sink behind ``.buffer``;
    # unwrap it so callers can write bytes directly.  Note: may return
    # ``None`` when a non-binary stream exposes no usable buffer.
    if six.PY3 and not _is_binary_buffer(stream):
        stream = getattr(stream, "buffer", None)
        if stream is not None and _is_binary_buffer(stream):
            return stream
    return stream
def get_wrapped_stream(stream, encoding=None, errors="replace"):
    """
    Given a stream, wrap it in a `StreamWrapper` instance and return the wrapped stream.

    :param stream: A stream instance to wrap
    :param str encoding: The encoding to use for the stream
    :param str errors: The error handler to use, default "replace"
    :returns: A new, wrapped stream
    :rtype: :class:`StreamWrapper`
    :raises TypeError: if ``stream`` is ``None``
    """
    if stream is None:
        raise TypeError("must provide a stream to wrap")
    # Unwrap to the binary buffer so the wrapper owns the encoding step.
    buffered = _get_binary_buffer(stream)
    if encoding is None and buffered is not None:
        encoding = "utf-8"
    encoding = (
        get_canonical_encoding_name(encoding)
        if encoding
        else get_output_encoding(buffered)
    )
    return StreamWrapper(buffered, encoding, errors, line_buffering=True)
class StreamWrapper(io.TextIOWrapper):
    """
    This wrapper class will wrap a provided stream and supply an interface
    for compatibility.

    The wrapped stream is first adapted through :class:`_StreamProvider` so
    :class:`io.TextIOWrapper` always sees the buffer protocol it needs.
    """
    def __init__(self, stream, encoding, errors, line_buffering=True, **kwargs):
        # Keep a handle on the provider so ``isatty`` can consult the
        # original stream directly.
        self._stream = stream = _StreamProvider(stream)
        io.TextIOWrapper.__init__(
            self, stream, encoding, errors, line_buffering=line_buffering, **kwargs
        )
    # borrowed from click's implementation of stream wrappers, see
    # https://github.com/pallets/click/blob/6cafd32/click/_compat.py#L64
    if six.PY2:
        def write(self, x):
            # Bytes-like input bypasses the text layer on Python 2 and goes
            # straight to the underlying buffer.
            if isinstance(x, (str, buffer, bytearray)):  # noqa: F821
                try:
                    self.flush()
                except Exception:
                    pass
                # This is modified from the initial implementation to rely on
                # our own decoding functionality to preserve unicode strings where
                # possible
                return self.buffer.write(str(x))
            return io.TextIOWrapper.write(self, x)
    else:
        def write(self, x):
            # try to use backslash and surrogate escape strategies before failing
            # NOTE(review): "backslashescape" is not a registered codec error
            # handler (cf. "backslashreplace"); if ``to_text`` ever applies it
            # to bytes input this raises LookupError — confirm upstream.
            old_errors = getattr(self, "_errors", self.errors)
            self._errors = (
                "backslashescape" if self.encoding != "mbcs" else "surrogateescape"
            )
            try:
                return io.TextIOWrapper.write(self, to_text(x, errors=self._errors))
            except UnicodeDecodeError:
                # Fall back to the previously active error handler.
                self._errors = old_errors
                return io.TextIOWrapper.write(self, to_text(x, errors=self._errors))
    def writelines(self, lines):
        # Route each line through ``write`` so the error handling above
        # applies uniformly.
        for line in lines:
            self.write(line)
    def __del__(self):
        # Detach rather than close so the wrapped stream outlives the wrapper.
        try:
            self.detach()
        except Exception:
            pass
    def isatty(self):
        return self._stream.isatty()
# More things borrowed from click, this is because we are using `TextIOWrapper` instead of
# just a normal StringIO
class _StreamProvider(object):
    """Duck-typed adapter giving any stream the buffer interface that
    :class:`io.TextIOWrapper` expects (``read1``/``readable``/``writable``/
    ``seekable``), delegating everything else to the wrapped stream.
    """

    def __init__(self, stream):
        self._stream = stream
        super(_StreamProvider, self).__init__()

    def __getattr__(self, name):
        # Fall through to the wrapped stream for anything not defined here.
        return getattr(self._stream, name)

    def _probe(self, method_name, attempt):
        """Use the stream's own *method_name* when present; otherwise call
        *attempt* and report whether it succeeded."""
        fn = getattr(self._stream, method_name, None)
        if fn is not None:
            return fn()
        try:
            attempt()
        except Exception:
            return False
        return True

    def read1(self, size):
        fn = getattr(self._stream, "read1", None)
        if fn is not None:
            return fn(size)
        # No native read1: approximate with readline (PY2) or read (PY3).
        if six.PY2:
            return self._stream.readline(size)
        return self._stream.read(size)

    def readable(self):
        return self._probe("readable", lambda: self._stream.read(0))

    def writable(self):
        return self._probe("writable", lambda: self._stream.write(b""))

    def seekable(self):
        return self._probe("seekable", lambda: self._stream.seek(self._stream.tell()))
# XXX: The approach here is inspired somewhat by click with details taken from various
# XXX: other sources. Specifically we are using a stream cache and stream wrapping
# XXX: techniques from click (loosely inspired for the most part, with many details)
# XXX: heavily modified to suit our needs
def _isatty(stream):
    """Safely report whether *stream* is attached to a terminal.

    Any exception from ``isatty`` (closed stream, missing method) is taken
    to mean "not a tty".
    """
    try:
        result = stream.isatty()
    except Exception:
        result = False
    return result
# ``_wrap_for_color`` stays ``None`` except on Windows with colorama
# available, where a real wrapper is defined below.
_wrap_for_color = None
try:
    import colorama
except ImportError:
    colorama = None
# Cache of already-wrapped streams; weak keys let dropped streams be collected.
_color_stream_cache = WeakKeyDictionary()
if os.name == "nt" or sys.platform.startswith("win"):
    if colorama is not None:
        def _wrap_for_color(stream, color=None):
            # Wrap *stream* in colorama's ANSI-to-Win32 translator, stripping
            # escape codes entirely when color output is not usable.
            try:
                cached = _color_stream_cache.get(stream)
            except KeyError:
                cached = None
            if cached is not None:
                return cached
            strip = not _can_use_color(stream, color)
            _color_wrapper = colorama.AnsiToWin32(stream, strip=strip)
            result = _color_wrapper.stream
            _write = result.write
            def _write_with_color(s):
                # Reset terminal state if a write blows up mid-escape.
                try:
                    return _write(s)
                except Exception:
                    _color_wrapper.reset_all()
                    raise
            result.write = _write_with_color
            try:
                _color_stream_cache[stream] = result
            except Exception:
                pass
            return result
def _cached_stream_lookup(stream_lookup_func, stream_resolution_func):
    """Build a zero-argument memoizing resolver for a stream.

    ``stream_lookup_func`` fetches the current raw stream (e.g. ``sys.stdout``)
    and ``stream_resolution_func`` produces its wrapped replacement.  Results
    are cached weakly per raw stream, so a swapped-out stream gets a fresh
    wrapper while the old one can be garbage collected.
    """
    cache = WeakKeyDictionary()

    def lookup():
        key = stream_lookup_func()
        cached = cache.get(key, None) if key in cache else None
        if cached is not None:
            return cached
        resolved = stream_resolution_func()
        try:
            # Re-fetch in case resolving replaced the stream object.
            cache[stream_lookup_func()] = resolved
        except Exception:
            pass
        return resolved

    return lookup
def get_text_stream(stream="stdout", encoding=None):
    """Retrieve a unicode stream wrapper around **sys.stdout** or **sys.stderr**.

    :param str stream: The name of the stream to wrap from the :mod:`sys` module;
        one of ``"stdin"``, ``"stdout"`` or ``"stderr"``.
    :param str encoding: An optional encoding to use.
    :return: A new :class:`~vistir.misc.StreamWrapper` instance around the stream
    :rtype: `vistir.misc.StreamWrapper`
    """
    # Capture the current sys streams before any wrapping can mutate them.
    stream_map = {"stdin": sys.stdin, "stdout": sys.stdout, "stderr": sys.stderr}
    if os.name == "nt" or sys.platform.startswith("win"):
        from ._winconsole import _get_windows_console_stream, _wrap_std_stream
    else:
        # No-op shims outside Windows.
        _get_windows_console_stream = lambda *args: None  # noqa
        _wrap_std_stream = lambda *args: None  # noqa
    if six.PY2 and stream != "stdin":
        _wrap_std_stream(stream)
    sys_stream = stream_map[stream]
    # Prefer a dedicated Windows console stream when one is available.
    windows_console = _get_windows_console_stream(sys_stream, encoding, None)
    if windows_console is not None:
        return windows_console
    return get_wrapped_stream(sys_stream, encoding)
def get_text_stdout():
    """Return a text-wrapped stream around the current ``sys.stdout``."""
    return get_text_stream("stdout")
def get_text_stderr():
    """Return a text-wrapped stream around the current ``sys.stderr``."""
    return get_text_stream("stderr")
def get_text_stdin():
    """Return a text-wrapped stream around the current ``sys.stdin``."""
    return get_text_stream("stdin")
# Factories for text-wrapped standard streams, keyed by stream name.
TEXT_STREAMS = {
    "stdin": get_text_stdin,
    "stdout": get_text_stdout,
    "stderr": get_text_stderr,
}
# Memoized accessors: only re-wrap when the underlying sys stream changes.
_text_stdin = _cached_stream_lookup(lambda: sys.stdin, get_text_stdin)
_text_stdout = _cached_stream_lookup(lambda: sys.stdout, get_text_stdout)
_text_stderr = _cached_stream_lookup(lambda: sys.stderr, get_text_stderr)
def replace_with_text_stream(stream_name):
"""Given a stream name, replace the target stream with a text-converted equivalent
:param str stream_name: The name of a target stream, such as **stdout** or **stderr**
:return: None
"""
new_stream = TEXT_STREAMS.get(stream_name)
if new_stream is not None:
new_stream = new_stream()
setattr(sys, stream_name, new_stream)
return None
def _can_use_color(stream=None, color=None):
from .termcolors import DISABLE_COLORS
if DISABLE_COLORS:
return False
if not color:
if not stream:
stream = sys.stdin
return _isatty(stream)
return bool(color)
def echo(text, fg=None, bg=None, style=None, file=None, err=False, color=None):
"""Write the given text to the provided stream or **sys.stdout** by default.
Provides optional foreground and background colors from the ansi defaults:
**grey**, **red**, **green**, **yellow**, **blue**, **magenta**, **cyan**
or **white**.
Available styles include **bold**, **dark**, **underline**, **blink**, **reverse**,
**concealed**
:param str text: Text to write
:param str fg: Foreground color to use (default: None)
:param str bg: Foreground color to use (default: None)
:param str style: Style to use (default: None)
:param stream file: File to write to (default: None)
:param bool color: Whether to force color (i.e. ANSI codes are in the text)
"""
if file and not hasattr(file, "write"):
raise TypeError("Expected a writable stream, received {0!r}".format(file))
if not file:
if err:
file = _text_stderr()
else:
file = _text_stdout()
if text and not isinstance(text, (six.string_types, bytes, bytearray)):
text = six.text_type(text)
text = "" if not text else text
if isinstance(text, six.text_type):
text += "\n"
else:
text += b"\n"
if text and six.PY3 and is_bytes(text):
buffer = _get_binary_buffer(file)
if buffer is not None:
file.flush()
buffer.write(text)
buffer.flush()
return
if text and not is_bytes(text):
can_use_color = _can_use_color(file, color=color)
if any([fg, bg, style]):
text = colorize(text, fg=fg, bg=bg, attrs=style)
if not can_use_color or (os.name == "nt" and not _wrap_for_color):
text = ANSI_REMOVAL_RE.sub("", text)
elif os.name == "nt" and _wrap_for_color:
file = _wrap_for_color(file, color=color)
if text:
file.write(text)
file.flush()
|
sarugaku/vistir
|
src/vistir/misc.py
|
to_text
|
python
|
def to_text(string, encoding="utf-8", errors=None):
    """Force a value to a text-type (unicode) string.

    :param string: Some input that can be converted to a unicode representation.
    :type string: str or bytes unicode
    :param str encoding: The encoding to use for conversions, defaults to "utf-8".
    :return: The unicode representation of the string.
    :rtype: str
    """
    unicode_name = get_canonical_encoding_name("utf-8")
    if not errors:
        if get_canonical_encoding_name(encoding) != unicode_name:
            errors = "strict"
        elif six.PY3 and os.name == "nt":
            # Windows needs surrogatepass to round-trip lone surrogates.
            errors = "surrogatepass"
        else:
            errors = "surrogateescape" if six.PY3 else "ignore"
    if issubclass(type(string), six.text_type):
        return string
    try:
        if issubclass(type(string), six.string_types):
            return string.decode(encoding, errors)
        if six.PY3:
            if isinstance(string, bytes):
                return six.text_type(string, encoding, errors)
            return six.text_type(string)
        if hasattr(string, "__unicode__"):
            return six.text_type(string)
        return six.text_type(bytes(string), encoding, errors)
    except UnicodeDecodeError:
        # e.g. an Exception whose args need decoding one by one.
        return " ".join(to_text(arg, encoding, errors) for arg in string)
|
Force a value to a text-type.
:param string: Some input that can be converted to a unicode representation.
:type string: str or bytes unicode
:param encoding: The encoding to use for conversions, defaults to "utf-8"
:type encoding: str, optional
:return: The unicode representation of the string
:rtype: str
|
train
|
https://github.com/sarugaku/vistir/blob/96c008ee62a43608d1e70797f74634cb66a004c1/src/vistir/misc.py#L450-L487
|
[
"def get_canonical_encoding_name(name):\n # type: (str) -> str\n \"\"\"\n Given an encoding name, get the canonical name from a codec lookup.\n\n :param str name: The name of the codec to lookup\n :return: The canonical version of the codec name\n :rtype: str\n \"\"\"\n\n import codecs\n\n try:\n codec = codecs.lookup(name)\n except LookupError:\n return name\n else:\n return codec.name\n"
] |
# -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import io
import json
import locale
import logging
import os
import subprocess
import sys
from collections import OrderedDict
from functools import partial
from itertools import islice, tee
from weakref import WeakKeyDictionary
import six
from .cmdparse import Script
from .compat import (
Iterable,
Path,
StringIO,
fs_str,
is_bytes,
partialmethod,
to_native_string,
)
from .contextmanagers import spinner as spinner
from .termcolors import ANSI_REMOVAL_RE, colorize
if os.name != "nt":
class WindowsError(OSError):
pass
__all__ = [
"shell_escape",
"unnest",
"dedup",
"run",
"load_path",
"partialclass",
"to_text",
"to_bytes",
"locale_encoding",
"chunked",
"take",
"divide",
"getpreferredencoding",
"decode_for_output",
"get_canonical_encoding_name",
"get_wrapped_stream",
"StreamWrapper",
]
def _get_logger(name=None, level="ERROR"):
    """Return a logger configured to emit formatted records to stderr.

    :param str name: Logger name; defaults to this module's name.
    :param level: A logging level name (str) or numeric level.
    :return: The configured :class:`logging.Logger` instance.
    """
    if not name:
        name = __name__
    if isinstance(level, six.string_types):
        level = getattr(logging, level.upper())
    logger = logging.getLogger(name)
    logger.setLevel(level)
    formatter = logging.Formatter(
        "%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s"
    )
    handler = logging.StreamHandler(stream=sys.stderr)
    handler.setFormatter(formatter)
    # Fix: calling this twice for the same name previously stacked a second
    # StreamHandler on the (cached) logger, duplicating every log line.
    if not logger.handlers:
        logger.addHandler(handler)
    return logger
def shell_escape(cmd):
    """Escape strings for use in :func:`~subprocess.Popen` and :func:`run`.

    This is a passthrough method for instantiating a
    :class:`~vistir.cmdparse.Script` object which can be used to escape
    commands to output as a single string.
    """
    return Script.parse(cmd).cmdify()
def unnest(elem):
    """Flatten an arbitrarily nested iterable.

    :param elem: An iterable to flatten.
    :type elem: :class:`~collections.Iterable`

    >>> nested_iterable = (1234, (3456, 4398345, (234234)), (2396, (23895750, 9283798, 29384, (289375983275, 293759, 2347, (2098, 7987, 27599)))))
    >>> list(vistir.misc.unnest(nested_iterable))
    [1234, 3456, 4398345, 234234, 2396, 23895750, 9283798, 29384, 289375983275, 293759, 2347, 2098, 7987, 27599]
    """
    def _nested(candidate):
        # Strings are iterable but treated as atoms, not containers.
        return isinstance(candidate, Iterable) and not isinstance(
            candidate, six.string_types
        )

    target = elem
    if _nested(elem):
        # tee() lets us consume a copy without exhausting the caller's iterator.
        _, target = tee(elem, 2)
    for item in target:
        if _nested(item):
            _, duplicate = tee(item, 2)
            for flattened in unnest(duplicate):
                yield flattened
        else:
            yield item
def _is_iterable(elem):
    """Return True when *elem* exposes a truthy ``__iter__`` attribute."""
    return bool(getattr(elem, "__iter__", False))
def dedup(iterable):
    """Deduplicate an iterable object like iter(set(iterable)) but
    order-preserving: each item is yielded once, at its first position.
    """
    seen = OrderedDict()
    for item in iterable:
        seen.setdefault(item, None)
    return iter(seen)
def _spawn_subprocess(script, env=None, block=True, cwd=None, combine_stderr=True):
from distutils.spawn import find_executable
if not env:
env = os.environ.copy()
command = find_executable(script.command)
options = {
"env": env,
"universal_newlines": True,
"stdout": subprocess.PIPE,
"stderr": subprocess.PIPE if not combine_stderr else subprocess.STDOUT,
"shell": False,
}
if not block:
options["stdin"] = subprocess.PIPE
if cwd:
options["cwd"] = cwd
# Command not found, maybe this is a shell built-in?
cmd = [command] + script.args
if not command: # Try to use CreateProcess directly if possible.
cmd = script.cmdify()
options["shell"] = True
# Try to use CreateProcess directly if possible. Specifically catch
# Windows error 193 "Command is not a valid Win32 application" to handle
# a "command" that is non-executable. See pypa/pipenv#2727.
try:
return subprocess.Popen(cmd, **options)
except WindowsError as e:
if getattr(e, "winerror", 9999) != 193:
raise
options["shell"] = True
# Try shell mode to use Windows's file association for file launch.
return subprocess.Popen(script.cmdify(), **options)
def _read_streams(stream_dict):
results = {}
for outstream in stream_dict.keys():
stream = stream_dict[outstream]
if not stream:
results[outstream] = None
continue
line = to_text(stream.readline())
if not line:
results[outstream] = None
continue
line = to_text("{0}".format(line.rstrip()))
results[outstream] = line
return results
def get_stream_results(cmd_instance, verbose, maxlen, spinner=None, stdout_allowed=False):
stream_results = {"stdout": [], "stderr": []}
streams = {"stderr": cmd_instance.stderr, "stdout": cmd_instance.stdout}
while True:
stream_contents = _read_streams(streams)
stdout_line = stream_contents["stdout"]
stderr_line = stream_contents["stderr"]
if not (stdout_line or stderr_line):
break
for stream_name in stream_contents.keys():
if stream_contents[stream_name] and stream_name in stream_results:
line = stream_contents[stream_name]
stream_results[stream_name].append(line)
display_line = fs_str("{0}".format(line))
if len(display_line) > maxlen:
display_line = "{0}...".format(display_line[:maxlen])
if verbose:
use_stderr = not stdout_allowed or stream_name != "stdout"
if spinner:
target = spinner.stderr if use_stderr else spinner.stdout
spinner.hide_and_write(display_line, target=target)
else:
target = sys.stderr if use_stderr else sys.stdout
target.write(display_line)
target.flush()
if spinner:
spinner.text = to_native_string(
"{0} {1}".format(spinner.text, display_line)
)
continue
return stream_results
def _create_subprocess(
cmd,
env=None,
block=True,
return_object=False,
cwd=os.curdir,
verbose=False,
spinner=None,
combine_stderr=False,
display_limit=200,
start_text="",
write_to_stdout=True,
):
if not env:
env = os.environ.copy()
try:
c = _spawn_subprocess(
cmd, env=env, block=block, cwd=cwd, combine_stderr=combine_stderr
)
except Exception as exc:
import traceback
formatted_tb = "".join(traceback.format_exception(*sys.exc_info())) # pragma: no cover
sys.stderr.write( # pragma: no cover
"Error while executing command %s:" % to_native_string(" ".join(cmd._parts)) # pragma: no cover
) # pragma: no cover
sys.stderr.write(formatted_tb) # pragma: no cover
raise exc # pragma: no cover
if not block:
c.stdin.close()
spinner_orig_text = ""
if spinner and getattr(spinner, "text", None) is not None:
spinner_orig_text = spinner.text
if not spinner_orig_text and start_text is not None:
spinner_orig_text = start_text
stream_results = get_stream_results(
c,
verbose=verbose,
maxlen=display_limit,
spinner=spinner,
stdout_allowed=write_to_stdout,
)
try:
c.wait()
finally:
if c.stdout:
c.stdout.close()
if c.stderr:
c.stderr.close()
if spinner:
if c.returncode > 0:
spinner.fail(to_native_string("Failed...cleaning up..."))
if not os.name == "nt":
spinner.ok(to_native_string("✔ Complete"))
else:
spinner.ok(to_native_string("Complete"))
output = stream_results["stdout"]
err = stream_results["stderr"]
c.out = "\n".join(output) if output else ""
c.err = "\n".join(err) if err else ""
else:
c.out, c.err = c.communicate()
if not block:
c.wait()
c.out = to_text("{0}".format(c.out)) if c.out else fs_str("")
c.err = to_text("{0}".format(c.err)) if c.err else fs_str("")
if not return_object:
return c.out.strip(), c.err.strip()
return c
def run(
cmd,
env=None,
return_object=False,
block=True,
cwd=None,
verbose=False,
nospin=False,
spinner_name=None,
combine_stderr=True,
display_limit=200,
write_to_stdout=True,
):
"""Use `subprocess.Popen` to get the output of a command and decode it.
:param list cmd: A list representing the command you want to run.
:param dict env: Additional environment settings to pass through to the subprocess.
:param bool return_object: When True, returns the whole subprocess instance
:param bool block: When False, returns a potentially still-running :class:`subprocess.Popen` instance
:param str cwd: Current working directory context to use for spawning the subprocess.
:param bool verbose: Whether to print stdout in real time when non-blocking.
:param bool nospin: Whether to disable the cli spinner.
:param str spinner_name: The name of the spinner to use if enabled, defaults to bouncingBar
:param bool combine_stderr: Optionally merge stdout and stderr in the subprocess, false if nonblocking.
:param int display_limit: The max width of output lines to display when using a spinner.
:param bool write_to_stdout: Whether to write to stdout when using a spinner, default True.
:returns: A 2-tuple of (output, error) or a :class:`subprocess.Popen` object.
.. Warning:: Merging standard out and standard error in a nonblocking subprocess
can cause errors in some cases and may not be ideal. Consider disabling
this functionality.
"""
# Copy the environment so the caller's os.environ is never mutated.
_env = os.environ.copy()
if env:
_env.update(env)
# Normalize env keys/values to the platform-native string type.
if six.PY2:
fs_encode = partial(to_bytes, encoding=locale_encoding)
_env = {fs_encode(k): fs_encode(v) for k, v in _env.items()}
else:
_env = {k: fs_str(v) for k, v in _env.items()}
if not spinner_name:
spinner_name = "bouncingBar"
if six.PY2:
if isinstance(cmd, six.string_types):
cmd = cmd.encode("utf-8")
elif isinstance(cmd, (list, tuple)):
cmd = [c.encode("utf-8") for c in cmd]
if not isinstance(cmd, Script):
cmd = Script.parse(cmd)
# Merging streams is only meaningful for a non-blocking Popen object.
if block or not return_object:
combine_stderr = False
start_text = ""
with spinner(
spinner_name=spinner_name,
start_text=start_text,
nospin=nospin,
write_to_stdout=write_to_stdout,
) as sp:
# NOTE(review): write_to_stdout is forwarded as a hard-coded True here,
# ignoring the caller's argument — confirm whether that is intentional.
return _create_subprocess(
cmd,
env=_env,
return_object=return_object,
block=block,
cwd=cwd,
verbose=verbose,
spinner=sp,
combine_stderr=combine_stderr,
start_text=start_text,
write_to_stdout=True,
)
def load_path(python):
"""Load the :mod:`sys.path` from the given python executable's environment as json
:param str python: Path to a valid python executable
:return: A python representation of the `sys.path` value of the given python executable.
:rtype: list
>>> load_path("/home/user/.virtualenvs/requirementslib-5MhGuG3C/bin/python")
['', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python37.zip', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python3.7', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python3.7/lib-dynload', '/home/user/.pyenv/versions/3.7.0/lib/python3.7', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python3.7/site-packages', '/home/user/git/requirementslib/src']
"""
python = Path(python).as_posix()
out, err = run(
[python, "-c", "import json, sys; print(json.dumps(sys.path))"], nospin=True
)
if out:
return json.loads(out)
else:
return []
def partialclass(cls, *args, **kwargs):
"""Returns a partially instantiated class
:return: A partial class instance
:rtype: cls
>>> source = partialclass(Source, url="https://pypi.org/simple")
>>> source
<class '__main__.Source'>
>>> source(name="pypi")
>>> source.__dict__
mappingproxy({'__module__': '__main__', '__dict__': <attribute '__dict__' of 'Source' objects>, '__weakref__': <attribute '__weakref__' of 'Source' objects>, '__doc__': None, '__init__': functools.partialmethod(<function Source.__init__ at 0x7f23af429bf8>, , url='https://pypi.org/simple')})
>>> new_source = source(name="pypi")
>>> new_source
<__main__.Source object at 0x7f23af189b38>
>>> new_source.__dict__
{'url': 'https://pypi.org/simple', 'verify_ssl': True, 'name': 'pypi'}
"""
name_attrs = [
n
for n in (getattr(cls, name, str(cls)) for name in ("__name__", "__qualname__"))
if n is not None
]
name_attrs = name_attrs[0]
type_ = type(
name_attrs, (cls,), {"__init__": partialmethod(cls.__init__, *args, **kwargs)}
)
# Swiped from attrs.make_class
try:
type_.__module__ = sys._getframe(1).f_globals.get("__name__", "__main__")
except (AttributeError, ValueError): # pragma: no cover
pass # pragma: no cover
return type_
# Borrowed from django -- force bytes and decode -- see link for details:
# https://github.com/django/django/blob/fc6b90b/django/utils/encoding.py#L112
def to_bytes(string, encoding="utf-8", errors=None):
"""Force a value to bytes.
:param string: Some input that can be converted to a bytes.
:type string: str or bytes unicode or a memoryview subclass
:param encoding: The encoding to use for conversions, defaults to "utf-8"
:param encoding: str, optional
:return: Corresponding byte representation (for use in filesystem operations)
:rtype: bytes
"""
unicode_name = get_canonical_encoding_name("utf-8")
if not errors:
if get_canonical_encoding_name(encoding) == unicode_name:
if six.PY3 and os.name == "nt":
errors = "surrogatepass"
else:
errors = "surrogateescape" if six.PY3 else "ignore"
else:
errors = "strict"
if isinstance(string, bytes):
if get_canonical_encoding_name(encoding) == unicode_name:
return string
else:
return string.decode(unicode_name).encode(encoding, errors)
elif isinstance(string, memoryview):
return bytes(string)
elif not isinstance(string, six.string_types):
try:
if six.PY3:
return six.text_type(string).encode(encoding, errors)
else:
return bytes(string)
except UnicodeEncodeError:
if isinstance(string, Exception):
return b" ".join(to_bytes(arg, encoding, errors) for arg in string)
return six.text_type(string).encode(encoding, errors)
else:
return string.encode(encoding, errors)
def divide(n, iterable):
    """Split an iterable into *n* contiguous groups, per
    https://more-itertools.readthedocs.io/en/latest/api.html#grouping

    :param int n: Number of unique groups
    :param iter iterable: An iterable to split up
    :return: a list of new iterables derived from the original iterable
    :rtype: list
    """
    items = tuple(iterable)
    size, leftover = divmod(len(items), n)
    groups = []
    stop = 0
    for index in range(n):
        start = stop
        # The first `leftover` groups each absorb one extra element.
        stop = start + size + (1 if index < leftover else 0)
        groups.append(iter(items[start:stop]))
    return groups
def take(n, iterable):
    """Take n elements from the supplied iterable without consuming it.

    :param int n: Number of unique groups
    :param iter iterable: An iterable to split up

    from https://github.com/erikrose/more-itertools/blob/master/more_itertools/recipes.py
    """
    prefix = islice(iterable, n)
    return list(prefix)
def chunked(n, iterable):
    """Split an iterable into lists of length *n* (the last may be shorter).

    :param int n: Number of unique groups
    :param iter iterable: An iterable to split up

    from https://github.com/erikrose/more-itertools/blob/master/more_itertools/more.py
    """
    it = iter(iterable)
    # iter(callable, sentinel): keep pulling n-item prefixes until empty.
    return iter(lambda: list(islice(it, n)), [])
try:
locale_encoding = locale.getdefaultlocale()[1] or "ascii"
except Exception:
locale_encoding = "ascii"
def getpreferredencoding():
    """Determine the proper output encoding for terminal rendering."""
    # Borrowed from Invoke
    # (see https://github.com/pyinvoke/invoke/blob/93af29d/invoke/runners.py#L881)
    preferred = locale.getpreferredencoding(False)
    if six.PY2 and sys.platform != "win32":
        fallback = locale.getdefaultlocale()[1]
        if fallback is not None:
            preferred = fallback
    return preferred
PREFERRED_ENCODING = getpreferredencoding()
def get_output_encoding(source_encoding):
    """
    Given a source encoding, determine the preferred output encoding.

    :param str source_encoding: The encoding of the source material.
    :returns: The output encoding to decode to.
    :rtype: str
    """
    if source_encoding is None:
        return get_canonical_encoding_name(PREFERRED_ENCODING)
    canonical = get_canonical_encoding_name(source_encoding)
    # Plain ASCII output is upgraded to UTF-8 to avoid encode errors.
    return "utf-8" if canonical == "ascii" else canonical
def _encode(output, encoding=None, errors=None, translation_map=None):
if encoding is None:
encoding = PREFERRED_ENCODING
try:
output = output.encode(encoding)
except (UnicodeDecodeError, UnicodeEncodeError):
if translation_map is not None:
if six.PY2:
output = unicode.translate( # noqa: F821
to_text(output, encoding=encoding, errors=errors), translation_map
)
else:
output = output.translate(translation_map)
else:
output = to_text(output, encoding=encoding, errors=errors)
except AttributeError:
pass
return output
def decode_for_output(output, target_stream=None, translation_map=None):
"""Given a string, decode it for output to a terminal
:param str output: A string to print to a terminal
:param target_stream: A stream to write to, we will encode to target this stream if possible.
:param dict translation_map: A mapping of unicode character ordinals to replacement strings.
:return: A re-encoded string using the preferred encoding
:rtype: str
"""
if not isinstance(output, six.string_types):
return output
encoding = None
if target_stream is not None:
encoding = getattr(target_stream, "encoding", None)
encoding = get_output_encoding(encoding)
try:
output = _encode(output, encoding=encoding, translation_map=translation_map)
except (UnicodeDecodeError, UnicodeEncodeError):
output = to_native_string(output)
output = _encode(
output, encoding=encoding, errors="replace", translation_map=translation_map
)
return to_text(output, encoding=encoding, errors="replace")
def get_canonical_encoding_name(name):
    # type: (str) -> str
    """
    Resolve *name* to its canonical codec name via a codec lookup, returning
    it unchanged when no codec by that name exists.

    :param str name: The name of the codec to lookup
    :return: The canonical version of the codec name
    :rtype: str
    """
    import codecs

    try:
        return codecs.lookup(name).name
    except LookupError:
        return name
def _is_binary_buffer(stream):
    """Probe *stream* with a zero-length bytes write; True when it accepts
    bytes (i.e. is a binary buffer), False otherwise.
    """
    try:
        stream.write(b"")
    except Exception:
        # Not a bytes sink; a text probe keeps side effects symmetric and
        # any failure there is irrelevant to the result.
        try:
            stream.write("")
        except Exception:
            pass
        return False
    else:
        return True
def _get_binary_buffer(stream):
    """Return a bytes-accepting buffer for *stream*.

    On Python 3, a text stream is swapped for its underlying ``.buffer``
    attribute when that buffer accepts bytes. NOTE(review): when a text
    stream has no usable buffer this returns ``None`` (the reassigned
    value), not the original stream — confirm callers expect that.
    """
    if six.PY3 and not _is_binary_buffer(stream):
        stream = getattr(stream, "buffer", None)
        if stream is not None and _is_binary_buffer(stream):
            return stream
    return stream
def get_wrapped_stream(stream, encoding=None, errors="replace"):
    """
    Given a stream, wrap it in a `StreamWrapper` instance and return the wrapped stream.

    :param stream: A stream instance to wrap
    :param str encoding: The encoding to use for the stream
    :param str errors: The error handler to use, default "replace"
    :returns: A new, wrapped stream
    :rtype: :class:`StreamWrapper`
    """
    if stream is None:
        raise TypeError("must provide a stream to wrap")
    buffered = _get_binary_buffer(stream)
    if buffered is not None and encoding is None:
        # A usable binary buffer with no requested encoding defaults to UTF-8.
        encoding = "utf-8"
    if encoding:
        encoding = get_canonical_encoding_name(encoding)
    else:
        encoding = get_output_encoding(buffered)
    return StreamWrapper(buffered, encoding, errors, line_buffering=True)
class StreamWrapper(io.TextIOWrapper):
"""
This wrapper class will wrap a provided stream and supply an interface
for compatibility.
"""
def __init__(self, stream, encoding, errors, line_buffering=True, **kwargs):
self._stream = stream = _StreamProvider(stream)
io.TextIOWrapper.__init__(
self, stream, encoding, errors, line_buffering=line_buffering, **kwargs
)
# borrowed from click's implementation of stream wrappers, see
# https://github.com/pallets/click/blob/6cafd32/click/_compat.py#L64
if six.PY2:
def write(self, x):
if isinstance(x, (str, buffer, bytearray)): # noqa: F821
try:
self.flush()
except Exception:
pass
# This is modified from the initial implementation to rely on
# our own decoding functionality to preserve unicode strings where
# possible
return self.buffer.write(str(x))
return io.TextIOWrapper.write(self, x)
else:
def write(self, x):
# try to use backslash and surrogate escape strategies before failing
old_errors = getattr(self, "_errors", self.errors)
self._errors = (
"backslashescape" if self.encoding != "mbcs" else "surrogateescape"
)
try:
return io.TextIOWrapper.write(self, to_text(x, errors=self._errors))
except UnicodeDecodeError:
self._errors = old_errors
return io.TextIOWrapper.write(self, to_text(x, errors=self._errors))
def writelines(self, lines):
for line in lines:
self.write(line)
def __del__(self):
try:
self.detach()
except Exception:
pass
def isatty(self):
return self._stream.isatty()
# More things borrowed from click, this is because we are using `TextIOWrapper` instead of
# just a normal StringIO
class _StreamProvider(object):
def __init__(self, stream):
self._stream = stream
super(_StreamProvider, self).__init__()
def __getattr__(self, name):
return getattr(self._stream, name)
def read1(self, size):
fn = getattr(self._stream, "read1", None)
if fn is not None:
return fn(size)
if six.PY2:
return self._stream.readline(size)
return self._stream.read(size)
def readable(self):
fn = getattr(self._stream, "readable", None)
if fn is not None:
return fn()
try:
self._stream.read(0)
except Exception:
return False
return True
def writable(self):
fn = getattr(self._stream, "writable", None)
if fn is not None:
return fn()
try:
self._stream.write(b"")
except Exception:
return False
return True
def seekable(self):
fn = getattr(self._stream, "seekable", None)
if fn is not None:
return fn()
try:
self._stream.seek(self._stream.tell())
except Exception:
return False
return True
# XXX: The approach here is inspired somewhat by click with details taken from various
# XXX: other sources. Specifically we are using a stream cache and stream wrapping
# XXX: techniques from click (loosely inspired for the most part, with many details)
# XXX: heavily modified to suit our needs
def _isatty(stream):
    """Return whether *stream* reports being attached to a TTY; any failure
    of the probe counts as "not a TTY".
    """
    try:
        result = stream.isatty()
    except Exception:
        result = False
    return result
_wrap_for_color = None
try:
import colorama
except ImportError:
colorama = None
_color_stream_cache = WeakKeyDictionary()
if os.name == "nt" or sys.platform.startswith("win"):
if colorama is not None:
def _wrap_for_color(stream, color=None):
try:
cached = _color_stream_cache.get(stream)
except KeyError:
cached = None
if cached is not None:
return cached
strip = not _can_use_color(stream, color)
_color_wrapper = colorama.AnsiToWin32(stream, strip=strip)
result = _color_wrapper.stream
_write = result.write
def _write_with_color(s):
try:
return _write(s)
except Exception:
_color_wrapper.reset_all()
raise
result.write = _write_with_color
try:
_color_stream_cache[stream] = result
except Exception:
pass
return result
def _cached_stream_lookup(stream_lookup_func, stream_resolution_func):
stream_cache = WeakKeyDictionary()
def lookup():
stream = stream_lookup_func()
result = None
if stream in stream_cache:
result = stream_cache.get(stream, None)
if result is not None:
return result
result = stream_resolution_func()
try:
stream = stream_lookup_func()
stream_cache[stream] = result
except Exception:
pass
return result
return lookup
def get_text_stream(stream="stdout", encoding=None):
"""Retrieve a unicode stream wrapper around **sys.stdout** or **sys.stderr**.
:param str stream: The name of the stream to wrap from the :mod:`sys` module.
:param str encoding: An optional encoding to use.
:return: A new :class:`~vistir.misc.StreamWrapper` instance around the stream
:rtype: `vistir.misc.StreamWrapper`
"""
stream_map = {"stdin": sys.stdin, "stdout": sys.stdout, "stderr": sys.stderr}
if os.name == "nt" or sys.platform.startswith("win"):
from ._winconsole import _get_windows_console_stream, _wrap_std_stream
else:
_get_windows_console_stream = lambda *args: None # noqa
_wrap_std_stream = lambda *args: None # noqa
if six.PY2 and stream != "stdin":
_wrap_std_stream(stream)
sys_stream = stream_map[stream]
windows_console = _get_windows_console_stream(sys_stream, encoding, None)
if windows_console is not None:
return windows_console
return get_wrapped_stream(sys_stream, encoding)
def get_text_stdout():
return get_text_stream("stdout")
def get_text_stderr():
return get_text_stream("stderr")
def get_text_stdin():
return get_text_stream("stdin")
TEXT_STREAMS = {
"stdin": get_text_stdin,
"stdout": get_text_stdout,
"stderr": get_text_stderr,
}
_text_stdin = _cached_stream_lookup(lambda: sys.stdin, get_text_stdin)
_text_stdout = _cached_stream_lookup(lambda: sys.stdout, get_text_stdout)
_text_stderr = _cached_stream_lookup(lambda: sys.stderr, get_text_stderr)
def replace_with_text_stream(stream_name):
    """Given a stream name, replace the target stream with a text-converted equivalent

    :param str stream_name: The name of a target stream, such as **stdout** or **stderr**
    :return: None
    """
    factory = TEXT_STREAMS.get(stream_name)
    if factory is not None:
        setattr(sys, stream_name, factory())
    return None
def _can_use_color(stream=None, color=None):
    """Decide whether ANSI color output is appropriate for *stream*."""
    from .termcolors import DISABLE_COLORS

    if DISABLE_COLORS:
        return False
    if color:
        return bool(color)
    # NOTE(review): falls back to probing *stdin* when no stream is given —
    # confirm this is intentional (stdout would be the typical choice).
    return _isatty(stream if stream else sys.stdin)
def echo(text, fg=None, bg=None, style=None, file=None, err=False, color=None):
"""Write the given text to the provided stream or **sys.stdout** by default.
Provides optional foreground and background colors from the ansi defaults:
**grey**, **red**, **green**, **yellow**, **blue**, **magenta**, **cyan**
or **white**.
Available styles include **bold**, **dark**, **underline**, **blink**, **reverse**,
**concealed**
:param str text: Text to write
:param str fg: Foreground color to use (default: None)
:param str bg: Background color to use (default: None)
:param str style: Style to use (default: None)
:param stream file: File to write to (default: None)
:param bool err: Write to **sys.stderr** instead of **sys.stdout** (default: False)
:param bool color: Whether to force color (i.e. ANSI codes are in the text)
"""
if file and not hasattr(file, "write"):
raise TypeError("Expected a writable stream, received {0!r}".format(file))
if not file:
if err:
file = _text_stderr()
else:
file = _text_stdout()
if text and not isinstance(text, (six.string_types, bytes, bytearray)):
text = six.text_type(text)
text = "" if not text else text
# A trailing newline is always appended before writing.
if isinstance(text, six.text_type):
text += "\n"
else:
text += b"\n"
# Bytes bypass the text path and go straight to the binary buffer.
if text and six.PY3 and is_bytes(text):
buffer = _get_binary_buffer(file)
if buffer is not None:
file.flush()
buffer.write(text)
buffer.flush()
return
if text and not is_bytes(text):
can_use_color = _can_use_color(file, color=color)
if any([fg, bg, style]):
text = colorize(text, fg=fg, bg=bg, attrs=style)
# Strip ANSI codes when color is unusable (or unwrappable on Windows).
if not can_use_color or (os.name == "nt" and not _wrap_for_color):
text = ANSI_REMOVAL_RE.sub("", text)
elif os.name == "nt" and _wrap_for_color:
file = _wrap_for_color(file, color=color)
if text:
file.write(text)
file.flush()
|
sarugaku/vistir
|
src/vistir/misc.py
|
get_wrapped_stream
|
python
|
def get_wrapped_stream(stream, encoding=None, errors="replace"):
if stream is None:
raise TypeError("must provide a stream to wrap")
stream = _get_binary_buffer(stream)
if stream is not None and encoding is None:
encoding = "utf-8"
if not encoding:
encoding = get_output_encoding(stream)
else:
encoding = get_canonical_encoding_name(encoding)
return StreamWrapper(stream, encoding, errors, line_buffering=True)
|
Given a stream, wrap it in a `StreamWrapper` instance and return the wrapped stream.
:param stream: A stream instance to wrap
:param str encoding: The encoding to use for the stream
:param str errors: The error handler to use, default "replace"
:returns: A new, wrapped stream
:rtype: :class:`StreamWrapper`
|
train
|
https://github.com/sarugaku/vistir/blob/96c008ee62a43608d1e70797f74634cb66a004c1/src/vistir/misc.py#L660-L680
|
[
"def get_canonical_encoding_name(name):\n # type: (str) -> str\n \"\"\"\n Given an encoding name, get the canonical name from a codec lookup.\n\n :param str name: The name of the codec to lookup\n :return: The canonical version of the codec name\n :rtype: str\n \"\"\"\n\n import codecs\n\n try:\n codec = codecs.lookup(name)\n except LookupError:\n return name\n else:\n return codec.name\n",
"def get_output_encoding(source_encoding):\n \"\"\"\n Given a source encoding, determine the preferred output encoding.\n\n :param str source_encoding: The encoding of the source material.\n :returns: The output encoding to decode to.\n :rtype: str\n \"\"\"\n\n if source_encoding is not None:\n if get_canonical_encoding_name(source_encoding) == \"ascii\":\n return \"utf-8\"\n return get_canonical_encoding_name(source_encoding)\n return get_canonical_encoding_name(PREFERRED_ENCODING)\n",
"def _get_binary_buffer(stream):\n if six.PY3 and not _is_binary_buffer(stream):\n stream = getattr(stream, \"buffer\", None)\n if stream is not None and _is_binary_buffer(stream):\n return stream\n return stream\n"
] |
# -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import io
import json
import locale
import logging
import os
import subprocess
import sys
from collections import OrderedDict
from functools import partial
from itertools import islice, tee
from weakref import WeakKeyDictionary
import six
from .cmdparse import Script
from .compat import (
Iterable,
Path,
StringIO,
fs_str,
is_bytes,
partialmethod,
to_native_string,
)
from .contextmanagers import spinner as spinner
from .termcolors import ANSI_REMOVAL_RE, colorize
# On non-Windows platforms WindowsError does not exist; define a stand-in so
# the `except WindowsError` clause in _spawn_subprocess stays valid everywhere.
if os.name != "nt":

    class WindowsError(OSError):
        pass


# Public API of this module.
__all__ = [
    "shell_escape",
    "unnest",
    "dedup",
    "run",
    "load_path",
    "partialclass",
    "to_text",
    "to_bytes",
    "locale_encoding",
    "chunked",
    "take",
    "divide",
    "getpreferredencoding",
    "decode_for_output",
    "get_canonical_encoding_name",
    "get_wrapped_stream",
    "StreamWrapper",
]
def _get_logger(name=None, level="ERROR"):
    """Create (or re-fetch) a logger that writes to stderr at the given level.

    :param str name: Logger name; defaults to this module's name.
    :param level: A logging level name (e.g. ``"DEBUG"``) or a numeric level.
    :return: The configured :class:`logging.Logger` instance.
    """
    if not name:
        name = __name__
    if isinstance(level, six.string_types):
        level = getattr(logging, level.upper())
    logger = logging.getLogger(name)
    logger.setLevel(level)
    # Only attach a handler the first time this logger is requested;
    # ``logging.getLogger`` returns the same instance for the same name, so
    # unconditionally adding a handler duplicates every log line on repeat
    # calls.
    if not logger.handlers:
        formatter = logging.Formatter(
            "%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s"
        )
        handler = logging.StreamHandler(stream=sys.stderr)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger
def shell_escape(cmd):
    """Escape *cmd* for use with :func:`~subprocess.Popen` and :func:`run`.

    Parses the command into a :class:`~vistir.cmdparse.Script` instance and
    renders it back out as a single shell-safe string.
    """
    return Script.parse(cmd).cmdify()
def unnest(elem):
    """Flatten an arbitrarily nested iterable.

    :param elem: An iterable to flatten
    :type elem: :class:`~collections.Iterable`

    >>> nested_iterable = (1234, (3456, 4398345, (234234)), (2396, (23895750, 9283798, 29384, (289375983275, 293759, 2347, (2098, 7987, 27599)))))
    >>> list(vistir.misc.unnest(nested_iterable))
    [1234, 3456, 4398345, 234234, 2396, 23895750, 9283798, 29384, 289375983275, 293759, 2347, 2098, 7987, 27599]
    """
    if isinstance(elem, Iterable) and not isinstance(elem, six.string_types):
        # tee() so the caller's iterator is not consumed by our traversal
        elem, source = tee(elem, 2)
    else:
        source = elem
    for item in source:
        if isinstance(item, Iterable) and not isinstance(item, six.string_types):
            item, item_copy = tee(item, 2)
            for nested in unnest(item_copy):
                yield nested
        else:
            yield item
def _is_iterable(elem):
if getattr(elem, "__iter__", False):
return True
return False
def dedup(iterable):
    """Yield the items of *iterable* once each, preserving first-seen order.

    Equivalent to ``iter(set(iterable))`` except that the original ordering
    of the elements is preserved.
    """
    seen = OrderedDict()
    for item in iterable:
        seen[item] = None
    return iter(seen)
def _spawn_subprocess(script, env=None, block=True, cwd=None, combine_stderr=True):
    """Launch *script* (a vistir Script) via :class:`subprocess.Popen`.

    :param script: The parsed command to run (provides ``.command``, ``.args``
        and ``.cmdify()``).
    :param dict env: Environment for the child; defaults to a copy of ``os.environ``.
    :param bool block: When False, a stdin pipe is attached for interaction.
    :param str cwd: Optional working directory for the child process.
    :param bool combine_stderr: Merge stderr into stdout when True.
    :return: The started :class:`subprocess.Popen` instance.
    """
    from distutils.spawn import find_executable

    if not env:
        env = os.environ.copy()
    command = find_executable(script.command)
    options = {
        "env": env,
        "universal_newlines": True,
        "stdout": subprocess.PIPE,
        "stderr": subprocess.PIPE if not combine_stderr else subprocess.STDOUT,
        "shell": False,
    }
    if not block:
        options["stdin"] = subprocess.PIPE
    if cwd:
        options["cwd"] = cwd
    cmd = [command] + script.args
    if not command:  # Command not found, maybe this is a shell built-in?
        # Fall back to running the whole command line through the shell.
        cmd = script.cmdify()
        options["shell"] = True
    # Try to use CreateProcess directly if possible. Specifically catch
    # Windows error 193 "Command is not a valid Win32 application" to handle
    # a "command" that is non-executable. See pypa/pipenv#2727.
    try:
        return subprocess.Popen(cmd, **options)
    except WindowsError as e:
        if getattr(e, "winerror", 9999) != 193:
            raise
        options["shell"] = True
        # Try shell mode to use Windows's file association for file launch.
        return subprocess.Popen(script.cmdify(), **options)
def _read_streams(stream_dict):
    """Read one line from each stream in *stream_dict*.

    :param dict stream_dict: Mapping of stream name -> file-like stream (or None).
    :return: Mapping of stream name -> decoded, right-stripped line, or
        ``None`` when the stream is missing or exhausted.
    :rtype: dict
    """
    results = {}
    for name, stream in stream_dict.items():
        if not stream:
            results[name] = None
            continue
        raw_line = to_text(stream.readline())
        if not raw_line:
            # An empty read means EOF on this stream
            results[name] = None
            continue
        results[name] = to_text("{0}".format(raw_line.rstrip()))
    return results
def get_stream_results(cmd_instance, verbose, maxlen, spinner=None, stdout_allowed=False):
    """Drain stdout/stderr of a running subprocess, optionally echoing lines.

    :param cmd_instance: A :class:`subprocess.Popen`-like object with
        ``stdout`` and ``stderr`` attributes.
    :param bool verbose: Echo each line to the terminal (or spinner) as it arrives.
    :param int maxlen: Truncate displayed lines beyond this width with ``...``.
    :param spinner: Optional spinner instance used for display when present.
    :param bool stdout_allowed: When True, stdout lines may be echoed to stdout;
        otherwise everything is echoed to stderr.
    :return: ``{"stdout": [...], "stderr": [...]}`` with all captured lines.
    :rtype: dict
    """
    stream_results = {"stdout": [], "stderr": []}
    streams = {"stderr": cmd_instance.stderr, "stdout": cmd_instance.stdout}
    while True:
        stream_contents = _read_streams(streams)
        stdout_line = stream_contents["stdout"]
        stderr_line = stream_contents["stderr"]
        # Both streams returned None -> both are exhausted; stop polling.
        if not (stdout_line or stderr_line):
            break
        for stream_name in stream_contents.keys():
            if stream_contents[stream_name] and stream_name in stream_results:
                line = stream_contents[stream_name]
                stream_results[stream_name].append(line)
                # Truncate only the *displayed* copy; the captured line is kept whole.
                display_line = fs_str("{0}".format(line))
                if len(display_line) > maxlen:
                    display_line = "{0}...".format(display_line[:maxlen])
                if verbose:
                    use_stderr = not stdout_allowed or stream_name != "stdout"
                    if spinner:
                        target = spinner.stderr if use_stderr else spinner.stdout
                        spinner.hide_and_write(display_line, target=target)
                    else:
                        target = sys.stderr if use_stderr else sys.stdout
                        target.write(display_line)
                        target.flush()
                if spinner:
                    # Keep the most recent line visible in the spinner text.
                    spinner.text = to_native_string(
                        "{0} {1}".format(spinner.text, display_line)
                    )
                continue
    return stream_results
def _create_subprocess(
    cmd,
    env=None,
    block=True,
    return_object=False,
    cwd=os.curdir,
    verbose=False,
    spinner=None,
    combine_stderr=False,
    display_limit=200,
    start_text="",
    write_to_stdout=True,
):
    """Spawn *cmd* and collect its output; workhorse behind :func:`run`.

    :param cmd: A parsed :class:`~vistir.cmdparse.Script` command.
    :param dict env: Environment for the child; defaults to ``os.environ``.
    :param bool block: When False, stream output incrementally (spinner-aware).
    :param bool return_object: Return the Popen object instead of (out, err).
    :return: ``(stdout, stderr)`` strings, or the Popen instance with
        ``.out`` / ``.err`` attributes set when *return_object* is True.
    """
    if not env:
        env = os.environ.copy()
    try:
        c = _spawn_subprocess(
            cmd, env=env, block=block, cwd=cwd, combine_stderr=combine_stderr
        )
    except Exception as exc:
        import traceback

        formatted_tb = "".join(traceback.format_exception(*sys.exc_info()))  # pragma: no cover
        sys.stderr.write(  # pragma: no cover
            "Error while executing command %s:" % to_native_string(" ".join(cmd._parts))  # pragma: no cover
        )  # pragma: no cover
        sys.stderr.write(formatted_tb)  # pragma: no cover
        raise exc  # pragma: no cover
    if not block:
        # Non-blocking mode: close stdin and stream output live.
        c.stdin.close()
        spinner_orig_text = ""
        if spinner and getattr(spinner, "text", None) is not None:
            spinner_orig_text = spinner.text
        if not spinner_orig_text and start_text is not None:
            spinner_orig_text = start_text
        stream_results = get_stream_results(
            c,
            verbose=verbose,
            maxlen=display_limit,
            spinner=spinner,
            stdout_allowed=write_to_stdout,
        )
        try:
            c.wait()
        finally:
            # Always release the pipe handles, even if wait() raises.
            if c.stdout:
                c.stdout.close()
            if c.stderr:
                c.stderr.close()
        if spinner:
            if c.returncode > 0:
                spinner.fail(to_native_string("Failed...cleaning up..."))
            if not os.name == "nt":
                spinner.ok(to_native_string("✔ Complete"))
            else:
                spinner.ok(to_native_string("Complete"))
        output = stream_results["stdout"]
        err = stream_results["stderr"]
        c.out = "\n".join(output) if output else ""
        c.err = "\n".join(err) if err else ""
    else:
        # Blocking mode: let communicate() gather everything at once.
        c.out, c.err = c.communicate()
    if not block:
        c.wait()
    c.out = to_text("{0}".format(c.out)) if c.out else fs_str("")
    c.err = to_text("{0}".format(c.err)) if c.err else fs_str("")
    if not return_object:
        return c.out.strip(), c.err.strip()
    return c
def run(
    cmd,
    env=None,
    return_object=False,
    block=True,
    cwd=None,
    verbose=False,
    nospin=False,
    spinner_name=None,
    combine_stderr=True,
    display_limit=200,
    write_to_stdout=True,
):
    """Use `subprocess.Popen` to get the output of a command and decode it.

    :param list cmd: A list representing the command you want to run.
    :param dict env: Additional environment settings to pass through to the subprocess.
    :param bool return_object: When True, returns the whole subprocess instance
    :param bool block: When False, returns a potentially still-running :class:`subprocess.Popen` instance
    :param str cwd: Current working directory context to use for spawning the subprocess.
    :param bool verbose: Whether to print stdout in real time when non-blocking.
    :param bool nospin: Whether to disable the cli spinner.
    :param str spinner_name: The name of the spinner to use if enabled, defaults to bouncingBar
    :param bool combine_stderr: Optionally merge stdout and stderr in the subprocess, false if nonblocking.
    :param int display_limit: The max width of output lines to display when using a spinner.
    :param bool write_to_stdout: Whether to write to stdout when using a spinner, default True.
    :returns: A 2-tuple of (output, error) or a :class:`subprocess.Popen` object.

    .. Warning:: Merging standard out and standard error in a nonblocking subprocess
        can cause errors in some cases and may not be ideal. Consider disabling
        this functionality.
    """
    _env = os.environ.copy()
    if env:
        _env.update(env)
    if six.PY2:
        # Python 2 subprocess wants byte-string environment entries.
        fs_encode = partial(to_bytes, encoding=locale_encoding)
        _env = {fs_encode(k): fs_encode(v) for k, v in _env.items()}
    else:
        _env = {k: fs_str(v) for k, v in _env.items()}
    if not spinner_name:
        spinner_name = "bouncingBar"
    if six.PY2:
        if isinstance(cmd, six.string_types):
            cmd = cmd.encode("utf-8")
        elif isinstance(cmd, (list, tuple)):
            cmd = [c.encode("utf-8") for c in cmd]
    if not isinstance(cmd, Script):
        cmd = Script.parse(cmd)
    # NOTE(review): this also disables stderr merging for *blocking* calls,
    # which contradicts the docstring's "false if nonblocking" — confirm intent.
    if block or not return_object:
        combine_stderr = False
    start_text = ""
    with spinner(
        spinner_name=spinner_name,
        start_text=start_text,
        nospin=nospin,
        write_to_stdout=write_to_stdout,
    ) as sp:
        return _create_subprocess(
            cmd,
            env=_env,
            return_object=return_object,
            block=block,
            cwd=cwd,
            verbose=verbose,
            spinner=sp,
            combine_stderr=combine_stderr,
            start_text=start_text,
            write_to_stdout=True,
        )
def load_path(python):
    """Load the :mod:`sys.path` from the given python executable's environment as json

    :param str python: Path to a valid python executable
    :return: A python representation of the `sys.path` value of the given python executable.
    :rtype: list

    >>> load_path("/home/user/.virtualenvs/requirementslib-5MhGuG3C/bin/python")
    ['', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python37.zip', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python3.7', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python3.7/lib-dynload', '/home/user/.pyenv/versions/3.7.0/lib/python3.7', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python3.7/site-packages', '/home/user/git/requirementslib/src']
    """
    interpreter = Path(python).as_posix()
    probe = "import json, sys; print(json.dumps(sys.path))"
    out, _ = run([interpreter, "-c", probe], nospin=True)
    return json.loads(out) if out else []
def partialclass(cls, *args, **kwargs):
    """Return a subclass of *cls* whose ``__init__`` is partially applied.

    :return: A partial class instance
    :rtype: cls

    >>> source = partialclass(Source, url="https://pypi.org/simple")
    >>> source
    <class '__main__.Source'>
    >>> new_source = source(name="pypi")
    >>> new_source.__dict__
    {'url': 'https://pypi.org/simple', 'verify_ssl': True, 'name': 'pypi'}
    """
    candidates = [
        attr
        for attr in (getattr(cls, attr_name, str(cls)) for attr_name in ("__name__", "__qualname__"))
        if attr is not None
    ]
    new_name = candidates[0]
    bound_init = partialmethod(cls.__init__, *args, **kwargs)
    subclass = type(new_name, (cls,), {"__init__": bound_init})
    # Swiped from attrs.make_class: make the subclass report the caller's module
    try:
        subclass.__module__ = sys._getframe(1).f_globals.get("__name__", "__main__")
    except (AttributeError, ValueError):  # pragma: no cover
        pass  # pragma: no cover
    return subclass
# Borrowed from django -- force bytes and decode -- see link for details:
# https://github.com/django/django/blob/fc6b90b/django/utils/encoding.py#L112
def to_bytes(string, encoding="utf-8", errors=None):
    """Force a value to bytes.

    :param string: Some input that can be converted to a bytes.
    :type string: str or bytes unicode or a memoryview subclass
    :param encoding: The encoding to use for conversions, defaults to "utf-8"
    :type encoding: str, optional
    :param errors: The error handler to use; when omitted, one is chosen per
        platform/Python version (see below).
    :type errors: str, optional
    :return: Corresponding byte representation (for use in filesystem operations)
    :rtype: bytes
    """
    unicode_name = get_canonical_encoding_name("utf-8")
    if not errors:
        if get_canonical_encoding_name(encoding) == unicode_name:
            # Encoding to utf-8: prefer the most forgiving handler available
            # on this platform/Python version.
            if six.PY3 and os.name == "nt":
                errors = "surrogatepass"
            else:
                errors = "surrogateescape" if six.PY3 else "ignore"
        else:
            errors = "strict"
    if isinstance(string, bytes):
        if get_canonical_encoding_name(encoding) == unicode_name:
            # Already utf-8 bytes; nothing to do.
            return string
        else:
            # Transcode: decode as utf-8, then re-encode to the target codec.
            return string.decode(unicode_name).encode(encoding, errors)
    elif isinstance(string, memoryview):
        return bytes(string)
    elif not isinstance(string, six.string_types):
        try:
            if six.PY3:
                return six.text_type(string).encode(encoding, errors)
            else:
                return bytes(string)
        except UnicodeEncodeError:
            if isinstance(string, Exception):
                # Encode each exception argument separately and join them.
                return b" ".join(to_bytes(arg, encoding, errors) for arg in string)
            return six.text_type(string).encode(encoding, errors)
    else:
        return string.encode(encoding, errors)
def to_text(string, encoding="utf-8", errors=None):
    """Force a value to a text-type.

    :param string: Some input that can be converted to a unicode representation.
    :type string: str or bytes unicode
    :param encoding: The encoding to use for conversions, defaults to "utf-8"
    :type encoding: str, optional
    :param errors: The error handler to use; when omitted, one is chosen per
        platform/Python version.
    :type errors: str, optional
    :return: The unicode representation of the string
    :rtype: str
    """
    unicode_name = get_canonical_encoding_name("utf-8")
    if not errors:
        if get_canonical_encoding_name(encoding) == unicode_name:
            # Decoding utf-8: prefer the most forgiving handler available.
            if six.PY3 and os.name == "nt":
                errors = "surrogatepass"
            else:
                errors = "surrogateescape" if six.PY3 else "ignore"
        else:
            errors = "strict"
    if issubclass(type(string), six.text_type):
        # Already text; return unchanged.
        return string
    try:
        if not issubclass(type(string), six.string_types):
            if six.PY3:
                if isinstance(string, bytes):
                    string = six.text_type(string, encoding, errors)
                else:
                    string = six.text_type(string)
            elif hasattr(string, "__unicode__"):
                string = six.text_type(string)
            else:
                string = six.text_type(bytes(string), encoding, errors)
        else:
            string = string.decode(encoding, errors)
    except UnicodeDecodeError:
        # Fall back to converting element-wise (e.g. exception args).
        string = " ".join(to_text(arg, encoding, errors) for arg in string)
    return string
def divide(n, iterable):
    """Split *iterable* into *n* contiguous groups.

    per https://more-itertools.readthedocs.io/en/latest/api.html#grouping

    :param int n: Number of unique groups
    :param iter iterable: An iterable to split up
    :return: a list of new iterables derived from the original iterable
    :rtype: list
    """
    items = tuple(iterable)
    size, remainder = divmod(len(items), n)
    groups = []
    begin = 0
    for index in range(n):
        # The first `remainder` groups receive one extra element each.
        length = size + (1 if index < remainder else 0)
        groups.append(iter(items[begin:begin + length]))
        begin += length
    return groups
def take(n, iterable):
    """Return the first *n* elements of *iterable* as a list.

    Note that when *iterable* is an iterator, its first *n* elements are
    consumed by this call.

    :param int n: Number of elements to take
    :param iter iterable: The iterable to take elements from
    :return: A list of at most *n* leading elements
    :rtype: list

    from https://github.com/erikrose/more-itertools/blob/master/more_itertools/recipes.py
    """
    return list(islice(iterable, n))
def chunked(n, iterable):
    """Split *iterable* into lists of length at most *n*.

    :param int n: Maximum size of each chunk
    :param iter iterable: An iterable to split up

    from https://github.com/erikrose/more-itertools/blob/master/more_itertools/more.py
    """
    iterator = iter(iterable)
    # iter(callable, sentinel): keep slicing until an empty chunk appears.
    return iter(lambda: list(islice(iterator, n)), [])
# Best-effort detection of the system locale encoding; fall back to "ascii"
# when the locale cannot be determined (e.g. unset or invalid LANG).
try:
    locale_encoding = locale.getdefaultlocale()[1] or "ascii"
except Exception:
    locale_encoding = "ascii"
def getpreferredencoding():
    """Determine the proper output encoding for terminal rendering.

    :return: The name of the encoding to use for terminal output.
    :rtype: str
    """
    # Borrowed from Invoke
    # (see https://github.com/pyinvoke/invoke/blob/93af29d/invoke/runners.py#L881)
    _encoding = locale.getpreferredencoding(False)
    if six.PY2 and not sys.platform == "win32":
        # On Python 2 (non-Windows), the default locale is used when available.
        _default_encoding = locale.getdefaultlocale()[1]
        if _default_encoding is not None:
            _encoding = _default_encoding
    return _encoding


# Cached at import time; used as the fallback encoding throughout this module.
PREFERRED_ENCODING = getpreferredencoding()
def get_output_encoding(source_encoding):
    """
    Given a source encoding, determine the preferred output encoding.

    :param str source_encoding: The encoding of the source material.
    :returns: The output encoding to decode to.
    :rtype: str
    """
    if source_encoding is None:
        return get_canonical_encoding_name(PREFERRED_ENCODING)
    canonical = get_canonical_encoding_name(source_encoding)
    # Plain ASCII is upgraded to utf-8 so non-ascii output can still render.
    if canonical == "ascii":
        return "utf-8"
    return canonical
def _encode(output, encoding=None, errors=None, translation_map=None):
    """Encode *output* to bytes in *encoding*, with translation fallbacks.

    On encode/decode failure, either translate problem characters via
    *translation_map* or decode to text with the given error handler.
    Non-string inputs are returned unchanged.
    """
    if encoding is None:
        encoding = PREFERRED_ENCODING
    try:
        output = output.encode(encoding)
    except (UnicodeDecodeError, UnicodeEncodeError):
        if translation_map is not None:
            if six.PY2:
                # Python 2 needs unicode.translate for mapping-based translation.
                output = unicode.translate(  # noqa: F821
                    to_text(output, encoding=encoding, errors=errors), translation_map
                )
            else:
                output = output.translate(translation_map)
        else:
            output = to_text(output, encoding=encoding, errors=errors)
    except AttributeError:
        # Not a string-like object; pass it through untouched.
        pass
    return output
def decode_for_output(output, target_stream=None, translation_map=None):
    """Given a string, decode it for output to a terminal

    :param str output: A string to print to a terminal
    :param target_stream: A stream to write to, we will encode to target this stream if possible.
    :param dict translation_map: A mapping of unicode character ordinals to replacement strings.
    :return: A re-encoded string using the preferred encoding
    :rtype: str
    """
    if not isinstance(output, six.string_types):
        return output
    encoding = None
    if target_stream is not None:
        # Prefer the destination stream's own encoding when it declares one.
        encoding = getattr(target_stream, "encoding", None)
    encoding = get_output_encoding(encoding)
    try:
        output = _encode(output, encoding=encoding, translation_map=translation_map)
    except (UnicodeDecodeError, UnicodeEncodeError):
        # Last resort: coerce to the native string type and replace bad chars.
        output = to_native_string(output)
        output = _encode(
            output, encoding=encoding, errors="replace", translation_map=translation_map
        )
    return to_text(output, encoding=encoding, errors="replace")
def get_canonical_encoding_name(name):
    # type: (str) -> str
    """
    Given an encoding name, get the canonical name from a codec lookup.

    :param str name: The name of the codec to lookup
    :return: The canonical version of the codec name
    :rtype: str
    """
    import codecs

    try:
        return codecs.lookup(name).name
    except LookupError:
        # Unknown codec: hand the caller's name back untouched.
        return name
def _is_binary_buffer(stream):
try:
stream.write(b"")
except Exception:
try:
stream.write("")
except Exception:
pass
return False
return True
def _get_binary_buffer(stream):
    """Return the binary buffer underlying *stream* when one can be found.

    On Python 3, a text stream is unwrapped via its ``buffer`` attribute;
    a stream that is already binary (or Python 2 input) is returned as-is.
    May return ``None`` when a text stream has no ``buffer`` attribute.
    """
    if not six.PY3 or _is_binary_buffer(stream):
        return stream
    unwrapped = getattr(stream, "buffer", None)
    if unwrapped is not None and _is_binary_buffer(unwrapped):
        return unwrapped
    return unwrapped
class StreamWrapper(io.TextIOWrapper):
    """
    This wrapper class will wrap a provided stream and supply an interface
    for compatibility.

    The wrapped stream is first adapted through :class:`_StreamProvider` so
    that missing file-protocol methods (``readable``, ``writable``, ...) are
    filled in before handing it to :class:`io.TextIOWrapper`.
    """

    def __init__(self, stream, encoding, errors, line_buffering=True, **kwargs):
        # Keep a handle on the provider so isatty() can delegate to it.
        self._stream = stream = _StreamProvider(stream)
        io.TextIOWrapper.__init__(
            self, stream, encoding, errors, line_buffering=line_buffering, **kwargs
        )

    # borrowed from click's implementation of stream wrappers, see
    # https://github.com/pallets/click/blob/6cafd32/click/_compat.py#L64
    if six.PY2:

        def write(self, x):
            if isinstance(x, (str, buffer, bytearray)):  # noqa: F821
                try:
                    self.flush()
                except Exception:
                    pass
                # This is modified from the initial implementation to rely on
                # our own decoding functionality to preserve unicode strings where
                # possible
                return self.buffer.write(str(x))
            return io.TextIOWrapper.write(self, x)

    else:

        def write(self, x):
            # try to use backslash and surrogate escape strategies before failing
            old_errors = getattr(self, "_errors", self.errors)
            self._errors = (
                "backslashescape" if self.encoding != "mbcs" else "surrogateescape"
            )
            try:
                return io.TextIOWrapper.write(self, to_text(x, errors=self._errors))
            except UnicodeDecodeError:
                # Restore the previous handler and retry with it.
                self._errors = old_errors
                return io.TextIOWrapper.write(self, to_text(x, errors=self._errors))

    def writelines(self, lines):
        # Route every line through write() so the error handling above applies.
        for line in lines:
            self.write(line)

    def __del__(self):
        try:
            self.detach()
        except Exception:
            pass

    def isatty(self):
        # Delegate to the wrapped raw stream rather than the text buffer.
        return self._stream.isatty()
# More things borrowed from click, this is because we are using `TextIOWrapper` instead of
# just a normal StringIO
class _StreamProvider(object):
    """Adapter that fills in missing file-protocol methods on a raw stream.

    Unknown attributes are proxied straight to the wrapped stream; the
    ``read1``/``readable``/``writable``/``seekable`` methods fall back to
    probing the stream when it does not implement them itself.
    """

    def __init__(self, stream):
        self._stream = stream
        super(_StreamProvider, self).__init__()

    def __getattr__(self, name):
        # Proxy anything we don't define to the underlying stream.
        return getattr(self._stream, name)

    def read1(self, size):
        fn = getattr(self._stream, "read1", None)
        if fn is not None:
            return fn(size)
        if six.PY2:
            return self._stream.readline(size)
        return self._stream.read(size)

    def readable(self):
        fn = getattr(self._stream, "readable", None)
        if fn is not None:
            return fn()
        # No readable(): probe with a zero-length read.
        try:
            self._stream.read(0)
        except Exception:
            return False
        return True

    def writable(self):
        fn = getattr(self._stream, "writable", None)
        if fn is not None:
            return fn()
        # No writable(): probe with an empty bytes write.
        try:
            self._stream.write(b"")
        except Exception:
            return False
        return True

    def seekable(self):
        fn = getattr(self._stream, "seekable", None)
        if fn is not None:
            return fn()
        # No seekable(): probe by seeking to the current position.
        try:
            self._stream.seek(self._stream.tell())
        except Exception:
            return False
        return True
# XXX: The approach here is inspired somewhat by click with details taken from various
# XXX: other sources. Specifically we are using a stream cache and stream wrapping
# XXX: techniques from click (loosely inspired for the most part, with many details)
# XXX: heavily modified to suit our needs
def _isatty(stream):
try:
is_a_tty = stream.isatty()
except Exception:
is_a_tty = False
return is_a_tty
# Only defined (non-None) on Windows when colorama is importable; used by
# echo() to translate ANSI codes into Win32 console calls.
_wrap_for_color = None
try:
    import colorama
except ImportError:
    colorama = None
# Cache of raw stream -> colorama-wrapped stream (weak keys so replaced
# streams are evicted automatically).
_color_stream_cache = WeakKeyDictionary()
if os.name == "nt" or sys.platform.startswith("win"):
    if colorama is not None:

        def _wrap_for_color(stream, color=None):
            """Wrap *stream* with colorama's AnsiToWin32 converter (cached)."""
            # NOTE(review): dict.get does not raise KeyError; this except
            # clause looks dead (WeakKeyDictionary.get with an
            # un-weak-referenceable key raises TypeError, not KeyError) —
            # confirm the intended failure mode.
            try:
                cached = _color_stream_cache.get(stream)
            except KeyError:
                cached = None
            if cached is not None:
                return cached
            # Strip ANSI codes entirely when color output is not usable.
            strip = not _can_use_color(stream, color)
            _color_wrapper = colorama.AnsiToWin32(stream, strip=strip)
            result = _color_wrapper.stream
            _write = result.write

            def _write_with_color(s):
                try:
                    return _write(s)
                except Exception:
                    # Reset console attributes before propagating the error.
                    _color_wrapper.reset_all()
                    raise

            result.write = _write_with_color
            try:
                _color_stream_cache[stream] = result
            except Exception:
                # Stream not weak-referenceable; skip caching.
                pass
            return result
def _cached_stream_lookup(stream_lookup_func, stream_resolution_func):
stream_cache = WeakKeyDictionary()
def lookup():
stream = stream_lookup_func()
result = None
if stream in stream_cache:
result = stream_cache.get(stream, None)
if result is not None:
return result
result = stream_resolution_func()
try:
stream = stream_lookup_func()
stream_cache[stream] = result
except Exception:
pass
return result
return lookup
def get_text_stream(stream="stdout", encoding=None):
    """Retrieve a unicode stream wrapper around **sys.stdout** or **sys.stderr**.

    :param str stream: The name of the stream to wrap from the :mod:`sys` module.
    :param str encoding: An optional encoding to use.
    :return: A new :class:`~vistir.misc.StreamWrapper` instance around the stream
    :rtype: `vistir.misc.StreamWrapper`
    """
    stream_map = {"stdin": sys.stdin, "stdout": sys.stdout, "stderr": sys.stderr}
    if os.name == "nt" or sys.platform.startswith("win"):
        from ._winconsole import _get_windows_console_stream, _wrap_std_stream

    else:
        # Non-Windows: these helpers are no-ops.
        _get_windows_console_stream = lambda *args: None  # noqa
        _wrap_std_stream = lambda *args: None  # noqa

    if six.PY2 and stream != "stdin":
        _wrap_std_stream(stream)
    sys_stream = stream_map[stream]
    # Prefer a native Windows console stream when one is available.
    windows_console = _get_windows_console_stream(sys_stream, encoding, None)
    if windows_console is not None:
        return windows_console
    return get_wrapped_stream(sys_stream, encoding)
def get_text_stdout():
    """Return a text-wrapped version of ``sys.stdout``."""
    return get_text_stream("stdout")


def get_text_stderr():
    """Return a text-wrapped version of ``sys.stderr``."""
    return get_text_stream("stderr")


def get_text_stdin():
    """Return a text-wrapped version of ``sys.stdin``."""
    return get_text_stream("stdin")


# Factory lookup used by replace_with_text_stream().
TEXT_STREAMS = {
    "stdin": get_text_stdin,
    "stdout": get_text_stdout,
    "stderr": get_text_stderr,
}

# Cached accessors: a stream is only re-wrapped when the underlying sys
# attribute has been replaced since the previous call.
_text_stdin = _cached_stream_lookup(lambda: sys.stdin, get_text_stdin)
_text_stdout = _cached_stream_lookup(lambda: sys.stdout, get_text_stdout)
_text_stderr = _cached_stream_lookup(lambda: sys.stderr, get_text_stderr)
def replace_with_text_stream(stream_name):
    """Replace ``sys.<stream_name>`` with a text-converted equivalent stream.

    :param str stream_name: The name of a target stream, such as **stdout** or **stderr**
    :return: None
    """
    factory = TEXT_STREAMS.get(stream_name)
    if factory is not None:
        setattr(sys, stream_name, factory())
    return None
def _can_use_color(stream=None, color=None):
    """Decide whether ANSI color output should be used for *stream*.

    An explicit truthy *color* forces colorization; otherwise color is only
    enabled for interactive terminals (and never when globally disabled).
    """
    from .termcolors import DISABLE_COLORS

    if DISABLE_COLORS:
        return False
    if color:
        return bool(color)
    # No explicit override: only colorize interactive terminals.
    return _isatty(stream if stream else sys.stdin)
def echo(text, fg=None, bg=None, style=None, file=None, err=False, color=None):
    """Write the given text to the provided stream or **sys.stdout** by default.

    Provides optional foreground and background colors from the ansi defaults:
    **grey**, **red**, **green**, **yellow**, **blue**, **magenta**, **cyan**
    or **white**.

    Available styles include **bold**, **dark**, **underline**, **blink**, **reverse**,
    **concealed**

    :param str text: Text to write
    :param str fg: Foreground color to use (default: None)
    :param str bg: Background color to use (default: None)
    :param str style: Style to use (default: None)
    :param stream file: File to write to (default: None)
    :param bool err: Write to stderr instead of stdout when no file is given
    :param bool color: Whether to force color (i.e. ANSI codes are in the text)
    """
    if file and not hasattr(file, "write"):
        raise TypeError("Expected a writable stream, received {0!r}".format(file))
    if not file:
        if err:
            file = _text_stderr()
        else:
            file = _text_stdout()
    # Coerce arbitrary objects to text; normalize falsy values to "".
    if text and not isinstance(text, (six.string_types, bytes, bytearray)):
        text = six.text_type(text)
    text = "" if not text else text
    if isinstance(text, six.text_type):
        text += "\n"
    else:
        text += b"\n"
    # Bytes bypass the text layer entirely and go to the binary buffer.
    if text and six.PY3 and is_bytes(text):
        buffer = _get_binary_buffer(file)
        if buffer is not None:
            file.flush()
            buffer.write(text)
            buffer.flush()
            return
    if text and not is_bytes(text):
        can_use_color = _can_use_color(file, color=color)
        if any([fg, bg, style]):
            text = colorize(text, fg=fg, bg=bg, attrs=style)
        if not can_use_color or (os.name == "nt" and not _wrap_for_color):
            # Color unusable: strip any ANSI escape sequences before writing.
            text = ANSI_REMOVAL_RE.sub("", text)
        elif os.name == "nt" and _wrap_for_color:
            # Windows with colorama: translate ANSI codes via the wrapper.
            file = _wrap_for_color(file, color=color)
    if text:
        file.write(text)
    file.flush()
|
sarugaku/vistir
|
src/vistir/misc.py
|
get_text_stream
|
python
|
def get_text_stream(stream="stdout", encoding=None):
stream_map = {"stdin": sys.stdin, "stdout": sys.stdout, "stderr": sys.stderr}
if os.name == "nt" or sys.platform.startswith("win"):
from ._winconsole import _get_windows_console_stream, _wrap_std_stream
else:
_get_windows_console_stream = lambda *args: None # noqa
_wrap_std_stream = lambda *args: None # noqa
if six.PY2 and stream != "stdin":
_wrap_std_stream(stream)
sys_stream = stream_map[stream]
windows_console = _get_windows_console_stream(sys_stream, encoding, None)
if windows_console is not None:
return windows_console
return get_wrapped_stream(sys_stream, encoding)
|
Retrieve a unicode stream wrapper around **sys.stdout** or **sys.stderr**.
:param str stream: The name of the stream to wrap from the :mod:`sys` module.
:param str encoding: An optional encoding to use.
:return: A new :class:`~vistir.misc.StreamWrapper` instance around the stream
:rtype: `vistir.misc.StreamWrapper`
|
train
|
https://github.com/sarugaku/vistir/blob/96c008ee62a43608d1e70797f74634cb66a004c1/src/vistir/misc.py#L863-L886
|
[
"def get_wrapped_stream(stream, encoding=None, errors=\"replace\"):\n \"\"\"\n Given a stream, wrap it in a `StreamWrapper` instance and return the wrapped stream.\n\n :param stream: A stream instance to wrap\n :param str encoding: The encoding to use for the stream\n :param str errors: The error handler to use, default \"replace\"\n :returns: A new, wrapped stream\n :rtype: :class:`StreamWrapper`\n \"\"\"\n\n if stream is None:\n raise TypeError(\"must provide a stream to wrap\")\n stream = _get_binary_buffer(stream)\n if stream is not None and encoding is None:\n encoding = \"utf-8\"\n if not encoding:\n encoding = get_output_encoding(stream)\n else:\n encoding = get_canonical_encoding_name(encoding)\n return StreamWrapper(stream, encoding, errors, line_buffering=True)\n",
"def _get_windows_console_stream(f, encoding, errors):\n if (\n get_buffer is not None\n and encoding in (\"utf-16-le\", None)\n and errors in (\"strict\", None)\n and hasattr(f, \"isatty\")\n and f.isatty()\n ):\n if isinstance(f, ConsoleStream):\n return f\n func = _stream_factories.get(f.fileno())\n if func is not None:\n if not PY2:\n f = getattr(f, \"buffer\", None)\n if f is None:\n return None\n else:\n # If we are on Python 2 we need to set the stream that we\n # deal with to binary mode as otherwise the exercise if a\n # bit moot. The same problems apply as for\n # get_binary_stdin and friends from _compat.\n msvcrt.setmode(f.fileno(), os.O_BINARY)\n return func(f)\n",
"_get_windows_console_stream = lambda *args: None # noqa\n"
] |
# -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import io
import json
import locale
import logging
import os
import subprocess
import sys
from collections import OrderedDict
from functools import partial
from itertools import islice, tee
from weakref import WeakKeyDictionary
import six
from .cmdparse import Script
from .compat import (
Iterable,
Path,
StringIO,
fs_str,
is_bytes,
partialmethod,
to_native_string,
)
from .contextmanagers import spinner as spinner
from .termcolors import ANSI_REMOVAL_RE, colorize
if os.name != "nt":
class WindowsError(OSError):
pass
__all__ = [
"shell_escape",
"unnest",
"dedup",
"run",
"load_path",
"partialclass",
"to_text",
"to_bytes",
"locale_encoding",
"chunked",
"take",
"divide",
"getpreferredencoding",
"decode_for_output",
"get_canonical_encoding_name",
"get_wrapped_stream",
"StreamWrapper",
]
def _get_logger(name=None, level="ERROR"):
    """Create (or re-fetch) a logger that writes to stderr at the given level.

    :param str name: Logger name; defaults to this module's name.
    :param level: A logging level name (e.g. ``"DEBUG"``) or a numeric level.
    :return: The configured :class:`logging.Logger` instance.
    """
    if not name:
        name = __name__
    if isinstance(level, six.string_types):
        level = getattr(logging, level.upper())
    logger = logging.getLogger(name)
    logger.setLevel(level)
    # Only attach a handler the first time this logger is requested;
    # ``logging.getLogger`` returns the same instance for the same name, so
    # unconditionally adding a handler duplicates every log line on repeat
    # calls.
    if not logger.handlers:
        formatter = logging.Formatter(
            "%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s"
        )
        handler = logging.StreamHandler(stream=sys.stderr)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger
def shell_escape(cmd):
"""Escape strings for use in :func:`~subprocess.Popen` and :func:`run`.
This is a passthrough method for instantiating a :class:`~vistir.cmdparse.Script`
object which can be used to escape commands to output as a single string.
"""
cmd = Script.parse(cmd)
return cmd.cmdify()
def unnest(elem):
"""Flatten an arbitrarily nested iterable
:param elem: An iterable to flatten
:type elem: :class:`~collections.Iterable`
>>> nested_iterable = (1234, (3456, 4398345, (234234)), (2396, (23895750, 9283798, 29384, (289375983275, 293759, 2347, (2098, 7987, 27599)))))
>>> list(vistir.misc.unnest(nested_iterable))
[1234, 3456, 4398345, 234234, 2396, 23895750, 9283798, 29384, 289375983275, 293759, 2347, 2098, 7987, 27599]
"""
if isinstance(elem, Iterable) and not isinstance(elem, six.string_types):
elem, target = tee(elem, 2)
else:
target = elem
for el in target:
if isinstance(el, Iterable) and not isinstance(el, six.string_types):
el, el_copy = tee(el, 2)
for sub in unnest(el_copy):
yield sub
else:
yield el
def _is_iterable(elem):
if getattr(elem, "__iter__", False):
return True
return False
def dedup(iterable):
    """Yield the items of *iterable* once each, preserving first-seen order.

    Equivalent to ``iter(set(iterable))`` except that the original ordering
    of the elements is preserved.
    """
    seen = OrderedDict()
    for item in iterable:
        seen[item] = None
    return iter(seen)
def _spawn_subprocess(script, env=None, block=True, cwd=None, combine_stderr=True):
from distutils.spawn import find_executable
if not env:
env = os.environ.copy()
command = find_executable(script.command)
options = {
"env": env,
"universal_newlines": True,
"stdout": subprocess.PIPE,
"stderr": subprocess.PIPE if not combine_stderr else subprocess.STDOUT,
"shell": False,
}
if not block:
options["stdin"] = subprocess.PIPE
if cwd:
options["cwd"] = cwd
# Command not found, maybe this is a shell built-in?
cmd = [command] + script.args
if not command: # Try to use CreateProcess directly if possible.
cmd = script.cmdify()
options["shell"] = True
# Try to use CreateProcess directly if possible. Specifically catch
# Windows error 193 "Command is not a valid Win32 application" to handle
# a "command" that is non-executable. See pypa/pipenv#2727.
try:
return subprocess.Popen(cmd, **options)
except WindowsError as e:
if getattr(e, "winerror", 9999) != 193:
raise
options["shell"] = True
# Try shell mode to use Windows's file association for file launch.
return subprocess.Popen(script.cmdify(), **options)
def _read_streams(stream_dict):
    """Read one line from each stream in *stream_dict*.

    :param dict stream_dict: Mapping of stream name -> file-like stream (or None).
    :return: Mapping of stream name -> decoded, right-stripped line, or
        ``None`` when the stream is missing or exhausted.
    :rtype: dict
    """
    results = {}
    for name, stream in stream_dict.items():
        if not stream:
            results[name] = None
            continue
        raw_line = to_text(stream.readline())
        if not raw_line:
            # An empty read means EOF on this stream
            results[name] = None
            continue
        results[name] = to_text("{0}".format(raw_line.rstrip()))
    return results
def get_stream_results(cmd_instance, verbose, maxlen, spinner=None, stdout_allowed=False):
    """Drain a subprocess's stdout/stderr line-by-line until both are exhausted.

    :param cmd_instance: A running :class:`subprocess.Popen` with piped streams.
    :param bool verbose: When True, echo each line to the spinner or terminal.
    :param int maxlen: Maximum display width; longer lines are truncated with "...".
    :param spinner: Optional spinner instance used for display.
    :param bool stdout_allowed: When True, stdout lines may be echoed to stdout
        instead of being redirected to stderr.
    :returns: Dict with "stdout" and "stderr" keys mapping to lists of captured lines.
    """
    stream_results = {"stdout": [], "stderr": []}
    streams = {"stderr": cmd_instance.stderr, "stdout": cmd_instance.stdout}
    while True:
        stream_contents = _read_streams(streams)
        stdout_line = stream_contents["stdout"]
        stderr_line = stream_contents["stderr"]
        # Both streams returned nothing: the child is done producing output.
        if not (stdout_line or stderr_line):
            break
        for stream_name in stream_contents.keys():
            if stream_contents[stream_name] and stream_name in stream_results:
                line = stream_contents[stream_name]
                stream_results[stream_name].append(line)
                # Truncate long lines for display purposes only; the full line
                # is always kept in stream_results.
                display_line = fs_str("{0}".format(line))
                if len(display_line) > maxlen:
                    display_line = "{0}...".format(display_line[:maxlen])
                if verbose:
                    use_stderr = not stdout_allowed or stream_name != "stdout"
                    if spinner:
                        target = spinner.stderr if use_stderr else spinner.stdout
                        spinner.hide_and_write(display_line, target=target)
                    else:
                        target = sys.stderr if use_stderr else sys.stdout
                        target.write(display_line)
                        target.flush()
                if spinner:
                    spinner.text = to_native_string(
                        "{0} {1}".format(spinner.text, display_line)
                    )
                continue
    return stream_results
def _create_subprocess(
    cmd,
    env=None,
    block=True,
    return_object=False,
    cwd=os.curdir,
    verbose=False,
    spinner=None,
    combine_stderr=False,
    display_limit=200,
    start_text="",
    write_to_stdout=True,
):
    """Spawn *cmd* and manage its lifetime, returning output or the process.

    Blocking mode uses ``communicate()``; non-blocking mode streams output
    incrementally via :func:`get_stream_results` while updating *spinner*.
    Returns ``(out, err)`` strings unless *return_object* is True, in which
    case the :class:`subprocess.Popen` object (with ``.out``/``.err`` attached)
    is returned.
    """
    if not env:
        env = os.environ.copy()
    try:
        c = _spawn_subprocess(
            cmd, env=env, block=block, cwd=cwd, combine_stderr=combine_stderr
        )
    except Exception as exc:
        import traceback
        # Surface the full traceback on stderr before re-raising, since spawn
        # failures are otherwise hard to diagnose through the spinner UI.
        formatted_tb = "".join(traceback.format_exception(*sys.exc_info()))  # pragma: no cover
        sys.stderr.write(  # pragma: no cover
            "Error while executing command %s:" % to_native_string(" ".join(cmd._parts))  # pragma: no cover
        )  # pragma: no cover
        sys.stderr.write(formatted_tb)  # pragma: no cover
        raise exc  # pragma: no cover
    if not block:
        # Non-blocking: close stdin and stream output incrementally.
        c.stdin.close()
        spinner_orig_text = ""
        if spinner and getattr(spinner, "text", None) is not None:
            spinner_orig_text = spinner.text
        if not spinner_orig_text and start_text is not None:
            spinner_orig_text = start_text
        stream_results = get_stream_results(
            c,
            verbose=verbose,
            maxlen=display_limit,
            spinner=spinner,
            stdout_allowed=write_to_stdout,
        )
        try:
            c.wait()
        finally:
            # Always release the pipe handles, even if wait() raises.
            if c.stdout:
                c.stdout.close()
            if c.stderr:
                c.stderr.close()
        if spinner:
            if c.returncode > 0:
                spinner.fail(to_native_string("Failed...cleaning up..."))
            # Windows consoles may not render the check mark glyph.
            if not os.name == "nt":
                spinner.ok(to_native_string("✔ Complete"))
            else:
                spinner.ok(to_native_string("Complete"))
        output = stream_results["stdout"]
        err = stream_results["stderr"]
        c.out = "\n".join(output) if output else ""
        c.err = "\n".join(err) if err else ""
    else:
        c.out, c.err = c.communicate()
    if not block:
        c.wait()
    c.out = to_text("{0}".format(c.out)) if c.out else fs_str("")
    c.err = to_text("{0}".format(c.err)) if c.err else fs_str("")
    if not return_object:
        return c.out.strip(), c.err.strip()
    return c
def run(
    cmd,
    env=None,
    return_object=False,
    block=True,
    cwd=None,
    verbose=False,
    nospin=False,
    spinner_name=None,
    combine_stderr=True,
    display_limit=200,
    write_to_stdout=True,
):
    """Use `subprocess.Popen` to get the output of a command and decode it.

    :param list cmd: A list representing the command you want to run.
    :param dict env: Additional environment settings to pass through to the subprocess.
    :param bool return_object: When True, returns the whole subprocess instance
    :param bool block: When False, returns a potentially still-running :class:`subprocess.Popen` instance
    :param str cwd: Current working directory context to use for spawning the subprocess.
    :param bool verbose: Whether to print stdout in real time when non-blocking.
    :param bool nospin: Whether to disable the cli spinner.
    :param str spinner_name: The name of the spinner to use if enabled, defaults to bouncingBar
    :param bool combine_stderr: Optionally merge stdout and stderr in the subprocess, false if nonblocking.
    :param int display_limit: The max width of output lines to display when using a spinner.
    :param bool write_to_stdout: Whether to write to stdout when using a spinner, default True.
    :returns: A 2-tuple of (output, error) or a :class:`subprocess.Popen` object.

    .. Warning:: Merging standard out and standard error in a nonblocking subprocess
        can cause errors in some cases and may not be ideal. Consider disabling
        this functionality.
    """
    _env = os.environ.copy()
    if env:
        _env.update(env)
    if six.PY2:
        # Python 2 subprocess environments must be byte strings.
        fs_encode = partial(to_bytes, encoding=locale_encoding)
        _env = {fs_encode(k): fs_encode(v) for k, v in _env.items()}
    else:
        _env = {k: fs_str(v) for k, v in _env.items()}
    if not spinner_name:
        spinner_name = "bouncingBar"
    if six.PY2:
        if isinstance(cmd, six.string_types):
            cmd = cmd.encode("utf-8")
        elif isinstance(cmd, (list, tuple)):
            cmd = [c.encode("utf-8") for c in cmd]
    if not isinstance(cmd, Script):
        cmd = Script.parse(cmd)
    # NOTE(review): stderr merging is only honored for non-blocking calls that
    # return the process object -- confirm this matches the documented contract.
    if block or not return_object:
        combine_stderr = False
    start_text = ""
    with spinner(
        spinner_name=spinner_name,
        start_text=start_text,
        nospin=nospin,
        write_to_stdout=write_to_stdout,
    ) as sp:
        return _create_subprocess(
            cmd,
            env=_env,
            return_object=return_object,
            block=block,
            cwd=cwd,
            verbose=verbose,
            spinner=sp,
            combine_stderr=combine_stderr,
            start_text=start_text,
            # Bug fix: this was hard-coded to True, silently ignoring the
            # caller's write_to_stdout argument for stream redirection.
            write_to_stdout=write_to_stdout,
        )
def load_path(python):
    """Return the ``sys.path`` of the given python executable as a list.

    Spawns *python* and asks it to dump ``sys.path`` as JSON.

    :param str python: Path to a valid python executable
    :return: The interpreter's ``sys.path`` entries, or ``[]`` when nothing
        could be read.
    :rtype: list
    """
    interpreter = Path(python).as_posix()
    out, _err = run(
        [interpreter, "-c", "import json, sys; print(json.dumps(sys.path))"], nospin=True
    )
    return json.loads(out) if out else []
def partialclass(cls, *args, **kwargs):
    """Returns a partially instantiated class

    :return: A partial class instance
    :rtype: cls

    >>> source = partialclass(Source, url="https://pypi.org/simple")
    >>> source
    <class '__main__.Source'>
    >>> source(name="pypi")
    >>> source.__dict__
    mappingproxy({'__module__': '__main__', '__dict__': <attribute '__dict__' of 'Source' objects>, '__weakref__': <attribute '__weakref__' of 'Source' objects>, '__doc__': None, '__init__': functools.partialmethod(<function Source.__init__ at 0x7f23af429bf8>, , url='https://pypi.org/simple')})
    >>> new_source = source(name="pypi")
    >>> new_source
    <__main__.Source object at 0x7f23af189b38>
    >>> new_source.__dict__
    {'url': 'https://pypi.org/simple', 'verify_ssl': True, 'name': 'pypi'}
    """
    # Prefer __name__/__qualname__ for the subclass name, falling back to
    # str(cls); the first non-None candidate wins.
    name_attrs = [
        n
        for n in (getattr(cls, name, str(cls)) for name in ("__name__", "__qualname__"))
        if n is not None
    ]
    name_attrs = name_attrs[0]
    # Build a subclass whose __init__ has *args/**kwargs pre-bound.
    type_ = type(
        name_attrs, (cls,), {"__init__": partialmethod(cls.__init__, *args, **kwargs)}
    )
    # Swiped from attrs.make_class: attribute the new class to the caller's
    # module so reprs and pickling look sane.
    try:
        type_.__module__ = sys._getframe(1).f_globals.get("__name__", "__main__")
    except (AttributeError, ValueError):  # pragma: no cover
        pass  # pragma: no cover
    return type_
# Borrowed from django -- force bytes and decode -- see link for details:
# https://github.com/django/django/blob/fc6b90b/django/utils/encoding.py#L112
def to_bytes(string, encoding="utf-8", errors=None):
    """Force a value to bytes.

    :param string: Some input that can be converted to a bytes.
    :type string: str or bytes unicode or a memoryview subclass
    :param encoding: The encoding to use for conversions, defaults to "utf-8"
    :param encoding: str, optional
    :return: Corresponding byte representation (for use in filesystem operations)
    :rtype: bytes
    """
    unicode_name = get_canonical_encoding_name("utf-8")
    if not errors:
        # Choose surrogate-tolerant handlers when targeting utf-8 (the common
        # filesystem case); otherwise be strict so bad data surfaces early.
        if get_canonical_encoding_name(encoding) == unicode_name:
            if six.PY3 and os.name == "nt":
                errors = "surrogatepass"
            else:
                errors = "surrogateescape" if six.PY3 else "ignore"
        else:
            errors = "strict"
    if isinstance(string, bytes):
        if get_canonical_encoding_name(encoding) == unicode_name:
            return string
        else:
            # Re-encode byte input through utf-8 into the requested codec.
            return string.decode(unicode_name).encode(encoding, errors)
    elif isinstance(string, memoryview):
        return bytes(string)
    elif not isinstance(string, six.string_types):
        try:
            if six.PY3:
                return six.text_type(string).encode(encoding, errors)
            else:
                return bytes(string)
        except UnicodeEncodeError:
            if isinstance(string, Exception):
                # NOTE(review): this iterates the exception object itself,
                # which is typically not iterable -- presumably ``string.args``
                # was intended. Confirm before relying on this branch.
                return b" ".join(to_bytes(arg, encoding, errors) for arg in string)
            return six.text_type(string).encode(encoding, errors)
    else:
        return string.encode(encoding, errors)
def to_text(string, encoding="utf-8", errors=None):
    """Force a value to a text-type.

    :param string: Some input that can be converted to a unicode representation.
    :type string: str or bytes unicode
    :param encoding: The encoding to use for conversions, defaults to "utf-8"
    :param encoding: str, optional
    :return: The unicode representation of the string
    :rtype: str
    """
    unicode_name = get_canonical_encoding_name("utf-8")
    if not errors:
        # Mirror to_bytes: surrogate-tolerant handlers for utf-8, strict
        # otherwise.
        if get_canonical_encoding_name(encoding) == unicode_name:
            if six.PY3 and os.name == "nt":
                errors = "surrogatepass"
            else:
                errors = "surrogateescape" if six.PY3 else "ignore"
        else:
            errors = "strict"
    if issubclass(type(string), six.text_type):
        return string
    try:
        if not issubclass(type(string), six.string_types):
            if six.PY3:
                if isinstance(string, bytes):
                    string = six.text_type(string, encoding, errors)
                else:
                    string = six.text_type(string)
            elif hasattr(string, "__unicode__"):
                string = six.text_type(string)
            else:
                string = six.text_type(bytes(string), encoding, errors)
        else:
            string = string.decode(encoding, errors)
    except UnicodeDecodeError:
        # Last resort: decode element-wise (e.g. for sequences of bytes).
        string = " ".join(to_text(arg, encoding, errors) for arg in string)
    return string
def divide(n, iterable):
    """
    split an iterable into n groups, per https://more-itertools.readthedocs.io/en/latest/api.html#grouping

    :param int n: Number of unique groups
    :param iter iterable: An iterable to split up
    :return: a list of new iterables derived from the original iterable
    :rtype: list
    """
    seq = tuple(iterable)
    size, remainder = divmod(len(seq), n)
    groups = []
    begin = 0
    for index in range(n):
        # The first ``remainder`` groups absorb one extra element each.
        end = begin + size + (1 if index < remainder else 0)
        groups.append(iter(seq[begin:end]))
        begin = end
    return groups
def take(n, iterable):
    """Take n elements from the supplied iterable without consuming it.

    :param int n: Number of unique groups
    :param iter iterable: An iterable to split up

    from https://github.com/erikrose/more-itertools/blob/master/more_itertools/recipes.py
    """
    # Pair each element with a counter; range exhausts first, so exactly n
    # items are consumed from the iterable.
    return [item for _, item in zip(range(n), iterable)]
def chunked(n, iterable):
    """Split an iterable into lists of length *n*.

    :param int n: Number of unique groups
    :param iter iterable: An iterable to split up

    from https://github.com/erikrose/more-itertools/blob/master/more_itertools/more.py
    """
    iterator = iter(iterable)
    # iter(callable, sentinel): keep slicing off n-element lists until the
    # iterator is exhausted and an empty list comes back.
    return iter(lambda: list(islice(iterator, n)), [])
# Best-effort detection of the interpreter's locale encoding at import time;
# fall back to ASCII when the locale database is unusable (getdefaultlocale
# can raise on some platforms/configurations).
try:
    locale_encoding = locale.getdefaultlocale()[1] or "ascii"
except Exception:
    locale_encoding = "ascii"
def getpreferredencoding():
    """Determine the proper output encoding for terminal rendering"""
    # Borrowed from Invoke
    # (see https://github.com/pyinvoke/invoke/blob/93af29d/invoke/runners.py#L881)
    encoding = locale.getpreferredencoding(False)
    if six.PY2 and sys.platform != "win32":
        fallback = locale.getdefaultlocale()[1]
        if fallback is not None:
            encoding = fallback
    return encoding
# Cache the terminal's preferred output encoding once at import time.
PREFERRED_ENCODING = getpreferredencoding()
def get_output_encoding(source_encoding):
    """
    Given a source encoding, determine the preferred output encoding.

    :param str source_encoding: The encoding of the source material.
    :returns: The output encoding to decode to.
    :rtype: str
    """
    if source_encoding is None:
        # No hint: fall back to the terminal's preferred encoding.
        return get_canonical_encoding_name(PREFERRED_ENCODING)
    canonical = get_canonical_encoding_name(source_encoding)
    # ASCII is too restrictive for output; promote it to utf-8.
    return "utf-8" if canonical == "ascii" else canonical
def _encode(output, encoding=None, errors=None, translation_map=None):
    """Encode *output* to bytes, degrading gracefully on codec errors.

    On encode/decode failure, either translate problem characters via
    *translation_map* or fall back to a text representation. Non-string
    inputs (no ``.encode``) are returned unchanged.
    """
    if encoding is None:
        encoding = PREFERRED_ENCODING
    try:
        output = output.encode(encoding)
    except (UnicodeDecodeError, UnicodeEncodeError):
        if translation_map is not None:
            if six.PY2:
                # Python 2 str has no mapping-based translate; go through the
                # unicode type explicitly.
                output = unicode.translate(  # noqa: F821
                    to_text(output, encoding=encoding, errors=errors), translation_map
                )
            else:
                output = output.translate(translation_map)
        else:
            output = to_text(output, encoding=encoding, errors=errors)
    except AttributeError:
        # Not a string-like object; pass it through untouched.
        pass
    return output
def decode_for_output(output, target_stream=None, translation_map=None):
    """Given a string, decode it for output to a terminal

    :param str output: A string to print to a terminal
    :param target_stream: A stream to write to, we will encode to target this stream if possible.
    :param dict translation_map: A mapping of unicode character ordinals to replacement strings.
    :return: A re-encoded string using the preferred encoding
    :rtype: str
    """
    if not isinstance(output, six.string_types):
        return output
    encoding = None
    if target_stream is not None:
        # Prefer the destination stream's own encoding when it exposes one.
        encoding = getattr(target_stream, "encoding", None)
    encoding = get_output_encoding(encoding)
    try:
        output = _encode(output, encoding=encoding, translation_map=translation_map)
    except (UnicodeDecodeError, UnicodeEncodeError):
        # Fall back to the platform-native string form with lossy replacement.
        output = to_native_string(output)
        output = _encode(
            output, encoding=encoding, errors="replace", translation_map=translation_map
        )
    return to_text(output, encoding=encoding, errors="replace")
def get_canonical_encoding_name(name):
    # type: (str) -> str
    """
    Given an encoding name, get the canonical name from a codec lookup.

    :param str name: The name of the codec to lookup
    :return: The canonical version of the codec name
    :rtype: str
    """
    import codecs
    try:
        return codecs.lookup(name).name
    except LookupError:
        # Unknown codec: hand the name back unchanged.
        return name
def _is_binary_buffer(stream):
try:
stream.write(b"")
except Exception:
try:
stream.write("")
except Exception:
pass
return False
return True
def _get_binary_buffer(stream):
    """Resolve a bytes-capable buffer for *stream* on Python 3.

    For a text stream, this returns its underlying ``.buffer`` when that
    buffer accepts bytes -- and may return ``None`` when the stream has no
    usable binary buffer (callers check for this).
    """
    candidate = stream
    if six.PY3 and not _is_binary_buffer(candidate):
        candidate = getattr(candidate, "buffer", None)
        if candidate is not None and _is_binary_buffer(candidate):
            return candidate
    return candidate
def get_wrapped_stream(stream, encoding=None, errors="replace"):
    """
    Given a stream, wrap it in a `StreamWrapper` instance and return the wrapped stream.

    :param stream: A stream instance to wrap
    :param str encoding: The encoding to use for the stream
    :param str errors: The error handler to use, default "replace"
    :returns: A new, wrapped stream
    :rtype: :class:`StreamWrapper`
    """
    if stream is None:
        raise TypeError("must provide a stream to wrap")
    # Prefer the stream's binary buffer; default binary streams to utf-8.
    stream = _get_binary_buffer(stream)
    if stream is not None and encoding is None:
        encoding = "utf-8"
    if not encoding:
        encoding = get_output_encoding(stream)
    else:
        encoding = get_canonical_encoding_name(encoding)
    return StreamWrapper(stream, encoding, errors, line_buffering=True)
class StreamWrapper(io.TextIOWrapper):
    """
    This wrapper class will wrap a provided stream and supply an interface
    for compatibility.
    """

    def __init__(self, stream, encoding, errors, line_buffering=True, **kwargs):
        # Wrap the raw stream in a _StreamProvider so TextIOWrapper gets the
        # buffer protocol methods it expects (read1/readable/writable/seekable).
        self._stream = stream = _StreamProvider(stream)
        io.TextIOWrapper.__init__(
            self, stream, encoding, errors, line_buffering=line_buffering, **kwargs
        )

    # borrowed from click's implementation of stream wrappers, see
    # https://github.com/pallets/click/blob/6cafd32/click/_compat.py#L64
    if six.PY2:

        def write(self, x):
            if isinstance(x, (str, buffer, bytearray)):  # noqa: F821
                try:
                    self.flush()
                except Exception:
                    pass
                # This is modified from the initial implementation to rely on
                # our own decoding functionality to preserve unicode strings where
                # possible
                return self.buffer.write(str(x))
            return io.TextIOWrapper.write(self, x)

    else:

        def write(self, x):
            # try to use backslash and surrogate escape strategies before failing
            old_errors = getattr(self, "_errors", self.errors)
            self._errors = (
                "backslashescape" if self.encoding != "mbcs" else "surrogateescape"
            )
            try:
                return io.TextIOWrapper.write(self, to_text(x, errors=self._errors))
            except UnicodeDecodeError:
                # Restore the previous handler and retry with it.
                self._errors = old_errors
                return io.TextIOWrapper.write(self, to_text(x, errors=self._errors))

    def writelines(self, lines):
        # Route every line through write() so the encoding fallbacks apply.
        for line in lines:
            self.write(line)

    def __del__(self):
        # Detach instead of closing so the underlying stream stays usable.
        try:
            self.detach()
        except Exception:
            pass

    def isatty(self):
        return self._stream.isatty()
# More things borrowed from click, this is because we are using `TextIOWrapper` instead of
# just a normal StringIO
# More things borrowed from click, this is because we are using `TextIOWrapper` instead of
# just a normal StringIO
class _StreamProvider(object):
    """Adapter that backfills buffer-protocol methods on an arbitrary stream.

    Delegates every unknown attribute to the wrapped stream and provides
    best-effort ``read1``/``readable``/``writable``/``seekable`` so the
    object can back an :class:`io.TextIOWrapper`.
    """

    def __init__(self, stream):
        self._stream = stream
        super(_StreamProvider, self).__init__()

    def __getattr__(self, name):
        # Transparent delegation for everything not defined here.
        return getattr(self._stream, name)

    def read1(self, size):
        fn = getattr(self._stream, "read1", None)
        if fn is not None:
            return fn(size)
        if six.PY2:
            return self._stream.readline(size)
        return self._stream.read(size)

    def readable(self):
        fn = getattr(self._stream, "readable", None)
        if fn is not None:
            return fn()
        # Probe with a zero-length read when the stream can't say itself.
        try:
            self._stream.read(0)
        except Exception:
            return False
        return True

    def writable(self):
        fn = getattr(self._stream, "writable", None)
        if fn is not None:
            return fn()
        # Probe with a zero-length bytes write.
        try:
            self._stream.write(b"")
        except Exception:
            return False
        return True

    def seekable(self):
        fn = getattr(self._stream, "seekable", None)
        if fn is not None:
            return fn()
        # Probe by seeking to the current position (a no-op when supported).
        try:
            self._stream.seek(self._stream.tell())
        except Exception:
            return False
        return True
# XXX: The approach here is inspired somewhat by click with details taken from various
# XXX: other sources. Specifically we are using a stream cache and stream wrapping
# XXX: techniques from click (loosely inspired for the most part, with many details)
# XXX: heavily modified to suit our needs
def _isatty(stream):
try:
is_a_tty = stream.isatty()
except Exception:
is_a_tty = False
return is_a_tty
# Optional Windows ANSI color support: _wrap_for_color stays None unless we
# are on Windows AND colorama is importable, in which case it becomes a
# function that wraps a stream with colorama's ANSI-to-Win32 translator.
_wrap_for_color = None
try:
    import colorama
except ImportError:
    colorama = None
_color_stream_cache = WeakKeyDictionary()
if os.name == "nt" or sys.platform.startswith("win"):
    if colorama is not None:

        def _wrap_for_color(stream, color=None):
            # NOTE(review): dict.get never raises KeyError; this except only
            # fires for TypeError-free paths, and non-weak-referenceable
            # streams will raise TypeError here instead -- confirm intent.
            try:
                cached = _color_stream_cache.get(stream)
            except KeyError:
                cached = None
            if cached is not None:
                return cached
            # Strip ANSI codes entirely when the stream can't render color.
            strip = not _can_use_color(stream, color)
            _color_wrapper = colorama.AnsiToWin32(stream, strip=strip)
            result = _color_wrapper.stream
            _write = result.write

            def _write_with_color(s):
                # Reset terminal state if a write blows up mid-escape-sequence.
                try:
                    return _write(s)
                except Exception:
                    _color_wrapper.reset_all()
                    raise

            result.write = _write_with_color
            # Cache weakly so wrapped streams die with their originals.
            try:
                _color_stream_cache[stream] = result
            except Exception:
                pass
            return result
def _cached_stream_lookup(stream_lookup_func, stream_resolution_func):
stream_cache = WeakKeyDictionary()
def lookup():
stream = stream_lookup_func()
result = None
if stream in stream_cache:
result = stream_cache.get(stream, None)
if result is not None:
return result
result = stream_resolution_func()
try:
stream = stream_lookup_func()
stream_cache[stream] = result
except Exception:
pass
return result
return lookup
def get_text_stdout():
    # Resolve a text-mode wrapper around stdout via ``get_text_stream``
    # (defined elsewhere in this module -- not visible in this chunk).
    return get_text_stream("stdout")
def get_text_stderr():
    # Resolve a text-mode wrapper around stderr via ``get_text_stream``
    # (defined elsewhere in this module -- not visible in this chunk).
    return get_text_stream("stderr")
def get_text_stdin():
    # Resolve a text-mode wrapper around stdin via ``get_text_stream``
    # (defined elsewhere in this module -- not visible in this chunk).
    return get_text_stream("stdin")
# Map of stream names to their text-mode resolver functions, used by
# replace_with_text_stream() to swap sys streams by name.
TEXT_STREAMS = {
    "stdin": get_text_stdin,
    "stdout": get_text_stdout,
    "stderr": get_text_stderr,
}

# Weakly-cached resolvers: each returns a text wrapper for the *current*
# sys stream, re-resolving if the underlying stream object is replaced.
_text_stdin = _cached_stream_lookup(lambda: sys.stdin, get_text_stdin)
_text_stdout = _cached_stream_lookup(lambda: sys.stdout, get_text_stdout)
_text_stderr = _cached_stream_lookup(lambda: sys.stderr, get_text_stderr)
def replace_with_text_stream(stream_name):
    """Given a stream name, replace the target stream with a text-converted equivalent

    :param str stream_name: The name of a target stream, such as **stdout** or **stderr**
    :return: None
    """
    factory = TEXT_STREAMS.get(stream_name)
    if factory is None:
        # Unknown stream name: leave sys untouched.
        return None
    setattr(sys, stream_name, factory())
    return None
def _can_use_color(stream=None, color=None):
    """Decide whether ANSI color should be emitted.

    Honors the global DISABLE_COLORS kill-switch; otherwise an explicit
    *color* argument wins, and with no argument the decision falls back to a
    tty check on *stream*.
    """
    from .termcolors import DISABLE_COLORS

    if DISABLE_COLORS:
        return False
    if not color:
        if not stream:
            # NOTE(review): defaulting to sys.stdin for an *output* color
            # check looks odd -- presumably sys.stdout was intended; confirm
            # against callers before changing.
            stream = sys.stdin
        return _isatty(stream)
    return bool(color)
def echo(text, fg=None, bg=None, style=None, file=None, err=False, color=None):
    """Write the given text to the provided stream or **sys.stdout** by default.

    Provides optional foreground and background colors from the ansi defaults:
    **grey**, **red**, **green**, **yellow**, **blue**, **magenta**, **cyan**
    or **white**.

    Available styles include **bold**, **dark**, **underline**, **blink**, **reverse**,
    **concealed**

    :param str text: Text to write
    :param str fg: Foreground color to use (default: None)
    :param str bg: Foreground color to use (default: None)
    :param str style: Style to use (default: None)
    :param stream file: File to write to (default: None)
    :param bool color: Whether to force color (i.e. ANSI codes are in the text)
    """
    if file and not hasattr(file, "write"):
        raise TypeError("Expected a writable stream, received {0!r}".format(file))
    if not file:
        if err:
            file = _text_stderr()
        else:
            file = _text_stdout()
    # Coerce non-string payloads to text, then always terminate with a newline
    # of the matching type (text vs bytes).
    if text and not isinstance(text, (six.string_types, bytes, bytearray)):
        text = six.text_type(text)
    text = "" if not text else text
    if isinstance(text, six.text_type):
        text += "\n"
    else:
        text += b"\n"
    # Bytes payloads bypass the text layer and go straight to the binary buffer.
    if text and six.PY3 and is_bytes(text):
        buffer = _get_binary_buffer(file)
        if buffer is not None:
            file.flush()
            buffer.write(text)
            buffer.flush()
            return
    if text and not is_bytes(text):
        can_use_color = _can_use_color(file, color=color)
        if any([fg, bg, style]):
            text = colorize(text, fg=fg, bg=bg, attrs=style)
        if not can_use_color or (os.name == "nt" and not _wrap_for_color):
            # No color support: strip any ANSI escape sequences from the text.
            text = ANSI_REMOVAL_RE.sub("", text)
        elif os.name == "nt" and _wrap_for_color:
            # Windows with colorama: translate ANSI codes at write time.
            file = _wrap_for_color(file, color=color)
    if text:
        file.write(text)
    file.flush()
|
sarugaku/vistir
|
src/vistir/misc.py
|
replace_with_text_stream
|
python
|
def replace_with_text_stream(stream_name):
new_stream = TEXT_STREAMS.get(stream_name)
if new_stream is not None:
new_stream = new_stream()
setattr(sys, stream_name, new_stream)
return None
|
Given a stream name, replace the target stream with a text-converted equivalent
:param str stream_name: The name of a target stream, such as **stdout** or **stderr**
:return: None
|
train
|
https://github.com/sarugaku/vistir/blob/96c008ee62a43608d1e70797f74634cb66a004c1/src/vistir/misc.py#L913-L923
| null |
# -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import io
import json
import locale
import logging
import os
import subprocess
import sys
from collections import OrderedDict
from functools import partial
from itertools import islice, tee
from weakref import WeakKeyDictionary
import six
from .cmdparse import Script
from .compat import (
Iterable,
Path,
StringIO,
fs_str,
is_bytes,
partialmethod,
to_native_string,
)
from .contextmanagers import spinner as spinner
from .termcolors import ANSI_REMOVAL_RE, colorize
if os.name != "nt":
class WindowsError(OSError):
pass
# Public API of this module; subprocess helpers, encoding converters, and the
# stream-wrapping utilities are the primary exports.
__all__ = [
    "shell_escape",
    "unnest",
    "dedup",
    "run",
    "load_path",
    "partialclass",
    "to_text",
    "to_bytes",
    "locale_encoding",
    "chunked",
    "take",
    "divide",
    "getpreferredencoding",
    "decode_for_output",
    "get_canonical_encoding_name",
    "get_wrapped_stream",
    "StreamWrapper",
]
def _get_logger(name=None, level="ERROR"):
    """Return a logger configured with a verbose stderr stream handler.

    :param str name: Logger name; defaults to this module's name.
    :param level: A ``logging`` level constant or its string name (e.g. "DEBUG").
    :returns: The configured :class:`logging.Logger`.
    """
    if not name:
        name = __name__
    if isinstance(level, six.string_types):
        level = getattr(logging, level.upper())
    logger = logging.getLogger(name)
    logger.setLevel(level)
    formatter = logging.Formatter(
        "%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s"
    )
    handler = logging.StreamHandler(stream=sys.stderr)
    handler.setFormatter(formatter)
    # Bug fix: unconditionally adding a handler attached a duplicate
    # StreamHandler on every call for the same logger name, causing each
    # record to be emitted multiple times.
    if not any(isinstance(h, logging.StreamHandler) for h in logger.handlers):
        logger.addHandler(handler)
    return logger
def shell_escape(cmd):
    """Escape strings for use in :func:`~subprocess.Popen` and :func:`run`.

    Passthrough that parses *cmd* into a :class:`~vistir.cmdparse.Script`
    and renders it back as a single escaped command string.
    """
    return Script.parse(cmd).cmdify()
def unnest(elem):
    """Flatten an arbitrarily nested iterable, yielding leaves in order.

    Strings are treated as leaves, never iterated character-wise at nested
    levels.

    :param elem: An iterable to flatten
    :type elem: :class:`~collections.Iterable`

    >>> list(vistir.misc.unnest((1, (2, 3), (4, (5, 6)))))
    [1, 2, 3, 4, 5, 6]
    """
    if isinstance(elem, Iterable) and not isinstance(elem, six.string_types):
        # tee() so a one-shot iterator input is not consumed by the caller.
        _unused, items = tee(elem, 2)
    else:
        items = elem
    for item in items:
        if isinstance(item, Iterable) and not isinstance(item, six.string_types):
            _unused, branch = tee(item, 2)
            for leaf in unnest(branch):
                yield leaf
        else:
            yield item
def _is_iterable(elem):
if getattr(elem, "__iter__", False):
return True
return False
def dedup(iterable):
    """Deduplicate an iterable, like ``iter(set(iterable))`` but
    order-preserving.
    """
    unique = OrderedDict.fromkeys(iterable)
    return iter(unique)
def _spawn_subprocess(script, env=None, block=True, cwd=None, combine_stderr=True):
    """Spawn *script* (a parsed ``Script``) via :class:`subprocess.Popen`.

    :param script: The parsed command to execute.
    :param dict env: Environment for the child; defaults to a copy of ``os.environ``.
    :param bool block: When False, attach a stdin pipe so the caller can write to it.
    :param str cwd: Optional working directory for the child process.
    :param bool combine_stderr: When True, merge stderr into stdout.
    :returns: The spawned :class:`subprocess.Popen` instance.
    """
    from distutils.spawn import find_executable
    if not env:
        env = os.environ.copy()
    command = find_executable(script.command)
    options = {
        "env": env,
        "universal_newlines": True,
        "stdout": subprocess.PIPE,
        "stderr": subprocess.PIPE if not combine_stderr else subprocess.STDOUT,
        "shell": False,
    }
    if not block:
        options["stdin"] = subprocess.PIPE
    if cwd:
        options["cwd"] = cwd
    # Command not found on PATH -- maybe this is a shell built-in, so fall
    # back to shell mode with the fully escaped command string.
    cmd = [command] + script.args
    if not command:
        cmd = script.cmdify()
        options["shell"] = True
    # Try to use CreateProcess directly if possible. Specifically catch
    # Windows error 193 "Command is not a valid Win32 application" to handle
    # a "command" that is non-executable. See pypa/pipenv#2727.
    try:
        return subprocess.Popen(cmd, **options)
    except WindowsError as e:
        if getattr(e, "winerror", 9999) != 193:
            raise
        options["shell"] = True
        # Try shell mode to use Windows's file association for file launch.
        return subprocess.Popen(script.cmdify(), **options)
def _read_streams(stream_dict):
    """Read one line from each stream in *stream_dict*.

    Returns a dict with the same keys mapping to the rstripped text of the
    next line, or ``None`` when the stream is missing or exhausted.
    """
    results = {}
    for name, stream in stream_dict.items():
        if not stream:
            results[name] = None
            continue
        raw_line = to_text(stream.readline())
        if not raw_line:
            results[name] = None
            continue
        results[name] = to_text("{0}".format(raw_line.rstrip()))
    return results
def get_stream_results(cmd_instance, verbose, maxlen, spinner=None, stdout_allowed=False):
    """Drain a subprocess's stdout/stderr line-by-line until both are exhausted.

    :param cmd_instance: A running :class:`subprocess.Popen` with piped streams.
    :param bool verbose: When True, echo each line to the spinner or terminal.
    :param int maxlen: Maximum display width; longer lines are truncated with "...".
    :param spinner: Optional spinner instance used for display.
    :param bool stdout_allowed: When True, stdout lines may be echoed to stdout
        instead of being redirected to stderr.
    :returns: Dict with "stdout" and "stderr" keys mapping to lists of captured lines.
    """
    stream_results = {"stdout": [], "stderr": []}
    streams = {"stderr": cmd_instance.stderr, "stdout": cmd_instance.stdout}
    while True:
        stream_contents = _read_streams(streams)
        stdout_line = stream_contents["stdout"]
        stderr_line = stream_contents["stderr"]
        # Both streams returned nothing: the child is done producing output.
        if not (stdout_line or stderr_line):
            break
        for stream_name in stream_contents.keys():
            if stream_contents[stream_name] and stream_name in stream_results:
                line = stream_contents[stream_name]
                stream_results[stream_name].append(line)
                # Truncate long lines for display purposes only; the full line
                # is always kept in stream_results.
                display_line = fs_str("{0}".format(line))
                if len(display_line) > maxlen:
                    display_line = "{0}...".format(display_line[:maxlen])
                if verbose:
                    use_stderr = not stdout_allowed or stream_name != "stdout"
                    if spinner:
                        target = spinner.stderr if use_stderr else spinner.stdout
                        spinner.hide_and_write(display_line, target=target)
                    else:
                        target = sys.stderr if use_stderr else sys.stdout
                        target.write(display_line)
                        target.flush()
                if spinner:
                    spinner.text = to_native_string(
                        "{0} {1}".format(spinner.text, display_line)
                    )
                continue
    return stream_results
def _create_subprocess(
    cmd,
    env=None,
    block=True,
    return_object=False,
    cwd=os.curdir,
    verbose=False,
    spinner=None,
    combine_stderr=False,
    display_limit=200,
    start_text="",
    write_to_stdout=True,
):
    """Spawn *cmd* and manage its lifetime, returning output or the process.

    Blocking mode uses ``communicate()``; non-blocking mode streams output
    incrementally via :func:`get_stream_results` while updating *spinner*.
    Returns ``(out, err)`` strings unless *return_object* is True, in which
    case the :class:`subprocess.Popen` object (with ``.out``/``.err`` attached)
    is returned.
    """
    if not env:
        env = os.environ.copy()
    try:
        c = _spawn_subprocess(
            cmd, env=env, block=block, cwd=cwd, combine_stderr=combine_stderr
        )
    except Exception as exc:
        import traceback
        # Surface the full traceback on stderr before re-raising, since spawn
        # failures are otherwise hard to diagnose through the spinner UI.
        formatted_tb = "".join(traceback.format_exception(*sys.exc_info()))  # pragma: no cover
        sys.stderr.write(  # pragma: no cover
            "Error while executing command %s:" % to_native_string(" ".join(cmd._parts))  # pragma: no cover
        )  # pragma: no cover
        sys.stderr.write(formatted_tb)  # pragma: no cover
        raise exc  # pragma: no cover
    if not block:
        # Non-blocking: close stdin and stream output incrementally.
        c.stdin.close()
        spinner_orig_text = ""
        if spinner and getattr(spinner, "text", None) is not None:
            spinner_orig_text = spinner.text
        if not spinner_orig_text and start_text is not None:
            spinner_orig_text = start_text
        stream_results = get_stream_results(
            c,
            verbose=verbose,
            maxlen=display_limit,
            spinner=spinner,
            stdout_allowed=write_to_stdout,
        )
        try:
            c.wait()
        finally:
            # Always release the pipe handles, even if wait() raises.
            if c.stdout:
                c.stdout.close()
            if c.stderr:
                c.stderr.close()
        if spinner:
            if c.returncode > 0:
                spinner.fail(to_native_string("Failed...cleaning up..."))
            # Windows consoles may not render the check mark glyph.
            if not os.name == "nt":
                spinner.ok(to_native_string("✔ Complete"))
            else:
                spinner.ok(to_native_string("Complete"))
        output = stream_results["stdout"]
        err = stream_results["stderr"]
        c.out = "\n".join(output) if output else ""
        c.err = "\n".join(err) if err else ""
    else:
        c.out, c.err = c.communicate()
    if not block:
        c.wait()
    c.out = to_text("{0}".format(c.out)) if c.out else fs_str("")
    c.err = to_text("{0}".format(c.err)) if c.err else fs_str("")
    if not return_object:
        return c.out.strip(), c.err.strip()
    return c
def run(
    cmd,
    env=None,
    return_object=False,
    block=True,
    cwd=None,
    verbose=False,
    nospin=False,
    spinner_name=None,
    combine_stderr=True,
    display_limit=200,
    write_to_stdout=True,
):
    """Use `subprocess.Popen` to get the output of a command and decode it.

    :param list cmd: A list representing the command you want to run.
    :param dict env: Additional environment settings to pass through to the subprocess.
    :param bool return_object: When True, returns the whole subprocess instance
    :param bool block: When False, returns a potentially still-running :class:`subprocess.Popen` instance
    :param str cwd: Current working directory context to use for spawning the subprocess.
    :param bool verbose: Whether to print stdout in real time when non-blocking.
    :param bool nospin: Whether to disable the cli spinner.
    :param str spinner_name: The name of the spinner to use if enabled, defaults to bouncingBar
    :param bool combine_stderr: Optionally merge stdout and stderr in the subprocess, false if nonblocking.
    :param int display_limit: The max width of output lines to display when using a spinner.
    :param bool write_to_stdout: Whether to write to stdout when using a spinner, default True.
    :returns: A 2-tuple of (output, error) or a :class:`subprocess.Popen` object.

    .. Warning:: Merging standard out and standard error in a nonblocking subprocess
        can cause errors in some cases and may not be ideal. Consider disabling
        this functionality.
    """
    _env = os.environ.copy()
    if env:
        _env.update(env)
    if six.PY2:
        # Python 2 subprocess environments must be byte strings.
        fs_encode = partial(to_bytes, encoding=locale_encoding)
        _env = {fs_encode(k): fs_encode(v) for k, v in _env.items()}
    else:
        _env = {k: fs_str(v) for k, v in _env.items()}
    if not spinner_name:
        spinner_name = "bouncingBar"
    if six.PY2:
        if isinstance(cmd, six.string_types):
            cmd = cmd.encode("utf-8")
        elif isinstance(cmd, (list, tuple)):
            cmd = [c.encode("utf-8") for c in cmd]
    if not isinstance(cmd, Script):
        cmd = Script.parse(cmd)
    # NOTE(review): stderr merging is only honored for non-blocking calls that
    # return the process object -- confirm this matches the documented contract.
    if block or not return_object:
        combine_stderr = False
    start_text = ""
    with spinner(
        spinner_name=spinner_name,
        start_text=start_text,
        nospin=nospin,
        write_to_stdout=write_to_stdout,
    ) as sp:
        return _create_subprocess(
            cmd,
            env=_env,
            return_object=return_object,
            block=block,
            cwd=cwd,
            verbose=verbose,
            spinner=sp,
            combine_stderr=combine_stderr,
            start_text=start_text,
            # Bug fix: this was hard-coded to True, silently ignoring the
            # caller's write_to_stdout argument for stream redirection.
            write_to_stdout=write_to_stdout,
        )
def load_path(python):
    """Load the :mod:`sys.path` from the given python executable's environment as json

    :param str python: Path to a valid python executable
    :return: A python representation of the `sys.path` value of the given python executable.
    :rtype: list

    >>> load_path("/home/user/.virtualenvs/requirementslib-5MhGuG3C/bin/python")
    ['', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python37.zip', ...]
    """
    interpreter = Path(python).as_posix()
    dump_script = "import json, sys; print(json.dumps(sys.path))"
    out, _ = run([interpreter, "-c", dump_script], nospin=True)
    # An empty stdout (e.g. the interpreter failed to start) yields no paths.
    return json.loads(out) if out else []
def partialclass(cls, *args, **kwargs):
    """Returns a partially instantiated class

    :return: A partial class instance
    :rtype: cls

    >>> source = partialclass(Source, url="https://pypi.org/simple")
    >>> new_source = source(name="pypi")
    >>> new_source.__dict__
    {'url': 'https://pypi.org/simple', 'verify_ssl': True, 'name': 'pypi'}
    """
    # Prefer __name__, then __qualname__, falling back to str(cls); the first
    # non-None candidate names the generated subclass.
    candidates = []
    for attr in ("__name__", "__qualname__"):
        value = getattr(cls, attr, str(cls))
        if value is not None:
            candidates.append(value)
    new_name = candidates[0]
    bound_init = partialmethod(cls.__init__, *args, **kwargs)
    type_ = type(new_name, (cls,), {"__init__": bound_init})
    # Swiped from attrs.make_class
    try:
        type_.__module__ = sys._getframe(1).f_globals.get("__name__", "__main__")
    except (AttributeError, ValueError):  # pragma: no cover
        pass  # pragma: no cover
    return type_
# Borrowed from django -- force bytes and decode -- see link for details:
# https://github.com/django/django/blob/fc6b90b/django/utils/encoding.py#L112
def to_bytes(string, encoding="utf-8", errors=None):
    """Force a value to bytes.

    :param string: Some input that can be converted to bytes.
    :type string: str, bytes, unicode, or a memoryview subclass
    :param str encoding: The encoding to use for conversions, defaults to "utf-8"
    :param str errors: Codec error handler; when falsy a platform- and
        Python-version-appropriate default is selected below.
    :return: Corresponding byte representation (for use in filesystem operations)
    :rtype: bytes
    """
    unicode_name = get_canonical_encoding_name("utf-8")
    if not errors:
        if get_canonical_encoding_name(encoding) == unicode_name:
            # UTF-8 target: use the most permissive handler available so
            # undecodable filesystem names can round-trip.
            if six.PY3 and os.name == "nt":
                errors = "surrogatepass"
            else:
                errors = "surrogateescape" if six.PY3 else "ignore"
        else:
            errors = "strict"
    if isinstance(string, bytes):
        if get_canonical_encoding_name(encoding) == unicode_name:
            # Already bytes in the requested (UTF-8) encoding.
            return string
        else:
            # Transcode: the incoming bytes are assumed to be UTF-8.
            return string.decode(unicode_name).encode(encoding, errors)
    elif isinstance(string, memoryview):
        return bytes(string)
    elif not isinstance(string, six.string_types):
        try:
            if six.PY3:
                return six.text_type(string).encode(encoding, errors)
            else:
                return bytes(string)
        except UnicodeEncodeError:
            if isinstance(string, Exception):
                # NOTE(review): this iterates the exception object itself;
                # presumably intended for py2-style iterable exceptions --
                # confirm it should not be ``string.args`` instead.
                return b" ".join(to_bytes(arg, encoding, errors) for arg in string)
            return six.text_type(string).encode(encoding, errors)
    else:
        return string.encode(encoding, errors)
def to_text(string, encoding="utf-8", errors=None):
    """Force a value to a text-type.

    :param string: Some input that can be converted to a unicode representation.
    :type string: str or bytes unicode
    :param str encoding: The encoding to use for conversions, defaults to "utf-8"
    :param str errors: Codec error handler; when falsy a platform- and
        Python-version-appropriate default is selected below.
    :return: The unicode representation of the string
    :rtype: str
    """
    unicode_name = get_canonical_encoding_name("utf-8")
    if not errors:
        if get_canonical_encoding_name(encoding) == unicode_name:
            # UTF-8 target: use the most permissive handler available.
            if six.PY3 and os.name == "nt":
                errors = "surrogatepass"
            else:
                errors = "surrogateescape" if six.PY3 else "ignore"
        else:
            errors = "strict"
    if issubclass(type(string), six.text_type):
        # Already a text type; nothing to do.
        return string
    try:
        if not issubclass(type(string), six.string_types):
            if six.PY3:
                if isinstance(string, bytes):
                    string = six.text_type(string, encoding, errors)
                else:
                    # Arbitrary objects go through their str() representation.
                    string = six.text_type(string)
            elif hasattr(string, "__unicode__"):
                string = six.text_type(string)
            else:
                string = six.text_type(bytes(string), encoding, errors)
        else:
            string = string.decode(encoding, errors)
    except UnicodeDecodeError:
        # Fall back to decoding element-wise and joining with spaces.
        string = " ".join(to_text(arg, encoding, errors) for arg in string)
    return string
def divide(n, iterable):
    """
    split an iterable into n groups, per https://more-itertools.readthedocs.io/en/latest/api.html#grouping

    :param int n: Number of unique groups
    :param iter iterable: An iterable to split up
    :return: a list of new iterables derived from the original iterable
    :rtype: list
    """
    items = tuple(iterable)
    size, remainder = divmod(len(items), n)
    groups = []
    lower = 0
    for index in range(n):
        # The first ``remainder`` groups each absorb one extra element.
        upper = lower + size + (1 if index < remainder else 0)
        groups.append(iter(items[lower:upper]))
        lower = upper
    return groups
def take(n, iterable):
    """Take n elements from the supplied iterable without consuming it.

    :param int n: Number of unique groups
    :param iter iterable: An iterable to split up

    from https://github.com/erikrose/more-itertools/blob/master/more_itertools/recipes.py
    """
    # zip pulls from range() first, so exactly n items (or fewer, if the
    # iterable runs out) are consumed -- same contract as islice.
    return [item for _, item in zip(range(n), iterable)]
def chunked(n, iterable):
    """Split an iterable into lists of length *n*.

    :param int n: Number of unique groups
    :param iter iterable: An iterable to split up

    from https://github.com/erikrose/more-itertools/blob/master/more_itertools/more.py
    """
    source = iter(iterable)

    def _next_chunk():
        # Each call yields the next list of up to ``n`` items; the
        # two-argument ``iter`` below stops at the empty-list sentinel.
        return list(islice(source, n))

    return iter(_next_chunk, [])
# Best-effort detection of the process locale's encoding; fall back to
# "ascii" when the locale is unset or cannot be parsed (e.g. bad LANG).
try:
    locale_encoding = locale.getdefaultlocale()[1] or "ascii"
except Exception:
    locale_encoding = "ascii"
def getpreferredencoding():
    """Determine the proper output encoding for terminal rendering"""
    # Borrowed from Invoke
    # (see https://github.com/pyinvoke/invoke/blob/93af29d/invoke/runners.py#L881)
    encoding = locale.getpreferredencoding(False)
    if six.PY2 and sys.platform != "win32":
        # py2 off Windows: trust the locale's default encoding when present.
        default_encoding = locale.getdefaultlocale()[1]
        if default_encoding is not None:
            encoding = default_encoding
    return encoding
# Resolved once at import time; used as the default target encoding below.
PREFERRED_ENCODING = getpreferredencoding()
def get_output_encoding(source_encoding):
    """
    Given a source encoding, determine the preferred output encoding.

    :param str source_encoding: The encoding of the source material.
    :returns: The output encoding to decode to.
    :rtype: str
    """
    if source_encoding is None:
        return get_canonical_encoding_name(PREFERRED_ENCODING)
    canonical = get_canonical_encoding_name(source_encoding)
    # Plain ASCII is too restrictive for terminal output; upgrade to UTF-8.
    if canonical == "ascii":
        return "utf-8"
    return canonical
def _encode(output, encoding=None, errors=None, translation_map=None):
    """Encode *output* with *encoding*, falling back to character translation
    (via *translation_map*) or a lossy text conversion when encoding fails.

    Objects without an ``encode`` method are returned unchanged.
    """
    if encoding is None:
        encoding = PREFERRED_ENCODING
    try:
        output = output.encode(encoding)
    except (UnicodeDecodeError, UnicodeEncodeError):
        if translation_map is not None:
            if six.PY2:
                # py2 ``str.translate`` has different semantics; route the
                # translation through the ``unicode`` type instead.
                output = unicode.translate(  # noqa: F821
                    to_text(output, encoding=encoding, errors=errors), translation_map
                )
            else:
                output = output.translate(translation_map)
        else:
            output = to_text(output, encoding=encoding, errors=errors)
    except AttributeError:
        # Not a string-like object; pass it through untouched.
        pass
    return output
def decode_for_output(output, target_stream=None, translation_map=None):
    """Given a string, decode it for output to a terminal

    :param str output: A string to print to a terminal
    :param target_stream: A stream to write to, we will encode to target this stream if possible.
    :param dict translation_map: A mapping of unicode character ordinals to replacement strings.
    :return: A re-encoded string using the preferred encoding
    :rtype: str
    """
    if not isinstance(output, six.string_types):
        # Non-strings (e.g. bytes on py3) pass through untouched.
        return output
    encoding = None
    if target_stream is not None:
        encoding = getattr(target_stream, "encoding", None)
    # Normalize to something the terminal can actually render.
    encoding = get_output_encoding(encoding)
    try:
        output = _encode(output, encoding=encoding, translation_map=translation_map)
    except (UnicodeDecodeError, UnicodeEncodeError):
        # Last resort: coerce to the native string type, then replace any
        # remaining offending characters.
        output = to_native_string(output)
        output = _encode(
            output, encoding=encoding, errors="replace", translation_map=translation_map
        )
    return to_text(output, encoding=encoding, errors="replace")
def get_canonical_encoding_name(name):
    # type: (str) -> str
    """
    Given an encoding name, get the canonical name from a codec lookup.

    :param str name: The name of the codec to lookup
    :return: The canonical version of the codec name
    :rtype: str
    """
    import codecs

    try:
        return codecs.lookup(name).name
    except LookupError:
        # Unknown codec: hand the caller's spelling back unchanged.
        return name
def _is_binary_buffer(stream):
    """Return True when *stream* accepts ``bytes`` writes (a binary buffer)."""
    try:
        stream.write(b"")
        return True
    except Exception:
        pass
    # Probe with text as well (mirroring the byte probe) before reporting
    # the stream as non-binary.
    try:
        stream.write("")
    except Exception:
        pass
    return False
def _get_binary_buffer(stream):
    """Return a binary buffer for *stream* when one can be found.

    On Python 3 a text-mode stream is unwrapped via its ``buffer`` attribute;
    the result may be ``None`` when no usable binary buffer exists.
    """
    if not six.PY3 or _is_binary_buffer(stream):
        return stream
    buffer_ = getattr(stream, "buffer", None)
    if buffer_ is not None and _is_binary_buffer(buffer_):
        return buffer_
    return buffer_
def get_wrapped_stream(stream, encoding=None, errors="replace"):
    """
    Given a stream, wrap it in a `StreamWrapper` instance and return the wrapped stream.

    :param stream: A stream instance to wrap
    :param str encoding: The encoding to use for the stream
    :param str errors: The error handler to use, default "replace"
    :returns: A new, wrapped stream
    :rtype: :class:`StreamWrapper`
    :raises TypeError: When *stream* is ``None``.
    """
    if stream is None:
        raise TypeError("must provide a stream to wrap")
    # Prefer the stream's underlying binary buffer (py3 text streams);
    # this can come back as None when no binary buffer is found.
    stream = _get_binary_buffer(stream)
    if stream is not None and encoding is None:
        encoding = "utf-8"
    # NOTE(review): after the assignment above, the ``not encoding`` branch
    # only triggers when the stream resolved to None or encoding was "" --
    # confirm this ordering is intended.
    if not encoding:
        encoding = get_output_encoding(stream)
    else:
        encoding = get_canonical_encoding_name(encoding)
    return StreamWrapper(stream, encoding, errors, line_buffering=True)
class StreamWrapper(io.TextIOWrapper):
    """
    This wrapper class will wrap a provided stream and supply an interface
    for compatibility.
    """

    def __init__(self, stream, encoding, errors, line_buffering=True, **kwargs):
        # Wrap the stream in a _StreamProvider so it always exposes the
        # buffered interface TextIOWrapper expects (read1/readable/...).
        self._stream = stream = _StreamProvider(stream)
        io.TextIOWrapper.__init__(
            self, stream, encoding, errors, line_buffering=line_buffering, **kwargs
        )

    # borrowed from click's implementation of stream wrappers, see
    # https://github.com/pallets/click/blob/6cafd32/click/_compat.py#L64
    if six.PY2:

        def write(self, x):
            # py2: byte-like payloads bypass the text layer and go straight
            # to the underlying buffer.
            if isinstance(x, (str, buffer, bytearray)):  # noqa: F821
                try:
                    self.flush()
                except Exception:
                    pass
                # This is modified from the initial implementation to rely on
                # our own decoding functionality to preserve unicode strings where
                # possible
                return self.buffer.write(str(x))
            return io.TextIOWrapper.write(self, x)

    else:

        def write(self, x):
            # try to use backslash and surrogate escape strategies before failing
            # NOTE(review): "backslashescape" is not a standard codecs error
            # handler name -- presumably "backslashreplace" was intended;
            # confirm before relying on the fallback path.
            old_errors = getattr(self, "_errors", self.errors)
            self._errors = (
                "backslashescape" if self.encoding != "mbcs" else "surrogateescape"
            )
            try:
                return io.TextIOWrapper.write(self, to_text(x, errors=self._errors))
            except UnicodeDecodeError:
                # Restore the previous handler and retry once.
                self._errors = old_errors
                return io.TextIOWrapper.write(self, to_text(x, errors=self._errors))

    def writelines(self, lines):
        # Route every line through ``write`` so the fallback logic applies.
        for line in lines:
            self.write(line)

    def __del__(self):
        # Detach rather than close so the wrapped stream outlives the wrapper.
        try:
            self.detach()
        except Exception:
            pass

    def isatty(self):
        return self._stream.isatty()
# More things borrowed from click, this is because we are using `TextIOWrapper` instead of
# just a normal StringIO
class _StreamProvider(object):
    """Compatibility shim that supplies buffered-stream methods (``read1``,
    ``readable``, ``writable``, ``seekable``) for wrapped streams that lack
    them, delegating every other attribute to the underlying stream.
    """

    def __init__(self, stream):
        self._stream = stream
        super(_StreamProvider, self).__init__()

    def __getattr__(self, name):
        # Anything not defined here falls through to the wrapped stream.
        return getattr(self._stream, name)

    def read1(self, size):
        native = getattr(self._stream, "read1", None)
        if native is not None:
            return native(size)
        # Emulate read1 for streams that do not implement it.
        return self._stream.readline(size) if six.PY2 else self._stream.read(size)

    def readable(self):
        native = getattr(self._stream, "readable", None)
        if native is not None:
            return native()
        # Probe: a zero-length read succeeds only on readable streams.
        try:
            self._stream.read(0)
        except Exception:
            return False
        return True

    def writable(self):
        native = getattr(self._stream, "writable", None)
        if native is not None:
            return native()
        # Probe with an empty bytes write, then an empty text write.
        try:
            self._stream.write(b"")
        except Exception:
            try:
                self._stream.write("")
            except Exception:
                return False
        return True

    def seekable(self):
        native = getattr(self._stream, "seekable", None)
        if native is not None:
            return native()
        # Probe: seeking to the current position is a no-op when supported.
        try:
            self._stream.seek(self._stream.tell())
        except Exception:
            return False
        return True
# XXX: The approach here is inspired somewhat by click with details taken from various
# XXX: other sources. Specifically we are using a stream cache and stream wrapping
# XXX: techniques from click (loosely inspired for the most part, with many details)
# XXX: heavily modified to suit our needs
def _isatty(stream):
    """Report whether *stream* claims to be a TTY; any probe error counts as no."""
    try:
        return stream.isatty()
    except Exception:
        return False
# Populated below on Windows when colorama is importable; everywhere else it
# stays None so callers can feature-test it before use.
_wrap_for_color = None

try:
    import colorama
except ImportError:
    colorama = None

# Weakly keyed so wrapped streams can still be garbage collected.
_color_stream_cache = WeakKeyDictionary()

if os.name == "nt" or sys.platform.startswith("win"):
    if colorama is not None:

        def _wrap_for_color(stream, color=None):
            # Reuse an existing wrapper for this stream when possible.
            # NOTE(review): ``dict.get`` never raises KeyError; this except
            # clause presumably guards against unhashable streams, which
            # would raise TypeError instead -- confirm.
            try:
                cached = _color_stream_cache.get(stream)
            except KeyError:
                cached = None
            if cached is not None:
                return cached
            # Strip ANSI codes when the stream cannot render color.
            strip = not _can_use_color(stream, color)
            _color_wrapper = colorama.AnsiToWin32(stream, strip=strip)
            result = _color_wrapper.stream
            _write = result.write

            def _write_with_color(s):
                try:
                    return _write(s)
                except Exception:
                    # Reset console state before propagating the failure.
                    _color_wrapper.reset_all()
                    raise

            result.write = _write_with_color
            try:
                _color_stream_cache[stream] = result
            except Exception:
                pass
            return result
def _cached_stream_lookup(stream_lookup_func, stream_resolution_func):
    """Build a zero-argument ``lookup`` memoizing *stream_resolution_func*
    per current stream object (weakly keyed, so replaced streams don't leak).
    """
    cache = WeakKeyDictionary()

    def lookup():
        current = stream_lookup_func()
        cached = cache.get(current, None) if current in cache else None
        if cached is not None:
            return cached
        resolved = stream_resolution_func()
        try:
            # Re-fetch the stream in case it changed while resolving; caching
            # is best-effort (some streams are not weak-referenceable).
            cache[stream_lookup_func()] = resolved
        except Exception:
            pass
        return resolved

    return lookup
def get_text_stream(stream="stdout", encoding=None):
    """Retrieve a unicode stream wrapper around **sys.stdout** or **sys.stderr**.

    :param str stream: The name of the stream to wrap from the :mod:`sys` module.
    :param str encoding: An optional encoding to use.
    :return: A new :class:`~vistir.misc.StreamWrapper` instance around the stream
    :rtype: `vistir.misc.StreamWrapper`
    """
    stream_map = {"stdin": sys.stdin, "stdout": sys.stdout, "stderr": sys.stderr}
    if os.name == "nt" or sys.platform.startswith("win"):
        from ._winconsole import _get_windows_console_stream, _wrap_std_stream
    else:
        # Off Windows the console-specific hooks are no-ops.
        _get_windows_console_stream = lambda *args: None  # noqa
        _wrap_std_stream = lambda *args: None  # noqa
    if six.PY2 and stream != "stdin":
        _wrap_std_stream(stream)
    sys_stream = stream_map[stream]
    # Prefer a dedicated Windows console stream when one is available.
    windows_console = _get_windows_console_stream(sys_stream, encoding, None)
    if windows_console is not None:
        return windows_console
    return get_wrapped_stream(sys_stream, encoding)
def get_text_stdout():
    """Return a text-mode wrapper around ``sys.stdout``."""
    return get_text_stream("stdout")


def get_text_stderr():
    """Return a text-mode wrapper around ``sys.stderr``."""
    return get_text_stream("stderr")


def get_text_stdin():
    """Return a text-mode wrapper around ``sys.stdin``."""
    return get_text_stream("stdin")
# Name -> factory mapping for the wrapped standard text streams.
TEXT_STREAMS = {
    "stdin": get_text_stdin,
    "stdout": get_text_stdout,
    "stderr": get_text_stderr,
}
# Memoized accessors: re-wrap only when the underlying sys.* object changes.
_text_stdin = _cached_stream_lookup(lambda: sys.stdin, get_text_stdin)
_text_stdout = _cached_stream_lookup(lambda: sys.stdout, get_text_stdout)
_text_stderr = _cached_stream_lookup(lambda: sys.stderr, get_text_stderr)
def _can_use_color(stream=None, color=None):
    """Decide whether ANSI color should be emitted for *stream*.

    An explicit *color* flag wins; otherwise color is used only when the
    stream (``sys.stdin`` by default) is a TTY, and never when colors are
    globally disabled.
    """
    from .termcolors import DISABLE_COLORS

    if DISABLE_COLORS:
        return False
    if color:
        return bool(color)
    target = stream if stream else sys.stdin
    return _isatty(target)
def echo(text, fg=None, bg=None, style=None, file=None, err=False, color=None):
    """Write the given text to the provided stream or **sys.stdout** by default.

    A trailing newline is always appended, like :func:`print`.

    Provides optional foreground and background colors from the ansi defaults:
    **grey**, **red**, **green**, **yellow**, **blue**, **magenta**, **cyan**
    or **white**.

    Available styles include **bold**, **dark**, **underline**, **blink**, **reverse**,
    **concealed**

    :param str text: Text to write
    :param str fg: Foreground color to use (default: None)
    :param str bg: Background color to use (default: None)
    :param str style: Style to use (default: None)
    :param stream file: File to write to (default: None)
    :param bool err: Write to stderr instead of stdout when no *file* is given
    :param bool color: Whether to force color (i.e. ANSI codes are in the text)
    """
    if file and not hasattr(file, "write"):
        raise TypeError("Expected a writable stream, received {0!r}".format(file))
    if not file:
        if err:
            file = _text_stderr()
        else:
            file = _text_stdout()
    if text and not isinstance(text, (six.string_types, bytes, bytearray)):
        # Arbitrary objects are rendered via their text representation.
        text = six.text_type(text)
    text = "" if not text else text
    # Terminate with a newline of the matching type (text vs bytes).
    if isinstance(text, six.text_type):
        text += "\n"
    else:
        text += b"\n"
    if text and six.PY3 and is_bytes(text):
        # Bytes bypass the text layer and go straight to the binary buffer.
        buffer = _get_binary_buffer(file)
        if buffer is not None:
            file.flush()
            buffer.write(text)
            buffer.flush()
            return
    if text and not is_bytes(text):
        can_use_color = _can_use_color(file, color=color)
        if any([fg, bg, style]):
            text = colorize(text, fg=fg, bg=bg, attrs=style)
        if not can_use_color or (os.name == "nt" and not _wrap_for_color):
            # Strip ANSI escape codes when the stream cannot render them.
            text = ANSI_REMOVAL_RE.sub("", text)
        elif os.name == "nt" and _wrap_for_color:
            file = _wrap_for_color(file, color=color)
    if text:
        file.write(text)
    file.flush()
|
sarugaku/vistir
|
src/vistir/misc.py
|
echo
|
python
|
def echo(text, fg=None, bg=None, style=None, file=None, err=False, color=None):
if file and not hasattr(file, "write"):
raise TypeError("Expected a writable stream, received {0!r}".format(file))
if not file:
if err:
file = _text_stderr()
else:
file = _text_stdout()
if text and not isinstance(text, (six.string_types, bytes, bytearray)):
text = six.text_type(text)
text = "" if not text else text
if isinstance(text, six.text_type):
text += "\n"
else:
text += b"\n"
if text and six.PY3 and is_bytes(text):
buffer = _get_binary_buffer(file)
if buffer is not None:
file.flush()
buffer.write(text)
buffer.flush()
return
if text and not is_bytes(text):
can_use_color = _can_use_color(file, color=color)
if any([fg, bg, style]):
text = colorize(text, fg=fg, bg=bg, attrs=style)
if not can_use_color or (os.name == "nt" and not _wrap_for_color):
text = ANSI_REMOVAL_RE.sub("", text)
elif os.name == "nt" and _wrap_for_color:
file = _wrap_for_color(file, color=color)
if text:
file.write(text)
file.flush()
|
Write the given text to the provided stream or **sys.stdout** by default.
Provides optional foreground and background colors from the ansi defaults:
**grey**, **red**, **green**, **yellow**, **blue**, **magenta**, **cyan**
or **white**.
Available styles include **bold**, **dark**, **underline**, **blink**, **reverse**,
**concealed**
:param str text: Text to write
:param str fg: Foreground color to use (default: None)
:param str bg: Background color to use (default: None)
:param str style: Style to use (default: None)
:param stream file: File to write to (default: None)
:param bool color: Whether to force color (i.e. ANSI codes are in the text)
|
train
|
https://github.com/sarugaku/vistir/blob/96c008ee62a43608d1e70797f74634cb66a004c1/src/vistir/misc.py#L938-L987
|
[
"def is_bytes(string):\n \"\"\"Check if a string is a bytes instance\n\n :param Union[str, bytes] string: A string that may be string or bytes like\n :return: Whether the provided string is a bytes type or not\n :rtype: bool\n \"\"\"\n if six.PY3 and isinstance(string, (bytes, memoryview, bytearray)): # noqa\n return True\n elif six.PY2 and isinstance(string, (buffer, bytearray)): # noqa\n return True\n return False\n",
"def _get_binary_buffer(stream):\n if six.PY3 and not _is_binary_buffer(stream):\n stream = getattr(stream, \"buffer\", None)\n if stream is not None and _is_binary_buffer(stream):\n return stream\n return stream\n",
"def lookup():\n stream = stream_lookup_func()\n result = None\n if stream in stream_cache:\n result = stream_cache.get(stream, None)\n if result is not None:\n return result\n result = stream_resolution_func()\n try:\n stream = stream_lookup_func()\n stream_cache[stream] = result\n except Exception:\n pass\n return result\n"
] |
# -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import io
import json
import locale
import logging
import os
import subprocess
import sys
from collections import OrderedDict
from functools import partial
from itertools import islice, tee
from weakref import WeakKeyDictionary
import six
from .cmdparse import Script
from .compat import (
Iterable,
Path,
StringIO,
fs_str,
is_bytes,
partialmethod,
to_native_string,
)
from .contextmanagers import spinner as spinner
from .termcolors import ANSI_REMOVAL_RE, colorize
if os.name != "nt":
    class WindowsError(OSError):
        # Stand-in for the Windows-only builtin so ``except WindowsError``
        # clauses in this module remain valid on every platform.
        pass
# Public API surface re-exported via ``from vistir.misc import *``.
__all__ = [
    "shell_escape",
    "unnest",
    "dedup",
    "run",
    "load_path",
    "partialclass",
    "to_text",
    "to_bytes",
    "locale_encoding",
    "chunked",
    "take",
    "divide",
    "getpreferredencoding",
    "decode_for_output",
    "get_canonical_encoding_name",
    "get_wrapped_stream",
    "StreamWrapper",
]
def _get_logger(name=None, level="ERROR"):
    """Return a configured logger that writes to ``sys.stderr``.

    :param str name: Logger name; defaults to this module's name.
    :param level: A logging level name (e.g. ``"ERROR"``) or a numeric
        level understood by :mod:`logging`.
    :return: The configured :class:`logging.Logger` instance.
    """
    if not name:
        name = __name__
    if isinstance(level, six.string_types):
        level = getattr(logging, level.upper())
    logger = logging.getLogger(name)
    logger.setLevel(level)
    # Loggers are process-global singletons: only attach a handler the first
    # time, otherwise repeated calls would duplicate every log line.
    if not logger.handlers:
        formatter = logging.Formatter(
            "%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s"
        )
        handler = logging.StreamHandler(stream=sys.stderr)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger
def shell_escape(cmd):
    """Escape strings for use in :func:`~subprocess.Popen` and :func:`run`.

    This is a passthrough method for instantiating a :class:`~vistir.cmdparse.Script`
    object which can be used to escape commands to output as a single string.
    """
    return Script.parse(cmd).cmdify()
def unnest(elem):
    """Flatten an arbitrarily nested iterable

    :param elem: An iterable to flatten
    :type elem: :class:`~collections.Iterable`

    >>> nested_iterable = (1234, (3456, 4398345, (234234)), (2396, (23895750, 9283798, 29384, (289375983275, 293759, 2347, (2098, 7987, 27599)))))
    >>> list(vistir.misc.unnest(nested_iterable))
    [1234, 3456, 4398345, 234234, 2396, 23895750, 9283798, 29384, 289375983275, 293759, 2347, 2098, 7987, 27599]
    """
    if isinstance(elem, Iterable) and not isinstance(elem, six.string_types):
        # tee so the caller's iterator is not consumed by our traversal.
        elem, target = tee(elem, 2)
    else:
        # NOTE(review): a non-iterable *elem* reaches the loop below and
        # raises TypeError; presumably callers always pass iterables --
        # confirm this is the intended contract.
        target = elem
    for el in target:
        if isinstance(el, Iterable) and not isinstance(el, six.string_types):
            el, el_copy = tee(el, 2)
            # Recurse into the copy so nested levels are fully flattened.
            for sub in unnest(el_copy):
                yield sub
        else:
            yield el
def _is_iterable(elem):
    """Return True when *elem* exposes a truthy ``__iter__`` attribute."""
    iter_attr = getattr(elem, "__iter__", False)
    return bool(iter_attr)
def dedup(iterable):
    """Deduplicate an iterable object like iter(set(iterable)) but
    order-reserved.
    """
    seen = OrderedDict()
    for item in iterable:
        # setdefault keeps the first occurrence's position.
        seen.setdefault(item, None)
    return iter(seen)
def _spawn_subprocess(script, env=None, block=True, cwd=None, combine_stderr=True):
    """Spawn *script* (a parsed ``Script``) via :class:`subprocess.Popen`.

    Falls back to shell execution when the executable cannot be resolved and,
    on Windows, when the target is not a valid Win32 application.

    :param script: Parsed command exposing ``command``, ``args`` and ``cmdify()``.
    :param dict env: Environment for the child; defaults to a copy of ours.
    :param bool block: When False a stdin pipe is attached for interaction.
    :param str cwd: Working directory for the child process.
    :param bool combine_stderr: Merge stderr into stdout when True.
    :rtype: subprocess.Popen
    """
    from distutils.spawn import find_executable
    if not env:
        env = os.environ.copy()
    command = find_executable(script.command)
    options = {
        "env": env,
        "universal_newlines": True,
        "stdout": subprocess.PIPE,
        "stderr": subprocess.PIPE if not combine_stderr else subprocess.STDOUT,
        "shell": False,
    }
    if not block:
        options["stdin"] = subprocess.PIPE
    if cwd:
        options["cwd"] = cwd
    # Command not found, maybe this is a shell built-in?
    cmd = [command] + script.args
    if not command:  # Try to use CreateProcess directly if possible.
        cmd = script.cmdify()
        options["shell"] = True
    # Try to use CreateProcess directly if possible. Specifically catch
    # Windows error 193 "Command is not a valid Win32 application" to handle
    # a "command" that is non-executable. See pypa/pipenv#2727.
    try:
        return subprocess.Popen(cmd, **options)
    except WindowsError as e:
        if getattr(e, "winerror", 9999) != 193:
            raise
        options["shell"] = True
        # Try shell mode to use Windows's file association for file launch.
        return subprocess.Popen(script.cmdify(), **options)
def _read_streams(stream_dict):
    """Read one line from each stream in *stream_dict*.

    Returns a dict with the same keys; a value is ``None`` when the stream
    is missing/closed or has no further output, otherwise the line text
    with trailing whitespace stripped.
    """
    def _next_line(stream):
        if not stream:
            return None
        raw = to_text(stream.readline())
        if not raw:
            return None
        return to_text("{0}".format(raw.rstrip()))

    return {name: _next_line(stream) for name, stream in stream_dict.items()}
def get_stream_results(cmd_instance, verbose, maxlen, spinner=None, stdout_allowed=False):
    """Drain a subprocess's stdout/stderr line by line, optionally echoing.

    :param cmd_instance: A running :class:`subprocess.Popen` with piped streams.
    :param bool verbose: When True, mirror each line to the console/spinner.
    :param int maxlen: Maximum display width; longer lines are truncated.
    :param spinner: Optional spinner whose text is updated with each line.
    :param bool stdout_allowed: Permit mirroring stdout lines to stdout.
    :return: Dict with "stdout" and "stderr" keys mapping to lists of lines.
    :rtype: dict
    """
    stream_results = {"stdout": [], "stderr": []}
    streams = {"stderr": cmd_instance.stderr, "stdout": cmd_instance.stdout}
    while True:
        stream_contents = _read_streams(streams)
        stdout_line = stream_contents["stdout"]
        stderr_line = stream_contents["stderr"]
        # Both streams exhausted (or closed): we're done.
        if not (stdout_line or stderr_line):
            break
        for stream_name in stream_contents.keys():
            if stream_contents[stream_name] and stream_name in stream_results:
                line = stream_contents[stream_name]
                stream_results[stream_name].append(line)
                display_line = fs_str("{0}".format(line))
                # Truncate oversized lines for display purposes only; the
                # full line is still kept in stream_results.
                if len(display_line) > maxlen:
                    display_line = "{0}...".format(display_line[:maxlen])
                if verbose:
                    # stdout lines go to stdout only when explicitly allowed.
                    use_stderr = not stdout_allowed or stream_name != "stdout"
                    if spinner:
                        target = spinner.stderr if use_stderr else spinner.stdout
                        spinner.hide_and_write(display_line, target=target)
                    else:
                        target = sys.stderr if use_stderr else sys.stdout
                        target.write(display_line)
                        target.flush()
                if spinner:
                    spinner.text = to_native_string(
                        "{0} {1}".format(spinner.text, display_line)
                    )
                # NOTE(review): this trailing continue is a no-op.
                continue
    return stream_results
def _create_subprocess(
    cmd,
    env=None,
    block=True,
    return_object=False,
    cwd=os.curdir,
    verbose=False,
    spinner=None,
    combine_stderr=False,
    display_limit=200,
    start_text="",
    write_to_stdout=True,
):
    """Spawn *cmd* and collect its decoded output.

    Non-blocking mode streams output incrementally (feeding the spinner);
    blocking mode uses :meth:`subprocess.Popen.communicate`. Returns either
    the ``(out, err)`` strings or the annotated ``Popen`` object, depending
    on *return_object* (callers use :func:`run` rather than this directly).
    """
    if not env:
        env = os.environ.copy()
    try:
        c = _spawn_subprocess(
            cmd, env=env, block=block, cwd=cwd, combine_stderr=combine_stderr
        )
    except Exception as exc:
        import traceback
        # Surface the spawn failure with a traceback, then re-raise.
        formatted_tb = "".join(traceback.format_exception(*sys.exc_info()))  # pragma: no cover
        sys.stderr.write(  # pragma: no cover
            "Error while executing command %s:" % to_native_string(" ".join(cmd._parts))  # pragma: no cover
        )  # pragma: no cover
        sys.stderr.write(formatted_tb)  # pragma: no cover
        raise exc  # pragma: no cover
    if not block:
        # Non-blocking: close stdin and drain output incrementally.
        c.stdin.close()
        spinner_orig_text = ""
        if spinner and getattr(spinner, "text", None) is not None:
            spinner_orig_text = spinner.text
        if not spinner_orig_text and start_text is not None:
            spinner_orig_text = start_text
        stream_results = get_stream_results(
            c,
            verbose=verbose,
            maxlen=display_limit,
            spinner=spinner,
            stdout_allowed=write_to_stdout,
        )
        try:
            c.wait()
        finally:
            if c.stdout:
                c.stdout.close()
            if c.stderr:
                c.stderr.close()
        if spinner:
            # NOTE(review): ``> 0`` misses negative return codes (process
            # killed by a signal on POSIX) -- confirm whether those should
            # also be reported as failures.
            if c.returncode > 0:
                spinner.fail(to_native_string("Failed...cleaning up..."))
            if not os.name == "nt":
                spinner.ok(to_native_string("✔ Complete"))
            else:
                spinner.ok(to_native_string("Complete"))
        output = stream_results["stdout"]
        err = stream_results["stderr"]
        c.out = "\n".join(output) if output else ""
        c.err = "\n".join(err) if err else ""
    else:
        # Blocking: let communicate() gather everything at once.
        c.out, c.err = c.communicate()
        # NOTE(review): ``block`` is truthy in this branch, so this wait()
        # call is unreachable -- confirm it is just leftover code.
        if not block:
            c.wait()
        c.out = to_text("{0}".format(c.out)) if c.out else fs_str("")
        c.err = to_text("{0}".format(c.err)) if c.err else fs_str("")
    if not return_object:
        return c.out.strip(), c.err.strip()
    return c
def run(
    cmd,
    env=None,
    return_object=False,
    block=True,
    cwd=None,
    verbose=False,
    nospin=False,
    spinner_name=None,
    combine_stderr=True,
    display_limit=200,
    write_to_stdout=True,
):
    """Use `subprocess.Popen` to get the output of a command and decode it.

    :param list cmd: A list representing the command you want to run.
    :param dict env: Additional environment settings to pass through to the subprocess.
    :param bool return_object: When True, returns the whole subprocess instance
    :param bool block: When False, returns a potentially still-running :class:`subprocess.Popen` instance
    :param str cwd: Current working directory context to use for spawning the subprocess.
    :param bool verbose: Whether to print stdout in real time when non-blocking.
    :param bool nospin: Whether to disable the cli spinner.
    :param str spinner_name: The name of the spinner to use if enabled, defaults to bouncingBar
    :param bool combine_stderr: Optionally merge stdout and stderr in the subprocess, false if nonblocking.
    :param int display_limit: The max width of output lines to display when using a spinner.
    :param bool write_to_stdout: Whether to write to stdout when using a spinner, default True.
    :returns: A 2-tuple of (output, error) or a :class:`subprocess.Popen` object.

    .. Warning:: Merging standard out and standard error in a nonblocking subprocess
        can cause errors in some cases and may not be ideal. Consider disabling
        this functionality.
    """
    _env = os.environ.copy()
    if env:
        _env.update(env)
    if six.PY2:
        fs_encode = partial(to_bytes, encoding=locale_encoding)
        _env = {fs_encode(k): fs_encode(v) for k, v in _env.items()}
    else:
        _env = {k: fs_str(v) for k, v in _env.items()}
    if not spinner_name:
        spinner_name = "bouncingBar"
    if six.PY2:
        if isinstance(cmd, six.string_types):
            cmd = cmd.encode("utf-8")
        elif isinstance(cmd, (list, tuple)):
            cmd = [c.encode("utf-8") for c in cmd]
    if not isinstance(cmd, Script):
        cmd = Script.parse(cmd)
    # Merging streams is only supported for non-blocking Popen objects that
    # are handed back to the caller (see the warning above).
    if block or not return_object:
        combine_stderr = False
    start_text = ""
    with spinner(
        spinner_name=spinner_name,
        start_text=start_text,
        nospin=nospin,
        write_to_stdout=write_to_stdout,
    ) as sp:
        # Forward display_limit and write_to_stdout so the caller's settings
        # actually take effect (previously they were dropped/hardcoded here).
        return _create_subprocess(
            cmd,
            env=_env,
            return_object=return_object,
            block=block,
            cwd=cwd,
            verbose=verbose,
            spinner=sp,
            combine_stderr=combine_stderr,
            display_limit=display_limit,
            start_text=start_text,
            write_to_stdout=write_to_stdout,
        )
def load_path(python):
"""Load the :mod:`sys.path` from the given python executable's environment as json
:param str python: Path to a valid python executable
:return: A python representation of the `sys.path` value of the given python executable.
:rtype: list
>>> load_path("/home/user/.virtualenvs/requirementslib-5MhGuG3C/bin/python")
['', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python37.zip', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python3.7', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python3.7/lib-dynload', '/home/user/.pyenv/versions/3.7.0/lib/python3.7', '/home/user/.virtualenvs/requirementslib-5MhGuG3C/lib/python3.7/site-packages', '/home/user/git/requirementslib/src']
"""
python = Path(python).as_posix()
out, err = run(
[python, "-c", "import json, sys; print(json.dumps(sys.path))"], nospin=True
)
if out:
return json.loads(out)
else:
return []
def partialclass(cls, *args, **kwargs):
"""Returns a partially instantiated class
:return: A partial class instance
:rtype: cls
>>> source = partialclass(Source, url="https://pypi.org/simple")
>>> source
<class '__main__.Source'>
>>> source(name="pypi")
>>> source.__dict__
mappingproxy({'__module__': '__main__', '__dict__': <attribute '__dict__' of 'Source' objects>, '__weakref__': <attribute '__weakref__' of 'Source' objects>, '__doc__': None, '__init__': functools.partialmethod(<function Source.__init__ at 0x7f23af429bf8>, , url='https://pypi.org/simple')})
>>> new_source = source(name="pypi")
>>> new_source
<__main__.Source object at 0x7f23af189b38>
>>> new_source.__dict__
{'url': 'https://pypi.org/simple', 'verify_ssl': True, 'name': 'pypi'}
"""
name_attrs = [
n
for n in (getattr(cls, name, str(cls)) for name in ("__name__", "__qualname__"))
if n is not None
]
name_attrs = name_attrs[0]
type_ = type(
name_attrs, (cls,), {"__init__": partialmethod(cls.__init__, *args, **kwargs)}
)
# Swiped from attrs.make_class
try:
type_.__module__ = sys._getframe(1).f_globals.get("__name__", "__main__")
except (AttributeError, ValueError): # pragma: no cover
pass # pragma: no cover
return type_
# Borrowed from django -- force bytes and decode -- see link for details:
# https://github.com/django/django/blob/fc6b90b/django/utils/encoding.py#L112
def to_bytes(string, encoding="utf-8", errors=None):
"""Force a value to bytes.
:param string: Some input that can be converted to a bytes.
:type string: str or bytes unicode or a memoryview subclass
:param encoding: The encoding to use for conversions, defaults to "utf-8"
:param encoding: str, optional
:return: Corresponding byte representation (for use in filesystem operations)
:rtype: bytes
"""
unicode_name = get_canonical_encoding_name("utf-8")
if not errors:
if get_canonical_encoding_name(encoding) == unicode_name:
if six.PY3 and os.name == "nt":
errors = "surrogatepass"
else:
errors = "surrogateescape" if six.PY3 else "ignore"
else:
errors = "strict"
if isinstance(string, bytes):
if get_canonical_encoding_name(encoding) == unicode_name:
return string
else:
return string.decode(unicode_name).encode(encoding, errors)
elif isinstance(string, memoryview):
return bytes(string)
elif not isinstance(string, six.string_types):
try:
if six.PY3:
return six.text_type(string).encode(encoding, errors)
else:
return bytes(string)
except UnicodeEncodeError:
if isinstance(string, Exception):
return b" ".join(to_bytes(arg, encoding, errors) for arg in string)
return six.text_type(string).encode(encoding, errors)
else:
return string.encode(encoding, errors)
def to_text(string, encoding="utf-8", errors=None):
    """Force a value to a text-type.

    :param string: Some input that can be converted to a unicode representation.
    :type string: str or bytes or unicode
    :param str encoding: The encoding to use for conversions, defaults to "utf-8"
    :param str errors: The codec error handler; chosen automatically when None
    :return: The unicode representation of the string
    :rtype: str
    """
    unicode_name = get_canonical_encoding_name("utf-8")
    if not errors:
        # Mirrors the default-error-handler selection used by to_bytes:
        # PEP 383 surrogates on py3 ("surrogatepass" on Windows), "ignore"
        # on py2, strict for non-utf8 encodings.
        if get_canonical_encoding_name(encoding) == unicode_name:
            if six.PY3 and os.name == "nt":
                errors = "surrogatepass"
            else:
                errors = "surrogateescape" if six.PY3 else "ignore"
        else:
            errors = "strict"
    if issubclass(type(string), six.text_type):
        # Already text; return unchanged.
        return string
    try:
        if not issubclass(type(string), six.string_types):
            if six.PY3:
                if isinstance(string, bytes):
                    string = six.text_type(string, encoding, errors)
                else:
                    string = six.text_type(string)
            elif hasattr(string, "__unicode__"):
                string = six.text_type(string)
            else:
                string = six.text_type(bytes(string), encoding, errors)
        else:
            string = string.decode(encoding, errors)
    except UnicodeDecodeError:
        # Fall back to converting element-wise (mirrors to_bytes's handling
        # of Exception arguments) and joining with spaces.
        string = " ".join(to_text(arg, encoding, errors) for arg in string)
    return string
def divide(n, iterable):
    """
    split an iterable into n groups, per https://more-itertools.readthedocs.io/en/latest/api.html#grouping

    :param int n: Number of unique groups
    :param iter iterable: An iterable to split up
    :return: a list of new iterables derived from the original iterable
    :rtype: list
    """
    items = tuple(iterable)
    size, leftover = divmod(len(items), n)

    def _bound(idx):
        # The first ``leftover`` groups each absorb one extra element.
        return (idx * size) + min(idx, leftover)

    return [iter(items[_bound(i):_bound(i + 1)]) for i in range(n)]
def take(n, iterable):
    """Return the first *n* elements of *iterable* as a list.

    :param int n: Maximum number of elements to take
    :param iter iterable: The iterable to draw from (iterators are advanced)

    from https://github.com/erikrose/more-itertools/blob/master/more_itertools/recipes.py
    """
    first_n = islice(iterable, n)
    return list(first_n)
def chunked(n, iterable):
    """Split an iterable into lists of length *n*.

    :param int n: Size of each chunk (the final chunk may be shorter)
    :param iter iterable: An iterable to split up

    from https://github.com/erikrose/more-itertools/blob/master/more_itertools/more.py
    """
    source = iter(iterable)
    next_chunk = partial(take, n, source)
    # ``iter(callable, sentinel)`` keeps yielding chunks until one is empty.
    return iter(next_chunk, [])
# Best-effort snapshot of the locale's encoding at import time; fall back to
# ASCII when the locale database is broken or unavailable.
try:
    locale_encoding = locale.getdefaultlocale()[1] or "ascii"
except Exception:
    locale_encoding = "ascii"
def getpreferredencoding():
    """Determine the proper output encoding for terminal rendering"""
    # Borrowed from Invoke
    # (see https://github.com/pyinvoke/invoke/blob/93af29d/invoke/runners.py#L881)
    encoding = locale.getpreferredencoding(False)
    if six.PY2 and sys.platform != "win32":
        default_encoding = locale.getdefaultlocale()[1]
        if default_encoding is not None:
            encoding = default_encoding
    return encoding
# Cached once at import time; the terminal's encoding rarely changes mid-run.
PREFERRED_ENCODING = getpreferredencoding()
def get_output_encoding(source_encoding):
    """
    Given a source encoding, determine the preferred output encoding.

    :param str source_encoding: The encoding of the source material.
    :returns: The output encoding to decode to.
    :rtype: str
    """
    if source_encoding is None:
        return get_canonical_encoding_name(PREFERRED_ENCODING)
    canonical = get_canonical_encoding_name(source_encoding)
    # Plain ASCII is upgraded to utf-8 so unicode output still renders.
    return "utf-8" if canonical == "ascii" else canonical
def _encode(output, encoding=None, errors=None, translation_map=None):
    """Encode *output* with *encoding*, falling back to a translation map or
    plain text coercion when the round-trip fails.

    :param output: A string-like value to encode (non-strings pass through).
    :param str encoding: Target encoding; defaults to PREFERRED_ENCODING.
    :param str errors: Error handler forwarded to the text fallbacks.
    :param dict translation_map: Optional ordinal -> replacement mapping.
    """
    if encoding is None:
        encoding = PREFERRED_ENCODING
    try:
        output = output.encode(encoding)
    except (UnicodeDecodeError, UnicodeEncodeError):
        if translation_map is not None:
            if six.PY2:
                # py2 unicode.translate requires a unicode receiver, so
                # coerce to text before applying the map.
                output = unicode.translate(  # noqa: F821
                    to_text(output, encoding=encoding, errors=errors), translation_map
                )
            else:
                output = output.translate(translation_map)
        else:
            output = to_text(output, encoding=encoding, errors=errors)
    except AttributeError:
        # Not string-like (no .encode); return the value untouched.
        pass
    return output
def decode_for_output(output, target_stream=None, translation_map=None):
    """Given a string, decode it for output to a terminal

    :param str output: A string to print to a terminal
    :param target_stream: A stream to write to, we will encode to target this stream if possible.
    :param dict translation_map: A mapping of unicode character ordinals to replacement strings.
    :return: A re-encoded string using the preferred encoding
    :rtype: str
    """
    if not isinstance(output, six.string_types):
        # Non-string values pass through untouched.
        return output
    encoding = None
    if target_stream is not None:
        encoding = getattr(target_stream, "encoding", None)
    # Falls back to the locale's preferred encoding when the stream has none.
    encoding = get_output_encoding(encoding)
    try:
        output = _encode(output, encoding=encoding, translation_map=translation_map)
    except (UnicodeDecodeError, UnicodeEncodeError):
        # Last resort: coerce to the native str type, then re-encode with
        # replacement characters so writing can never raise.
        output = to_native_string(output)
        output = _encode(
            output, encoding=encoding, errors="replace", translation_map=translation_map
        )
    return to_text(output, encoding=encoding, errors="replace")
def get_canonical_encoding_name(name):
    # type: (str) -> str
    """
    Given an encoding name, get the canonical name from a codec lookup.

    Unknown names are returned unchanged rather than raising.

    :param str name: The name of the codec to lookup
    :return: The canonical version of the codec name
    :rtype: str
    """
    import codecs

    try:
        return codecs.lookup(name).name
    except LookupError:
        return name
def _is_binary_buffer(stream):
try:
stream.write(b"")
except Exception:
try:
stream.write("")
except Exception:
pass
return False
return True
def _get_binary_buffer(stream):
    """On py3, swap a text-mode stream for its underlying binary buffer.

    NOTE(review): when the stream is text-mode and lacks a usable ``.buffer``
    this returns None (or the non-binary buffer) rather than the original
    stream, because ``stream`` is rebound before the final return -- callers
    such as get_wrapped_stream appear to rely on the None case; confirm.
    """
    if six.PY3 and not _is_binary_buffer(stream):
        stream = getattr(stream, "buffer", None)
        if stream is not None and _is_binary_buffer(stream):
            return stream
    return stream
def get_wrapped_stream(stream, encoding=None, errors="replace"):
    """
    Given a stream, wrap it in a `StreamWrapper` instance and return the wrapped stream.

    :param stream: A stream instance to wrap
    :param str encoding: The encoding to use for the stream
    :param str errors: The error handler to use, default "replace"
    :returns: A new, wrapped stream
    :rtype: :class:`StreamWrapper`
    """
    if stream is None:
        raise TypeError("must provide a stream to wrap")
    # Prefer the stream's binary buffer on py3 so we control text decoding.
    stream = _get_binary_buffer(stream)
    if stream is not None and encoding is None:
        # Binary buffers carry no encoding of their own; default to utf-8.
        encoding = "utf-8"
    if not encoding:
        encoding = get_output_encoding(stream)
    else:
        encoding = get_canonical_encoding_name(encoding)
    return StreamWrapper(stream, encoding, errors, line_buffering=True)
class StreamWrapper(io.TextIOWrapper):
    """
    This wrapper class will wrap a provided stream and supply an interface
    for compatibility.
    """

    def __init__(self, stream, encoding, errors, line_buffering=True, **kwargs):
        # _StreamProvider backfills the optional stream-protocol methods
        # (read1/readable/writable/seekable) that TextIOWrapper expects.
        self._stream = stream = _StreamProvider(stream)
        io.TextIOWrapper.__init__(
            self, stream, encoding, errors, line_buffering=line_buffering, **kwargs
        )

    # borrowed from click's implementation of stream wrappers, see
    # https://github.com/pallets/click/blob/6cafd32/click/_compat.py#L64
    if six.PY2:

        def write(self, x):
            if isinstance(x, (str, buffer, bytearray)):  # noqa: F821
                try:
                    self.flush()
                except Exception:
                    pass
                # This is modified from the initial implementation to rely on
                # our own decoding functionality to preserve unicode strings where
                # possible
                return self.buffer.write(str(x))
            return io.TextIOWrapper.write(self, x)

    else:

        def write(self, x):
            # try to use backslash and surrogate escape strategies before failing
            old_errors = getattr(self, "_errors", self.errors)
            # NOTE(review): "backslashescape" is not a registered codecs error
            # handler (cf. "backslashreplace") -- verify this name is intended.
            self._errors = (
                "backslashescape" if self.encoding != "mbcs" else "surrogateescape"
            )
            try:
                return io.TextIOWrapper.write(self, to_text(x, errors=self._errors))
            except UnicodeDecodeError:
                # Restore the previous handler and retry with it.
                self._errors = old_errors
                return io.TextIOWrapper.write(self, to_text(x, errors=self._errors))

    def writelines(self, lines):
        for line in lines:
            self.write(line)

    def __del__(self):
        # Detach (rather than close) so GC of the wrapper does not close the
        # underlying stream, e.g. the real sys.stdout.
        try:
            self.detach()
        except Exception:
            pass

    def isatty(self):
        return self._stream.isatty()
# More things borrowed from click, this is because we are using `TextIOWrapper` instead of
# just a normal StringIO
class _StreamProvider(object):
def __init__(self, stream):
self._stream = stream
super(_StreamProvider, self).__init__()
def __getattr__(self, name):
return getattr(self._stream, name)
def read1(self, size):
fn = getattr(self._stream, "read1", None)
if fn is not None:
return fn(size)
if six.PY2:
return self._stream.readline(size)
return self._stream.read(size)
def readable(self):
fn = getattr(self._stream, "readable", None)
if fn is not None:
return fn()
try:
self._stream.read(0)
except Exception:
return False
return True
def writable(self):
fn = getattr(self._stream, "writable", None)
if fn is not None:
return fn()
try:
self._stream.write(b"")
except Exception:
return False
return True
def seekable(self):
fn = getattr(self._stream, "seekable", None)
if fn is not None:
return fn()
try:
self._stream.seek(self._stream.tell())
except Exception:
return False
return True
# XXX: The approach here is inspired somewhat by click with details taken from various
# XXX: other sources. Specifically we are using a stream cache and stream wrapping
# XXX: techniques from click (loosely inspired for the most part, with many details)
# XXX: heavily modified to suit our needs
def _isatty(stream):
try:
is_a_tty = stream.isatty()
except Exception:
is_a_tty = False
return is_a_tty
# Wrapper used on Windows so ANSI color codes render via colorama; remains
# None when colorama is unavailable or we are not on Windows.
_wrap_for_color = None
try:
    import colorama
except ImportError:
    colorama = None

# Cache of already-wrapped streams; weak keys let dead streams be collected.
_color_stream_cache = WeakKeyDictionary()
if os.name == "nt" or sys.platform.startswith("win"):
    if colorama is not None:

        def _wrap_for_color(stream, color=None):
            """Wrap *stream* in colorama's AnsiToWin32 converter (cached)."""
            try:
                cached = _color_stream_cache.get(stream)
            # NOTE(review): dict.get returns None for missing keys instead of
            # raising KeyError, so this handler looks ineffective -- confirm
            # whether TypeError (unhashable stream) was the intended target.
            except KeyError:
                cached = None
            if cached is not None:
                return cached
            # Strip ANSI sequences outright when color cannot be used.
            strip = not _can_use_color(stream, color)
            _color_wrapper = colorama.AnsiToWin32(stream, strip=strip)
            result = _color_wrapper.stream
            _write = result.write

            def _write_with_color(s):
                try:
                    return _write(s)
                except Exception:
                    # Reset console attributes so a failed write cannot leave
                    # the terminal stuck in a colored state.
                    _color_wrapper.reset_all()
                    raise

            result.write = _write_with_color
            try:
                _color_stream_cache[stream] = result
            except Exception:
                # Streams that are not weak-referenceable go uncached.
                pass
            return result
def _cached_stream_lookup(stream_lookup_func, stream_resolution_func):
stream_cache = WeakKeyDictionary()
def lookup():
stream = stream_lookup_func()
result = None
if stream in stream_cache:
result = stream_cache.get(stream, None)
if result is not None:
return result
result = stream_resolution_func()
try:
stream = stream_lookup_func()
stream_cache[stream] = result
except Exception:
pass
return result
return lookup
def get_text_stream(stream="stdout", encoding=None):
    """Retrieve a unicode stream wrapper around **sys.stdout** or **sys.stderr**.

    :param str stream: The name of the stream to wrap from the :mod:`sys` module.
    :param str encoding: An optional encoding to use.
    :return: A new :class:`~vistir.misc.StreamWrapper` instance around the stream
    :rtype: `vistir.misc.StreamWrapper`
    """
    stream_map = {"stdin": sys.stdin, "stdout": sys.stdout, "stderr": sys.stderr}
    if os.name == "nt" or sys.platform.startswith("win"):
        from ._winconsole import _get_windows_console_stream, _wrap_std_stream
    else:
        # Non-Windows platforms need no console-specific handling.
        _get_windows_console_stream = lambda *args: None  # noqa
        _wrap_std_stream = lambda *args: None  # noqa

    if six.PY2 and stream != "stdin":
        # py2 output streams get wrapped for unicode-safe writes.
        _wrap_std_stream(stream)
    sys_stream = stream_map[stream]
    # Prefer a native Windows console stream when one is available.
    windows_console = _get_windows_console_stream(sys_stream, encoding, None)
    if windows_console is not None:
        return windows_console
    return get_wrapped_stream(sys_stream, encoding)
def get_text_stdout():
    """Return a text-wrapped stdout stream."""
    return get_text_stream("stdout")


def get_text_stderr():
    """Return a text-wrapped stderr stream."""
    return get_text_stream("stderr")


def get_text_stdin():
    """Return a text-wrapped stdin stream."""
    return get_text_stream("stdin")


# Factory lookup table consumed by replace_with_text_stream().
TEXT_STREAMS = {
    "stdin": get_text_stdin,
    "stdout": get_text_stdout,
    "stderr": get_text_stderr,
}

# Cached accessors: re-wrap only when the underlying sys stream is replaced.
_text_stdin = _cached_stream_lookup(lambda: sys.stdin, get_text_stdin)
_text_stdout = _cached_stream_lookup(lambda: sys.stdout, get_text_stdout)
_text_stderr = _cached_stream_lookup(lambda: sys.stderr, get_text_stderr)
def replace_with_text_stream(stream_name):
    """Given a stream name, replace the target stream with a text-converted equivalent

    :param str stream_name: The name of a target stream, such as **stdout** or **stderr**
    :return: None
    """
    factory = TEXT_STREAMS.get(stream_name)
    if factory is not None:
        setattr(sys, stream_name, factory())
    return None
def _can_use_color(stream=None, color=None):
    """Decide whether ANSI color output should be used.

    An explicit truthy/falsy *color* argument wins; otherwise color is
    allowed only when colors are not globally disabled and the stream is a
    TTY.
    """
    from .termcolors import DISABLE_COLORS

    if DISABLE_COLORS:
        return False
    if not color:
        if not stream:
            # NOTE(review): falls back to stdin rather than stdout -- confirm
            # this is intentional.
            stream = sys.stdin
        return _isatty(stream)
    return bool(color)
|
sarugaku/vistir
|
src/vistir/cursor.py
|
get_stream_handle
|
python
|
def get_stream_handle(stream=sys.stdout):
handle = stream
if os.name == "nt":
from ._winconsole import get_stream_handle as get_win_stream_handle
return get_win_stream_handle(stream)
return handle
|
Get the OS appropriate handle for the corresponding output stream.
:param str stream: The stream to get the handle for
:return: A handle to the appropriate stream, either a ctypes buffer
or **sys.stdout** or **sys.stderr**.
|
train
|
https://github.com/sarugaku/vistir/blob/96c008ee62a43608d1e70797f74634cb66a004c1/src/vistir/cursor.py#L10-L23
| null |
# -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function
import os
import sys
__all__ = ["hide_cursor", "show_cursor", "get_stream_handle"]
def hide_cursor(stream=sys.stdout):
    """
    Hide the console cursor on the given stream

    :param stream: The name of the stream to get the handle for
    :return: None
    :rtype: None
    """
    handle = get_stream_handle(stream=stream)
    if os.name != "nt":
        # ANSI escape CSI ?25l hides the cursor on VT-style terminals.
        handle.write("\033[?25l")
        handle.flush()
    else:
        from ._winconsole import hide_cursor

        hide_cursor()
def show_cursor(stream=sys.stdout):
    """
    Show the console cursor on the given stream

    :param stream: The name of the stream to get the handle for
    :return: None
    :rtype: None
    """
    handle = get_stream_handle(stream=stream)
    if os.name != "nt":
        # ANSI escape CSI ?25h makes the cursor visible again.
        handle.write("\033[?25h")
        handle.flush()
    else:
        from ._winconsole import show_cursor

        show_cursor()
|
sarugaku/vistir
|
src/vistir/cursor.py
|
hide_cursor
|
python
|
def hide_cursor(stream=sys.stdout):
handle = get_stream_handle(stream=stream)
if os.name == "nt":
from ._winconsole import hide_cursor
hide_cursor()
else:
handle.write("\033[?25l")
handle.flush()
|
Hide the console cursor on the given stream
:param stream: The name of the stream to get the handle for
:return: None
:rtype: None
|
train
|
https://github.com/sarugaku/vistir/blob/96c008ee62a43608d1e70797f74634cb66a004c1/src/vistir/cursor.py#L26-L42
|
[
"def get_stream_handle(stream=sys.stdout):\n \"\"\"\n Get the OS appropriate handle for the corresponding output stream.\n\n :param str stream: The the stream to get the handle for\n :return: A handle to the appropriate stream, either a ctypes buffer\n or **sys.stdout** or **sys.stderr**.\n \"\"\"\n handle = stream\n if os.name == \"nt\":\n from ._winconsole import get_stream_handle as get_win_stream_handle\n\n return get_win_stream_handle(stream)\n return handle\n"
] |
# -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function
import os
import sys
__all__ = ["hide_cursor", "show_cursor", "get_stream_handle"]
def get_stream_handle(stream=sys.stdout):
    """
    Get the OS appropriate handle for the corresponding output stream.

    :param str stream: The stream to get the handle for
    :return: A handle to the appropriate stream, either a ctypes buffer
        or **sys.stdout** or **sys.stderr**.
    """
    if os.name == "nt":
        from ._winconsole import get_stream_handle as get_win_stream_handle

        return get_win_stream_handle(stream)
    # POSIX terminals accept escape codes directly on the stream itself.
    return stream
def show_cursor(stream=sys.stdout):
"""
Show the console cursor on the given stream
:param stream: The name of the stream to get the handle for
:return: None
:rtype: None
"""
handle = get_stream_handle(stream=stream)
if os.name == "nt":
from ._winconsole import show_cursor
show_cursor()
else:
handle.write("\033[?25h")
handle.flush()
|
sarugaku/vistir
|
tasks/__init__.py
|
clean
|
python
|
def clean(ctx):
ctx.run(f"python setup.py clean")
dist = ROOT.joinpath("dist")
print(f"[clean] Removing {dist}")
if dist.exists():
shutil.rmtree(str(dist))
|
Clean previously built package artifacts.
|
train
|
https://github.com/sarugaku/vistir/blob/96c008ee62a43608d1e70797f74634cb66a004c1/tasks/__init__.py#L26-L33
| null |
import pathlib
import re
import shutil
import subprocess
import invoke
import parver
from towncrier._builder import find_fragments, render_fragments, split_fragments
from towncrier._settings import load_config
def _get_git_root(ctx):
return pathlib.Path(
ctx.run("git rev-parse --show-toplevel", hide=True).stdout.strip()
)
ROOT = pathlib.Path(__file__).resolve().parent.parent
PACKAGE_NAME = "vistir"
INIT_PY = ROOT.joinpath("src", PACKAGE_NAME, "__init__.py")
@invoke.task()
# NOTE(review): decorated as an invoke task but takes no ctx argument and is
# called directly as ``_read_version()`` elsewhere -- confirm the decorator
# is intentional.
def _read_version():
    """Return the highest normalized version among existing git tags,
    falling back to 0.0.0 when no parseable tag exists."""
    out = subprocess.check_output(["git", "tag"], encoding="ascii")
    try:
        version = max(
            parver.Version.parse(v).normalize()
            for v in (line.strip() for line in out.split("\n"))
            if v
        )
    except ValueError:
        # max() raised on an empty sequence (no tags yet).
        version = parver.Version.parse("0.0.0")
    return version
def _write_version(v):
    """Rewrite the ``__version__`` line in the package __init__.py to *v*."""
    lines = []
    with INIT_PY.open() as f:
        for line in f:
            if line.startswith("__version__ = "):
                # Normalize repr's single quotes to the file's double quotes.
                line = f"__version__ = {repr(str(v))}\n".replace("'", '"')
            lines.append(line)
    with INIT_PY.open("w", newline="\n") as f:
        f.write("".join(lines))
def _render_log():
    """Totally tap into Towncrier internals to get an in-memory result.
    """
    config = load_config(ROOT)
    definitions = config["types"]
    # Collect pending news fragments without deleting them.
    fragments, fragment_filenames = find_fragments(
        pathlib.Path(config["directory"]).absolute(),
        config["sections"],
        None,
        definitions,
    )
    rendered = render_fragments(
        pathlib.Path(config["template"]).read_text(encoding="utf-8"),
        config["issue_format"],
        split_fragments(fragments, definitions),
        definitions,
        config["underlines"][1:],
        False,  # Don't add newlines to wrapped text.
    )
    return rendered
REL_TYPES = ("major", "minor", "patch")
def _bump_release(version, type_, log=False):
    """Bump the major/minor/patch component named by *type_* on *version*."""
    if type_ not in REL_TYPES:
        raise ValueError(f"{type_} not in {REL_TYPES}")
    component = REL_TYPES.index(type_)
    next_version = version.base_version().bump_release(index=component)
    if log:
        print(f"[bump] {version} -> {next_version}")
    print(f"{next_version}")
    return next_version
def _prebump(version, prebump, log=False):
next_version = version.bump_release(index=prebump).bump_dev()
if log:
print(f"[bump] {version} -> {next_version}")
print(f"{next_version}")
return next_version
# Release component bumped after a release to open the next dev cycle.
PREBUMP = "patch"


# NOTE(review): ``clean`` is referenced here but not defined in this module
# view -- confirm it is defined/imported elsewhere in the file.
@invoke.task(pre=[clean])
def build(ctx):
    """Build sdist and wheel artifacts into dist/."""
    ctx.run("python setup.py sdist bdist_wheel")
@invoke.task()
def get_next_version(ctx, type_="patch", log=False):
    """Compute (without writing) the next version for a bump of *type_*."""
    version = _read_version()
    if type_ in ("dev", "pre"):
        # Dev/pre releases prebump the patch component and add a .dev suffix.
        idx = REL_TYPES.index("patch")
        new_version = _prebump(version, idx, log=log)
    else:
        new_version = _bump_release(version, type_, log=log)
    return new_version
@invoke.task()
def bump_version(ctx, type_="patch", log=False, dry_run=False):
    """Bump the version and, unless *dry_run*, write it to __init__.py."""
    new_version = get_next_version(ctx, type_, log=log)
    if not dry_run:
        _write_version(new_version)
    return new_version
@invoke.task()
def generate_news(ctx, yes=False, dry_run=False):
    """Run towncrier to consume news fragments into the changelog."""
    command = "towncrier"
    if dry_run:
        # --draft renders without deleting fragments or writing the file.
        command = f"{command} --draft"
    elif yes:
        command = f"{command} --yes"
    ctx.run(command)
@invoke.task()
def get_changelog(ctx):
    """Print and return the changelog rendered from pending news fragments."""
    changelog = _render_log()
    print(changelog)
    return changelog
@invoke.task(optional=["version", "type_"])
def tag_release(ctx, version=None, type_="patch", yes=False, dry_run=False):
    """Bump (or set) the version, render the changelog, and create an
    annotated git tag containing it."""
    if version is None:
        version = bump_version(ctx, type_, log=not dry_run, dry_run=dry_run)
    else:
        _write_version(version)
    # Capture the changelog BEFORE towncrier consumes the news fragments.
    tag_content = get_changelog(ctx)
    generate_news(ctx, yes=yes, dry_run=dry_run)
    git_commit_cmd = f'git commit -am "Release {version}"'
    # Escape double quotes so the tag message survives shell quoting.
    tag_content = tag_content.replace('"', '\\"')
    git_tag_cmd = f'git tag -a {version} -m "Version {version}\n\n{tag_content}"'
    if dry_run:
        print("Would run commands:")
        print(f"    {git_commit_cmd}")
        print(f"    {git_tag_cmd}")
    else:
        ctx.run(git_commit_cmd)
        ctx.run(git_tag_cmd)
@invoke.task(pre=[clean])
def release(ctx, type_, repo, prebump=PREBUMP, yes=False):
    """Make a new release.

    Bumps the version, tags it with the rendered changelog, builds and
    uploads the distribution, then prebumps to the next dev version.
    """
    if prebump not in REL_TYPES:
        # Fixed: the message previously interpolated ``type_`` although the
        # value being validated is ``prebump``.
        raise ValueError(f"{prebump} not in {REL_TYPES}")
    prebump = REL_TYPES.index(prebump)

    version = bump_version(ctx, type_, log=True)

    # Needs to happen before Towncrier deletes fragment files.
    # Fixed: tag_release's first parameter is ``ctx``; the previous call
    # ``tag_release(version, yes=yes)`` passed the version object as the
    # context and never supplied a real ctx.
    tag_release(ctx, version=version, yes=yes)

    ctx.run("python setup.py sdist bdist_wheel")

    dist_pattern = f'{PACKAGE_NAME.replace("-", "[-_]")}-*'
    artifacts = list(ROOT.joinpath("dist").glob(dist_pattern))
    filename_display = "\n".join(f"    {a}" for a in artifacts)
    print(f"[release] Will upload:\n{filename_display}")
    if not yes:
        try:
            input("[release] Release ready. ENTER to upload, CTRL-C to abort: ")
        except KeyboardInterrupt:
            print("\nAborted!")
            return

    arg_display = " ".join(f'"{n}"' for n in artifacts)
    ctx.run(f'twine upload --repository="{repo}" {arg_display}')

    # Reopen development on the next version.
    version = _prebump(version, prebump)
    _write_version(version)

    ctx.run(f'git commit -am "Prebump to {version}"')
@invoke.task
def build_docs(ctx):
    """Regenerate the sphinx-apidoc scaffolding for the package."""
    from vistir import __version__

    _current_version = parver.Version.parse(__version__)
    # Only major.minor is used for the sphinx "version" (-V) value.
    minor = [str(i) for i in _current_version.release[:2]]
    docs_folder = (_get_git_root(ctx) / "docs").as_posix()
    if not docs_folder.endswith("/"):
        docs_folder = "{0}/".format(docs_folder)
    args = ["--ext-autodoc", "--ext-viewcode", "-o", docs_folder]
    args.extend(["-A", "'Dan Ryan <dan@danryan.co>'"])
    args.extend(["-R", str(_current_version)])
    args.extend(["-V", ".".join(minor)])
    args.extend(["-e", "-M", "-F", f"src/{PACKAGE_NAME}"])
    print("Building docs...")
    ctx.run("sphinx-apidoc {0}".format(" ".join(args)))
@invoke.task
def clean_mdchangelog(ctx):
    """Collapse hard-wrapped issue links in CHANGELOG.md onto one line."""
    root = _get_git_root(ctx)
    changelog = root / "CHANGELOG.md"
    content = changelog.read_text()
    # Matches "text\n  [\#123](https://github.com/sarugaku/.../issues/123)"
    # and rewrites it as "text #123" on a single line.
    content = re.sub(
        r"([^\n]+)\n?\s+\[[\\]+(#\d+)\]\(https://github\.com/sarugaku/[\w\-]+/issues/\d+\)",
        r"\1 \2",
        content,
        flags=re.MULTILINE,
    )
    changelog.write_text(content)
|
sarugaku/vistir
|
src/vistir/backports/tempfile.py
|
_sanitize_params
|
python
|
def _sanitize_params(prefix, suffix, dir):
output_type = _infer_return_type(prefix, suffix, dir)
if suffix is None:
suffix = output_type()
if prefix is None:
if output_type is str:
prefix = "tmp"
else:
prefix = os.fsencode("tmp")
if dir is None:
if output_type is str:
dir = gettempdir()
else:
dir = fs_encode(gettempdir())
return prefix, suffix, dir, output_type
|
Common parameter processing for most APIs in this module.
|
train
|
https://github.com/sarugaku/vistir/blob/96c008ee62a43608d1e70797f74634cb66a004c1/src/vistir/backports/tempfile.py#L55-L70
|
[
"def fs_encode(path):\n try:\n return os.fsencode(path)\n except AttributeError:\n from ..compat import fs_encode\n\n return fs_encode(path)\n",
"def _infer_return_type(*args):\n _types = set()\n for arg in args:\n if isinstance(type(arg), six.string_types):\n _types.add(str)\n elif isinstance(type(arg), bytes):\n _types.add(bytes)\n elif arg:\n _types.add(type(arg))\n return _types.pop()\n"
] |
# -*- coding=utf-8 -*-
from __future__ import absolute_import, unicode_literals
import functools
import io
import os
import sys
from tempfile import _bin_openflags, _mkstemp_inner, gettempdir
import six
try:
from weakref import finalize
except ImportError:
from backports.weakref import finalize
def fs_encode(path):
    """Encode *path* to filesystem bytes via os.fsencode, or the py2 shim."""
    try:
        return os.fsencode(path)
    except AttributeError:
        # py2: os.fsencode does not exist; defer to vistir's backport.
        from ..compat import fs_encode

        return fs_encode(path)
def fs_decode(path):
    """Decode a filesystem *path* to text via os.fsdecode, or the py2 shim."""
    try:
        return os.fsdecode(path)
    except AttributeError:
        # py2: os.fsdecode does not exist; defer to vistir's backport.
        from ..compat import fs_decode

        return fs_decode(path)
__all__ = ["finalize", "NamedTemporaryFile"]
try:
from tempfile import _infer_return_type
except ImportError:
def _infer_return_type(*args):
    """Infer str vs bytes from the supplied path components.

    Mirrors CPython's ``tempfile._infer_return_type``: None arguments are
    ignored, mixing str and bytes raises TypeError, and str is returned
    when nothing constrains the type.
    """
    _types = set()
    for arg in args:
        if arg is None:
            continue
        # Fixed: check the VALUE, not its type object. The previous
        # ``isinstance(type(arg), six.string_types)`` was always False, so
        # falsy strings were skipped entirely and an all-None call raised
        # KeyError from ``set().pop()``.
        if isinstance(arg, six.string_types):
            _types.add(str)
        elif isinstance(arg, bytes):
            _types.add(bytes)
        else:
            _types.add(type(arg))
    if len(_types) > 1:
        raise TypeError("Can't mix str and bytes in path components.")
    return str if not _types else _types.pop()
class _TemporaryFileCloser:
    """A separate object allowing proper closing of a temporary file's
    underlying file object, without adding a __del__ method to the
    temporary file."""

    file = None  # Set here since __del__ checks it
    close_called = False

    def __init__(self, file, name, delete=True):
        self.file = file
        self.name = name
        self.delete = delete

    # NT provides delete-on-close as a primitive, so we don't need
    # the wrapper to do anything special. We still use it so that
    # file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
    if os.name != "nt":

        # Cache the unlinker so we don't get spurious errors at
        # shutdown when the module-level "os" is None'd out. Note
        # that this must be referenced as self.unlink, because the
        # name TemporaryFileWrapper may also get None'd out before
        # __del__ is called.
        def close(self, unlink=os.unlink):
            if not self.close_called and self.file is not None:
                self.close_called = True
                try:
                    self.file.close()
                finally:
                    # Unlink even when close() itself raised.
                    if self.delete:
                        unlink(self.name)

        # Need to ensure the file is deleted on __del__
        def __del__(self):
            self.close()

    else:

        def close(self):
            if not self.close_called:
                self.close_called = True
                self.file.close()
class _TemporaryFileWrapper:
    """Temporary file wrapper

    This class provides a wrapper around files opened for
    temporary use. In particular, it seeks to automatically
    remove the file when it is no longer needed.
    """

    def __init__(self, file, name, delete=True):
        self.file = file
        self.name = name
        self.delete = delete
        # The closer owns all cleanup, so this class needs no __del__.
        self._closer = _TemporaryFileCloser(file, name, delete)

    def __getattr__(self, name):
        # Attribute lookups are delegated to the underlying file
        # and cached for non-numeric results
        # (i.e. methods are cached, closed and friends are not)
        file = self.__dict__["file"]
        a = getattr(file, name)
        if hasattr(a, "__call__"):
            func = a

            @functools.wraps(func)
            def func_wrapper(*args, **kwargs):
                return func(*args, **kwargs)

            # Avoid closing the file as long as the wrapper is alive,
            # see issue #18879.
            func_wrapper._closer = self._closer
            a = func_wrapper
        if not isinstance(a, int):
            setattr(self, name, a)
        return a

    # The underlying __enter__ method returns the wrong object
    # (self.file) so override it to return the wrapper
    def __enter__(self):
        self.file.__enter__()
        return self

    # Need to trap __exit__ as well to ensure the file gets
    # deleted when used in a with statement
    def __exit__(self, exc, value, tb):
        result = self.file.__exit__(exc, value, tb)
        self.close()
        return result

    def close(self):
        """
        Close the temporary file, possibly deleting it.
        """
        self._closer.close()

    # iter() doesn't use __getattr__ to find the __iter__ method
    def __iter__(self):
        # Don't return iter(self.file), but yield from it to avoid closing
        # file as long as it's being used as iterator (see issue #23700). We
        # can't use 'yield from' here because iter(file) returns the file
        # object itself, which has a close method, and thus the file would get
        # closed when the generator is finalized, due to PEP380 semantics.
        for line in self.file:
            yield line
def NamedTemporaryFile(
    mode="w+b",
    buffering=-1,
    encoding=None,
    newline=None,
    suffix=None,
    prefix=None,
    dir=None,
    delete=True,
    wrapper_class_override=None,
):
    """Create and return a temporary file.

    Arguments:
    'prefix', 'suffix', 'dir' -- as for mkstemp.
    'mode' -- the mode argument to io.open (default "w+b").
    'buffering' -- the buffer size argument to io.open (default -1).
    'encoding' -- the encoding argument to io.open (default None)
    'newline' -- the newline argument to io.open (default None)
    'delete' -- whether the file is deleted on close (default True).
    'wrapper_class_override' -- an alternative wrapper class (default None).
    The file is created as mkstemp() would do it.

    Returns an object with a file-like interface; the name of the file
    is accessible as its 'name' attribute.  The file will be automatically
    deleted when it is closed unless the 'delete' argument is set to False.
    """
    prefix, suffix, dir, output_type = _sanitize_params(prefix, suffix, dir)
    flags = _bin_openflags
    # Setting O_TEMPORARY in the flags causes the OS to delete
    # the file when it is closed. This is only supported by Windows.
    if not wrapper_class_override:
        wrapper_class_override = _TemporaryFileWrapper
    if os.name == "nt" and delete:
        flags |= os.O_TEMPORARY
    # _mkstemp_inner grew an output-type argument in python 3.5.
    if sys.version_info < (3, 5):
        (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
    else:
        (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
    try:
        file = io.open(fd, mode, buffering=buffering, newline=newline, encoding=encoding)
        if wrapper_class_override is not None:
            # Build a fresh subclass so custom wrappers cooperate with object.
            return type(str("_TempFileWrapper"), (wrapper_class_override, object), {})(
                file, name, delete
            )
        else:
            return _TemporaryFileWrapper(file, name, delete)
    except BaseException:
        # Wrapper construction failed: don't leak the descriptor or file.
        os.unlink(name)
        os.close(fd)
        raise
|
sarugaku/vistir
|
src/vistir/backports/surrogateescape.py
|
register_surrogateescape
|
python
|
def register_surrogateescape():
if six.PY3:
return
try:
codecs.lookup_error(FS_ERRORS)
except LookupError:
codecs.register_error(FS_ERRORS, surrogateescape_handler)
|
Registers the surrogateescape error handler on Python 2 (only)
|
train
|
https://github.com/sarugaku/vistir/blob/96c008ee62a43608d1e70797f74634cb66a004c1/src/vistir/backports/surrogateescape.py#L183-L192
| null |
"""
This is Victor Stinner's pure-Python implementation of PEP 383: the "surrogateescape" error
handler of Python 3.
Source: misc/python/surrogateescape.py in https://bitbucket.org/haypo/misc
"""
# This code is released under the Python license and the BSD 2-clause license
import codecs
import sys
import six
FS_ERRORS = "surrogateescape"
# # -- Python 2/3 compatibility -------------------------------------
# FS_ERRORS = 'my_surrogateescape'
def u(text):
    """Return *text* as unicode (decoding escape sequences on py2)."""
    return text if six.PY3 else text.decode("unicode_escape")
def b(data):
    """Return *data* as bytes (latin-1 encoding the literal on py3)."""
    return data.encode("latin1") if six.PY3 else data
# Portable single-character constructors: on py3 ``chr`` already yields text
# and ``bytes_chr`` builds a one-byte bytes object; py2 uses unichr/chr.
if six.PY3:
    _unichr = chr
    bytes_chr = lambda code: bytes((code,))
else:
    _unichr = unichr
    bytes_chr = chr
def surrogateescape_handler(exc):
    """
    Pure Python implementation of the PEP 383: the "surrogateescape" error
    handler of Python 3. Undecodable bytes will be replaced by a Unicode
    character U+DCxx on decoding, and these are translated into the
    original bytes on encoding.
    """
    mystring = exc.object[exc.start : exc.end]
    try:
        if isinstance(exc, UnicodeDecodeError):
            # mystring is a byte-string in this case
            decoded = replace_surrogate_decode(mystring)
        elif isinstance(exc, UnicodeEncodeError):
            # In the case of u'\udcc3'.encode('ascii',
            # 'this_surrogateescape_handler'), both Python 2.x and 3.x raise an
            # exception anyway after this function is called, even though I think
            # it's doing what it should. It seems that the strict encoder is called
            # to encode the unicode string that this function returns ...
            decoded = replace_surrogate_encode(mystring)
        else:
            raise exc
    except NotASurrogateError:
        # Data outside the escape scheme: re-raise the original codec error.
        raise exc
    return (decoded, exc.end)
class NotASurrogateError(Exception):
    """Signals a byte/character outside the surrogateescape scheme."""

    pass
def replace_surrogate_encode(mystring):
    """
    Returns a (unicode) string, not the more logical bytes, because the codecs
    register_error functionality expects this.
    """
    decoded = []
    for ch in mystring:
        # if utils.PY3:
        #     code = ch
        # else:
        code = ord(ch)
        # The following magic comes from Py3.3's Python/codecs.c file:
        if not 0xD800 <= code <= 0xDCFF:
            # Not a surrogate. Fail with the original exception.
            raise NotASurrogateError
        # mybytes = [0xe0 | (code >> 12),
        #            0x80 | ((code >> 6) & 0x3f),
        #            0x80 | (code & 0x3f)]
        # Is this a good idea?
        if 0xDC00 <= code <= 0xDC7F:
            decoded.append(_unichr(code - 0xDC00))
        elif code <= 0xDCFF:
            # NOTE(review): this branch also admits D800-DBFF, for which
            # ``code - 0xDC00`` is negative and _unichr() would raise --
            # confirm only DC80-DCFF is expected to reach here.
            decoded.append(_unichr(code - 0xDC00))
        else:
            raise NotASurrogateError
    return str().join(decoded)
def replace_surrogate_decode(mybytes):
    """
    Returns a (unicode) string
    """
    decoded = []
    for ch in mybytes:
        # We may be parsing newbytes (in which case ch is an int) or a native
        # str on Py2
        if isinstance(ch, int):
            code = ch
        else:
            code = ord(ch)
        if 0x80 <= code <= 0xFF:
            # Undecodable byte: map into U+DC80..U+DCFF so the original byte
            # can be restored losslessly on encode (PEP 383).
            decoded.append(_unichr(0xDC00 + code))
        elif code <= 0x7F:
            # Plain ASCII passes through unchanged.
            decoded.append(_unichr(code))
        else:
            # # It may be a bad byte
            # # Try swallowing it.
            # continue
            # print("RAISE!")
            raise NotASurrogateError
    return str().join(decoded)
def encodefilename(fn):
if FS_ENCODING == "ascii":
# ASCII encoder of Python 2 expects that the error handler returns a
# Unicode string encodable to ASCII, whereas our surrogateescape error
# handler has to return bytes in 0x80-0xFF range.
encoded = []
for index, ch in enumerate(fn):
code = ord(ch)
if code < 128:
ch = bytes_chr(code)
elif 0xDC80 <= code <= 0xDCFF:
ch = bytes_chr(code - 0xDC00)
else:
raise UnicodeEncodeError(
FS_ENCODING, fn, index, index + 1, "ordinal not in range(128)"
)
encoded.append(ch)
return bytes().join(encoded)
elif FS_ENCODING == "utf-8":
# UTF-8 encoder of Python 2 encodes surrogates, so U+DC80-U+DCFF
# doesn't go through our error handler
encoded = []
for index, ch in enumerate(fn):
code = ord(ch)
if 0xD800 <= code <= 0xDFFF:
if 0xDC80 <= code <= 0xDCFF:
ch = bytes_chr(code - 0xDC00)
encoded.append(ch)
else:
raise UnicodeEncodeError(
FS_ENCODING, fn, index, index + 1, "surrogates not allowed"
)
else:
ch_utf8 = ch.encode("utf-8")
encoded.append(ch_utf8)
return bytes().join(encoded)
else:
return fn.encode(FS_ENCODING, FS_ERRORS)
def decodefilename(fn):
return fn.decode(FS_ENCODING, FS_ERRORS)
FS_ENCODING = "ascii"
fn = b("[abc\xff]")
encoded = u("[abc\udcff]")
# FS_ENCODING = 'cp932'; fn = b('[abc\x81\x00]'); encoded = u('[abc\udc81\x00]')
# FS_ENCODING = 'UTF-8'; fn = b('[abc\xff]'); encoded = u('[abc\udcff]')
# normalize the filesystem encoding name.
# For example, we expect "utf-8", not "UTF8".
FS_ENCODING = codecs.lookup(FS_ENCODING).name
if __name__ == "__main__":
pass
|
sarugaku/vistir
|
src/vistir/path.py
|
set_write_bit
|
python
|
def set_write_bit(fn):
# type: (str) -> None
fn = fs_encode(fn)
if not os.path.exists(fn):
return
file_stat = os.stat(fn).st_mode
os.chmod(fn, file_stat | stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
if os.name == "nt":
from ._winconsole import get_current_user
user_sid = get_current_user()
icacls_exe = _find_icacls_exe() or "icacls"
from .misc import run
if user_sid:
_, err = run([icacls_exe, "/grant", "{0}:WD".format(user_sid), "''{0}''".format(fn), "/T", "/C", "/Q"])
if not err:
return
if not os.path.isdir(fn):
for path in [fn, os.path.dirname(fn)]:
try:
os.chflags(path, 0)
except AttributeError:
pass
return None
for root, dirs, files in os.walk(fn, topdown=False):
for dir_ in [os.path.join(root, d) for d in dirs]:
set_write_bit(dir_)
for file_ in [os.path.join(root, f) for f in files]:
set_write_bit(file_)
|
Set read-write permissions for the current user on the target path. Fail silently
if the path doesn't exist.
:param str fn: The target filename or path
:return: None
|
train
|
https://github.com/sarugaku/vistir/blob/96c008ee62a43608d1e70797f74634cb66a004c1/src/vistir/path.py#L325-L362
|
[
"def fs_encode(path):\n \"\"\"\n Encode a filesystem path to the proper filesystem encoding\n\n :param Union[str, bytes] path: A string-like path\n :returns: A bytes-encoded filesystem path representation\n \"\"\"\n\n path = _get_path(path)\n if path is None:\n raise TypeError(\"expected a valid path to encode\")\n if isinstance(path, six.text_type):\n if six.PY2:\n return b\"\".join(\n (\n _byte(ord(c) - 0xDC00)\n if 0xDC00 <= ord(c) <= 0xDCFF\n else c.encode(_fs_encoding, _fs_encode_errors)\n )\n for c in path\n )\n return path.encode(_fs_encoding, _fs_encode_errors)\n return path\n"
] |
# -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import functools
import os
import posixpath
import shutil
import stat
import time
import warnings
import six
from six.moves import urllib_parse
from six.moves.urllib import request as urllib_request
from .backports.tempfile import _TemporaryFileWrapper
from .compat import (
Path,
ResourceWarning,
TemporaryDirectory,
FileNotFoundError,
PermissionError,
_fs_encoding,
_NamedTemporaryFile,
finalize,
fs_decode,
fs_encode,
IS_TYPE_CHECKING,
)
if IS_TYPE_CHECKING:
from typing import Optional, Callable, Text, ByteString, AnyStr
__all__ = [
"check_for_unc_path",
"get_converted_relative_path",
"handle_remove_readonly",
"normalize_path",
"is_in_path",
"is_file_url",
"is_readonly_path",
"is_valid_url",
"mkdir_p",
"ensure_mkdir_p",
"create_tracked_tempdir",
"create_tracked_tempfile",
"path_to_url",
"rmtree",
"safe_expandvars",
"set_write_bit",
"url_to_path",
"walk_up",
]
if os.name == "nt":
warnings.filterwarnings(
"ignore",
category=DeprecationWarning,
message="The Windows bytes API has been deprecated.*",
)
def unicode_path(path):
# Paths are supposed to be represented as unicode here
if six.PY2 and not isinstance(path, six.text_type):
return path.decode(_fs_encoding)
return path
def native_path(path):
if six.PY2 and not isinstance(path, bytes):
return path.encode(_fs_encoding)
return path
# once again thank you django...
# https://github.com/django/django/blob/fc6b90b/django/utils/_os.py
if six.PY3 or os.name == "nt":
abspathu = os.path.abspath
else:
def abspathu(path):
"""
Version of os.path.abspath that uses the unicode representation
of the current working directory, thus avoiding a UnicodeDecodeError
in join when the cwd has non-ASCII characters.
"""
if not os.path.isabs(path):
path = os.path.join(os.getcwdu(), path)
return os.path.normpath(path)
def normalize_path(path):
# type: (AnyStr) -> AnyStr
"""
Return a case-normalized absolute variable-expanded path.
:param str path: The non-normalized path
:return: A normalized, expanded, case-normalized path
:rtype: str
"""
return os.path.normpath(
os.path.normcase(
os.path.abspath(os.path.expandvars(os.path.expanduser(str(path))))
)
)
def is_in_path(path, parent):
# type: (AnyStr, AnyStr) -> bool
"""
Determine if the provided full path is in the given parent root.
:param str path: The full path to check the location of.
:param str parent: The parent path to check for membership in
:return: Whether the full path is a member of the provided parent.
:rtype: bool
"""
return normalize_path(str(path)).startswith(normalize_path(str(parent)))
def normalize_drive(path):
# type: (str) -> Text
"""Normalize drive in path so they stay consistent.
This currently only affects local drives on Windows, which can be
identified with either upper or lower cased drive names. The case is
always converted to uppercase because it seems to be preferred.
"""
from .misc import to_text
if os.name != "nt" or not isinstance(path, six.string_types):
return path
drive, tail = os.path.splitdrive(path)
# Only match (lower cased) local drives (e.g. 'c:'), not UNC mounts.
if drive.islower() and len(drive) == 2 and drive[1] == ":":
return "{}{}".format(drive.upper(), tail)
return to_text(path, encoding="utf-8")
def path_to_url(path):
# type: (str) -> Text
"""Convert the supplied local path to a file uri.
:param str path: A string pointing to or representing a local path
:return: A `file://` uri for the same location
:rtype: str
>>> path_to_url("/home/user/code/myrepo/myfile.zip")
'file:///home/user/code/myrepo/myfile.zip'
"""
from .misc import to_text, to_bytes
if not path:
return path
path = to_bytes(path, encoding="utf-8")
normalized_path = to_text(normalize_drive(os.path.abspath(path)), encoding="utf-8")
return to_text(Path(normalized_path).as_uri(), encoding="utf-8")
def url_to_path(url):
# type: (str) -> ByteString
"""
Convert a valid file url to a local filesystem path
Follows logic taken from pip's equivalent function
"""
from .misc import to_bytes
assert is_file_url(url), "Only file: urls can be converted to local paths"
_, netloc, path, _, _ = urllib_parse.urlsplit(url)
# Netlocs are UNC paths
if netloc:
netloc = "\\\\" + netloc
path = urllib_request.url2pathname(netloc + path)
return to_bytes(path, encoding="utf-8")
def is_valid_url(url):
"""Checks if a given string is an url"""
from .misc import to_text
if not url:
return url
pieces = urllib_parse.urlparse(to_text(url))
return all([pieces.scheme, pieces.netloc])
def is_file_url(url):
"""Returns true if the given url is a file url"""
from .misc import to_text
if not url:
return False
if not isinstance(url, six.string_types):
try:
url = getattr(url, "url")
except AttributeError:
raise ValueError("Cannot parse url from unknown type: {0!r}".format(url))
url = to_text(url, encoding="utf-8")
return urllib_parse.urlparse(url.lower()).scheme == "file"
def is_readonly_path(fn):
"""Check if a provided path exists and is readonly.
Permissions check is `bool(path.stat & stat.S_IREAD)` or `not os.access(path, os.W_OK)`
"""
fn = fs_encode(fn)
if os.path.exists(fn):
file_stat = os.stat(fn).st_mode
return not bool(file_stat & stat.S_IWRITE) or not os.access(fn, os.W_OK)
return False
def mkdir_p(newdir, mode=0o777):
"""Recursively creates the target directory and all of its parents if they do not
already exist. Fails silently if they do.
:param str newdir: The directory path to ensure
:raises: OSError if a file is encountered along the way
"""
# http://code.activestate.com/recipes/82465-a-friendly-mkdir/
newdir = fs_encode(newdir)
if os.path.exists(newdir):
if not os.path.isdir(newdir):
raise OSError(
"a file with the same name as the desired dir, '{0}', already exists.".format(
fs_decode(newdir)
)
)
else:
head, tail = os.path.split(newdir)
# Make sure the tail doesn't point to the asame place as the head
curdir = fs_encode(".")
tail_and_head_match = (
os.path.relpath(tail, start=os.path.basename(head)) == curdir
)
if tail and not tail_and_head_match and not os.path.isdir(newdir):
target = os.path.join(head, tail)
if os.path.exists(target) and os.path.isfile(target):
raise OSError(
"A file with the same name as the desired dir, '{0}', already exists.".format(
fs_decode(newdir)
)
)
os.makedirs(os.path.join(head, tail), mode)
def ensure_mkdir_p(mode=0o777):
"""Decorator to ensure `mkdir_p` is called to the function's return value.
"""
def decorator(f):
@functools.wraps(f)
def decorated(*args, **kwargs):
path = f(*args, **kwargs)
mkdir_p(path, mode=mode)
return path
return decorated
return decorator
TRACKED_TEMPORARY_DIRECTORIES = []
def create_tracked_tempdir(*args, **kwargs):
"""Create a tracked temporary directory.
This uses `TemporaryDirectory`, but does not remove the directory when
the return value goes out of scope, instead registers a handler to cleanup
on program exit.
The return value is the path to the created directory.
"""
tempdir = TemporaryDirectory(*args, **kwargs)
TRACKED_TEMPORARY_DIRECTORIES.append(tempdir)
atexit.register(tempdir.cleanup)
warnings.simplefilter("ignore", ResourceWarning)
return tempdir.name
def create_tracked_tempfile(*args, **kwargs):
"""Create a tracked temporary file.
This uses the `NamedTemporaryFile` construct, but does not remove the file
until the interpreter exits.
The return value is the file object.
"""
kwargs["wrapper_class_override"] = _TrackedTempfileWrapper
return _NamedTemporaryFile(*args, **kwargs)
def _find_icacls_exe():
if os.name == "nt":
paths = [
os.path.expandvars(r"%windir%\{0}").format(subdir)
for subdir in ("system32", "SysWOW64")
]
for path in paths:
icacls_path = next(
iter(fn for fn in os.listdir(path) if fn.lower() == "icacls.exe"), None
)
if icacls_path is not None:
icacls_path = os.path.join(path, icacls_path)
return icacls_path
return None
def rmtree(directory, ignore_errors=False, onerror=None):
# type: (str, bool, Optional[Callable]) -> None
"""
Stand-in for :func:`~shutil.rmtree` with additional error-handling.
This version of `rmtree` handles read-only paths, especially in the case of index
files written by certain source control systems.
:param str directory: The target directory to remove
:param bool ignore_errors: Whether to ignore errors, defaults to False
:param func onerror: An error handling function, defaults to :func:`handle_remove_readonly`
.. note::
Setting `ignore_errors=True` may cause this to silently fail to delete the path
"""
directory = fs_encode(directory)
if onerror is None:
onerror = handle_remove_readonly
try:
shutil.rmtree(directory, ignore_errors=ignore_errors, onerror=onerror)
except (IOError, OSError, FileNotFoundError, PermissionError) as exc:
# Ignore removal failures where the file doesn't exist
if exc.errno != errno.ENOENT:
raise
def _wait_for_files(path):
"""
Retry with backoff up to 1 second to delete files from a directory.
:param str path: The path to crawl to delete files from
:return: A list of remaining paths or None
:rtype: Optional[List[str]]
"""
timeout = 0.001
remaining = []
while timeout < 1.0:
remaining = []
if os.path.isdir(path):
L = os.listdir(path)
for target in L:
_remaining = _wait_for_files(target)
if _remaining:
remaining.extend(_remaining)
continue
try:
os.unlink(path)
except FileNotFoundError as e:
if e.errno == errno.ENOENT:
return
except (OSError, IOError, PermissionError):
time.sleep(timeout)
timeout *= 2
remaining.append(path)
else:
return
return remaining
def handle_remove_readonly(func, path, exc):
"""Error handler for shutil.rmtree.
Windows source repo folders are read-only by default, so this error handler
attempts to set them as writeable and then proceed with deletion.
:param function func: The caller function
:param str path: The target path for removal
:param Exception exc: The raised exception
This function will call check :func:`is_readonly_path` before attempting to call
:func:`set_write_bit` on the target path and try again.
"""
# Check for read-only attribute
from .compat import ResourceWarning, FileNotFoundError, PermissionError
PERM_ERRORS = (errno.EACCES, errno.EPERM, errno.ENOENT)
default_warning_message = "Unable to remove file due to permissions restriction: {!r}"
# split the initial exception out into its type, exception, and traceback
exc_type, exc_exception, exc_tb = exc
if is_readonly_path(path):
# Apply write permission and call original function
set_write_bit(path)
try:
func(path)
except (OSError, IOError, FileNotFoundError, PermissionError) as e:
if e.errno in PERM_ERRORS:
if e.errno == errno.ENOENT:
return
remaining = None
if os.path.isdir(path):
remaining = _wait_for_files(path)
if remaining:
warnings.warn(default_warning_message.format(path), ResourceWarning)
else:
func(path, ignore_errors=True)
return
if exc_exception.errno in PERM_ERRORS:
set_write_bit(path)
remaining = _wait_for_files(path)
try:
func(path)
except (OSError, IOError, FileNotFoundError, PermissionError) as e:
if e.errno in PERM_ERRORS:
if e.errno != errno.ENOENT: # File still exists
warnings.warn(default_warning_message.format(path), ResourceWarning)
return
else:
raise exc_exception
def walk_up(bottom):
"""Mimic os.walk, but walk 'up' instead of down the directory tree.
From: https://gist.github.com/zdavkeos/1098474
"""
bottom = os.path.realpath(bottom)
# Get files in current dir.
try:
names = os.listdir(bottom)
except Exception:
return
dirs, nondirs = [], []
for name in names:
if os.path.isdir(os.path.join(bottom, name)):
dirs.append(name)
else:
nondirs.append(name)
yield bottom, dirs, nondirs
new_path = os.path.realpath(os.path.join(bottom, ".."))
# See if we are at the top.
if new_path == bottom:
return
for x in walk_up(new_path):
yield x
def check_for_unc_path(path):
""" Checks to see if a pathlib `Path` object is a unc path or not"""
if (
os.name == "nt"
and len(path.drive) > 2
and not path.drive[0].isalpha()
and path.drive[1] != ":"
):
return True
else:
return False
def get_converted_relative_path(path, relative_to=None):
"""Convert `path` to be relative.
Given a vague relative path, return the path relative to the given
location.
:param str path: The location of a target path
:param str relative_to: The starting path to build against, optional
:returns: A relative posix-style path with a leading `./`
This performs additional conversion to ensure the result is of POSIX form,
and starts with `./`, or is precisely `.`.
>>> os.chdir('/home/user/code/myrepo/myfolder')
>>> vistir.path.get_converted_relative_path('/home/user/code/file.zip')
'./../../file.zip'
>>> vistir.path.get_converted_relative_path('/home/user/code/myrepo/myfolder/mysubfolder')
'./mysubfolder'
>>> vistir.path.get_converted_relative_path('/home/user/code/myrepo/myfolder')
'.'
"""
from .misc import to_text, to_bytes # noqa
if not relative_to:
relative_to = os.getcwdu() if six.PY2 else os.getcwd()
if six.PY2:
path = to_bytes(path, encoding="utf-8")
else:
path = to_text(path, encoding="utf-8")
relative_to = to_text(relative_to, encoding="utf-8")
start_path = Path(relative_to)
try:
start = start_path.resolve()
except OSError:
start = start_path.absolute()
# check if there is a drive letter or mount point
# if it is a mountpoint use the original absolute path
# instead of the unc path
if check_for_unc_path(start):
start = start_path.absolute()
path = start.joinpath(path).relative_to(start)
# check and see if the path that was passed into the function is a UNC path
# and raise value error if it is not.
if check_for_unc_path(path):
raise ValueError("The path argument does not currently accept UNC paths")
relpath_s = to_text(posixpath.normpath(path.as_posix()))
if not (relpath_s == "." or relpath_s.startswith("./")):
relpath_s = posixpath.join(".", relpath_s)
return relpath_s
def safe_expandvars(value):
"""Call os.path.expandvars if value is a string, otherwise do nothing.
"""
if isinstance(value, six.string_types):
return os.path.expandvars(value)
return value
class _TrackedTempfileWrapper(_TemporaryFileWrapper, object):
def __init__(self, *args, **kwargs):
super(_TrackedTempfileWrapper, self).__init__(*args, **kwargs)
self._finalizer = finalize(self, self.cleanup)
@classmethod
def _cleanup(cls, fileobj):
try:
fileobj.close()
finally:
os.unlink(fileobj.name)
def cleanup(self):
if self._finalizer.detach():
try:
self.close()
finally:
os.unlink(self.name)
else:
try:
self.close()
except OSError:
pass
|
Kuniwak/vint
|
vint/ast/plugin/scope_plugin/variable_name_normalizer.py
|
normalize_variable_name
|
python
|
def normalize_variable_name(node, reachability_tester):
# type: (Dict[str, Any], ReferenceReachabilityTester) -> Optional[str]
node_type = NodeType(node['type'])
if not is_analyzable_identifier(node):
return None
if node_type is NodeType.IDENTIFIER:
return _normalize_identifier_value(node, reachability_tester)
# Nodes identifier-like without identifier is always normalized because
# the nodes can not have a visibility prefix.
if node_type in IdentifierLikeNodeTypes:
return node['value']
|
Returns normalized variable name.
Normalizing means that variable names get explicit visibility by
visibility prefix such as: "g:", "s:", ...
Returns None if the specified node is unanalyzable.
A node is unanalyzable if:
- the node is not identifier-like
- the node is named dynamically
|
train
|
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/plugin/scope_plugin/variable_name_normalizer.py#L21-L45
|
[
"def is_analyzable_identifier(node): # type: (Dict[str, Any]) -> bool\n \"\"\" Whether the specified node is an analyzable identifier.\n\n Node declarative-identifier-like is analyzable if it is not dynamic\n and not a member variable, because we can do static scope analysis.\n\n Analyzable cases:\n - let s:var = 0\n - function! Func()\n - echo s:var\n\n Unanalyzable cases:\n - let s:my_{var} = 0\n - function! dict.Func()\n - echo s:my_{var}\n \"\"\"\n return not (is_dynamic_identifier(node) or is_member_identifier(node))\n"
] |
from typing import Dict, Any, Optional # noqa: F401
from vint.ast.node_type import NodeType
from vint.ast.plugin.scope_plugin.scope import (
ScopeVisibility,
ExplicityOfScopeVisibility,
)
from vint.ast.plugin.scope_plugin.scope_detector import (
is_analyzable_identifier,
IdentifierLikeNodeTypes,
)
from vint.ast.plugin.scope_plugin.reference_reachability_tester import ReferenceReachabilityTester
ImplicitScopeVisibilityToIdentifierScopePrefix = {
ScopeVisibility.GLOBAL_LIKE: 'g:',
ScopeVisibility.FUNCTION_LOCAL: 'l:',
ScopeVisibility.BUILTIN: 'v:',
}
def _normalize_identifier_value(id_node, reachability_tester):
# type: (Dict[str, Any], ReferenceReachabilityTester) -> str
visibility_hint = reachability_tester.get_objective_scope_visibility(id_node)
explicity = visibility_hint.explicity
# Of course, we can return soon if the variable already have a explicit scope prefix.
if explicity is ExplicityOfScopeVisibility.EXPLICIT \
or explicity is ExplicityOfScopeVisibility.UNRECOMMENDED_EXPLICIT:
return id_node['value']
# Builtin functions and function arguments can not have any explicit scope prefix.
if explicity is ExplicityOfScopeVisibility.IMPLICIT_BUT_CONSTRAINED:
return id_node['value']
scope_prefix = ImplicitScopeVisibilityToIdentifierScopePrefix[visibility_hint.scope_visibility]
return scope_prefix + id_node['value']
|
Kuniwak/vint
|
vint/ast/traversing.py
|
traverse
|
python
|
def traverse(node, on_enter=None, on_leave=None):
node_type = NodeType(node['type'])
if node_type not in ChildNodeAccessorMap:
raise UnknownNodeTypeException(node_type)
if on_enter:
should_traverse_children = on_enter(node) is not SKIP_CHILDREN
else:
should_traverse_children = True
if should_traverse_children:
for property_accessor in ChildNodeAccessorMap[node_type]:
accessor_func = property_accessor['accessor']
prop_name = property_accessor['property_name']
accessor_func(lambda child_node: traverse(child_node, on_enter, on_leave),
node[prop_name])
for handler in _traverser_extensions:
handler(node, on_enter=on_enter, on_leave=on_leave)
if on_leave:
on_leave(node)
|
Traverses the specified Vim script AST node (depth first order).
The on_enter/on_leave handler will be called with the specified node and
the children. You can skip traversing child nodes by returning
SKIP_CHILDREN.
|
train
|
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/traversing.py#L226-L254
|
[
"def prettify_node_type(node):\n node['type'] = NodeType(node['type'])\n",
"def prettify_node_type(node):\n node['type'] = NodeType(node['type'])\n",
"def _check_scriptencoding(self, node):\n # TODO: Use BREAK when implemented\n if self.has_scriptencoding:\n return SKIP_CHILDREN\n\n node_type = NodeType(node['type'])\n\n if node_type is not NodeType.EXCMD:\n return\n\n self.has_scriptencoding = node['str'].startswith('scripte')\n",
"def _enter_handler(self, node): # type: (Dict[str, Any]) -> None\n node_type = NodeType(node['type'])\n\n if node_type is NodeType.FUNCTION:\n return self._handle_function_node(node)\n elif node_type is NodeType.LAMBDA:\n return self._handle_lambda_node(node)\n\n self._find_variable_like_nodes(node)\n",
"def _leave_handler(self, node): # type: (Dict[str, Any]) -> None\n node_type = NodeType(node['type'])\n\n if node_type is NodeType.FUNCTION:\n self._scope_tree_builder.leave_current_scope()\n\n elif node_type is NodeType.LAMBDA:\n self._scope_tree_builder.leave_current_scope()\n",
"def enter_handler(node):\n node_type = NodeType(node['type'])\n if node_type is not NodeType.CALL:\n return\n\n called_function_identifier = node['left']\n\n # The name node type of \"map\" or \"filter\" or \"call\" are always IDENTIFIER.\n if NodeType(called_function_identifier['type']) is not NodeType.IDENTIFIER:\n return\n\n called_function_identifier_value = called_function_identifier.get('value')\n\n if called_function_identifier_value in ['map', 'filter']:\n # Analyze second argument of \"map\" or \"filter\" if the node type is STRING.\n self._attach_string_expr_content_to_map_or_func(node)\n elif called_function_identifier_value in ['call', 'function']:\n # Analyze first argument of \"call\" or \"function\" if the node type is STRING.\n self._attach_string_expr_content_to_call_or_function(node)\n",
"on_enter=lambda node: self._enter_handler(\n node,\n is_on_lambda_str=None,\n is_on_lambda_body=None,\n)\n",
"def adjust_position(node):\n pos = node['pos']\n # Care 1-based index and the length of \"echo \".\n pos['col'] += start_pos['col'] - 1 - 5\n\n # Care the length of \"echo \".\n pos['i'] += start_pos['i'] - 5\n\n # Care 1-based index\n pos['lnum'] += start_pos['lnum'] - 1\n",
"def adjust_position(node):\n pos = node['pos']\n\n # Care 1-based index and the length of \"echo \".\n pos['col'] += start_pos['col'] - 1 - 5\n\n # Care the length of \"echo \".\n pos['i'] += start_pos['i'] - 5\n\n # Care 1-based index\n pos['lnum'] += start_pos['lnum'] - 1\n",
"def enter_handler(node):\n node_type = NodeType(node['type'])\n if node_type is not NodeType.EXCMD:\n return\n\n is_redir_command = node['ea']['cmd'].get('name') == 'redir'\n if not is_redir_command:\n return\n\n redir_cmd_str = node['str']\n is_redir_assignment = '=>' in redir_cmd_str\n if not is_redir_assignment:\n return\n\n parser = Parser()\n redir_content_node = parser.parse_redir(node)\n node[REDIR_CONTENT] = redir_content_node\n",
"def enter_handler(node):\n if NodeType(node['type']) is not NodeType.IDENTIFIER:\n return\n\n is_map_and_filter_content_visited[node['value']] = True\n",
"def on_enter_handler(node):\n if IDENTIFIER_ATTRIBUTE not in node:\n return\n\n id_name = node['value']\n footstamps[id_name] = True\n\n self.assertEqual(\n expected_id_attr_map[id_name],\n node[IDENTIFIER_ATTRIBUTE],\n \"Identifier Attribute of {1}({0}) have unexpected differences\".format(NodeType(node['type']), id_name)\n )\n",
"def enter_handler(node):\n if NodeType(node['type']) is not NodeType.IDENTIFIER:\n return\n\n is_redir_content_visited[node['value']] = True\n",
"def enter_handler(node):\n if is_declarative_identifier(node):\n id_name = node['value']\n\n pprint(node)\n self.assertEqual(expected_variables_unused[id_name],\n scope_plugin.is_unused_declarative_identifier(node))\n dec_id_footstamp_map[id_name] = True\n",
"def enter_handler(node):\n if is_reference_identifier(node):\n id_name = node['value']\n\n pprint(node)\n self.assertEqual(expected_variables_undeclared[id_name],\n scope_plugin.is_unreachable_reference_identifier(node))\n ref_id_footstamp_map[id_name] = True\n",
"on_enter=lambda node: actual_order_of_events.append({\n 'node_type': NodeType(node['type']),\n 'handler': 'enter',\n}),\n",
"on_leave=lambda node: actual_order_of_events.append({\n 'node_type': NodeType(node['type']),\n 'handler': 'leave',\n}))\n",
"def on_enter(node):\n actual_order_of_events.append({\n 'node_type': NodeType(node['type']),\n 'handler': 'enter',\n })\n\n if NodeType(node['type']) is NodeType.WHILE:\n return SKIP_CHILDREN\n",
"on_leave=lambda node: actual_order_of_events.append({\n 'node_type': NodeType(node['type']),\n 'handler': 'leave',\n}))\n",
"def _enter_handler(self, node):\n if not _is_identifier_like_node(node):\n return\n\n # FIXME: Dynamic identifiers should be returned and it should be filtered by the caller.\n if _is_dynamic_identifier(node) or _is_member_identifier(node) or _is_variadic_symbol(node):\n return\n\n if _is_declarative_identifier(node):\n self._static_declaring_identifiers.append(node)\n else:\n self._static_referencing_identifiers.append(node)\n"
] |
from vint.ast.node_type import NodeType
SKIP_CHILDREN = 'SKIP_CHILDREN'
def for_each(func, nodes):
""" Calls func for each the specified nodes. """
for node in nodes:
call_if_def(func, node)
def for_each_deeply(func, node_lists):
""" Calls func for each the specified nodes. """
for nodes in node_lists:
for_each(func, nodes)
def call_if_def(func, node):
""" Calls func if the node is defined.
VimLParser return an empty array if a child node is not defined.
"""
if hasattr(node, 'type'):
func(node)
ChildNodeAccessor = {
'NODE': call_if_def,
'LIST': for_each,
'NESTED_LIST': for_each_deeply,
}
ChildType = {
'LEFT': {
'accessor': ChildNodeAccessor['NODE'],
'property_name': 'left',
},
'RIGHT': {
'accessor': ChildNodeAccessor['NODE'],
'property_name': 'right',
},
'COND': {
'accessor': ChildNodeAccessor['NODE'],
'property_name': 'cond',
},
'REST': {
'accessor': ChildNodeAccessor['NODE'],
'property_name': 'rest',
},
'LIST': {
'accessor': ChildNodeAccessor['LIST'],
'property_name': 'list',
},
'RLIST': {
'accessor': ChildNodeAccessor['LIST'],
'property_name': 'rlist',
},
'BODY': {
'accessor': ChildNodeAccessor['LIST'],
'property_name': 'body',
},
'LIST_VALUES': {
'accessor': ChildNodeAccessor['LIST'],
'property_name': 'value',
},
'DICT_ENTRIES': {
'accessor': ChildNodeAccessor['NESTED_LIST'],
'property_name': 'value',
},
'CURLYNAME_VALUES': {
'accessor': ChildNodeAccessor['LIST'],
'property_name': 'value',
},
'CURLYNAMEEXPR_VALUES': {
'accessor': ChildNodeAccessor['NODE'],
'property_name': 'value',
},
'ELSEIF': {
'accessor': ChildNodeAccessor['LIST'],
'property_name': 'elseif',
},
'ELSE': {
'accessor': ChildNodeAccessor['NODE'],
'property_name': 'else_',
},
'ENDIF': {
'accessor': ChildNodeAccessor['NODE'],
'property_name': 'endif',
},
'ENDWHILE': {
'accessor': ChildNodeAccessor['NODE'],
'property_name': 'endwhile',
},
'ENDFOR': {
'accessor': ChildNodeAccessor['NODE'],
'property_name': 'endfor',
},
'CATCH': {
'accessor': ChildNodeAccessor['LIST'],
'property_name': 'catch',
},
'FINALLY': {
'accessor': ChildNodeAccessor['NODE'],
'property_name': 'finally_',
},
'ENDTRY': {
'accessor': ChildNodeAccessor['NODE'],
'property_name': 'endtry',
},
}
ChildNodeAccessorMap = {
NodeType.TOPLEVEL: [ChildType['BODY']],
NodeType.COMMENT: [],
NodeType.EXCMD: [],
NodeType.FUNCTION: [ChildType['LEFT'], ChildType['RLIST'], ChildType['BODY']],
NodeType.ENDFUNCTION: [],
NodeType.DELFUNCTION: [ChildType['LEFT']],
NodeType.RETURN: [ChildType['LEFT']],
NodeType.EXCALL: [ChildType['LEFT']],
NodeType.LET: [ChildType['LEFT'], ChildType['LIST'], ChildType['REST'], ChildType['RIGHT']],
NodeType.UNLET: [ChildType['LIST']],
NodeType.LOCKVAR: [ChildType['LIST']],
NodeType.UNLOCKVAR: [ChildType['LIST']],
NodeType.IF: [ChildType['COND'], ChildType['BODY'], ChildType['ELSEIF'], ChildType['ELSE'], ChildType['ENDIF']],
NodeType.ELSEIF: [ChildType['COND'], ChildType['BODY']],
NodeType.ELSE: [ChildType['BODY']],
NodeType.ENDIF: [],
NodeType.WHILE: [ChildType['COND'], ChildType['BODY'], ChildType['ENDWHILE']],
NodeType.ENDWHILE: [],
NodeType.FOR: [ChildType['LEFT'], ChildType['LIST'], ChildType['RIGHT'], ChildType['REST'], ChildType['BODY'], ChildType['ENDFOR']],
NodeType.ENDFOR: [],
NodeType.CONTINUE: [],
NodeType.BREAK: [],
NodeType.TRY: [ChildType['BODY'], ChildType['CATCH'], ChildType['FINALLY'], ChildType['ENDTRY']],
NodeType.CATCH: [ChildType['BODY']],
NodeType.FINALLY: [ChildType['BODY']],
NodeType.ENDTRY: [],
NodeType.THROW: [ChildType['LEFT']],
NodeType.ECHO: [ChildType['LIST']],
NodeType.ECHON: [ChildType['LIST']],
NodeType.ECHOHL: [],
NodeType.ECHOMSG: [ChildType['LIST']],
NodeType.ECHOERR: [ChildType['LIST']],
NodeType.EXECUTE: [ChildType['LIST']],
NodeType.TERNARY: [ChildType['COND'], ChildType['LEFT'], ChildType['RIGHT']],
NodeType.OR: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.AND: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.EQUAL: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.EQUALCI: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.EQUALCS: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.NEQUAL: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.NEQUALCI: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.NEQUALCS: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.GREATER: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.GREATERCI: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.GREATERCS: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.GEQUAL: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.GEQUALCI: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.GEQUALCS: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.SMALLER: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.SMALLERCI: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.SMALLERCS: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.SEQUAL: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.SEQUALCI: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.SEQUALCS: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.MATCH: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.MATCHCI: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.MATCHCS: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.NOMATCH: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.NOMATCHCI: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.NOMATCHCS: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.IS: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.ISCI: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.ISCS: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.ISNOT: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.ISNOTCI: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.ISNOTCS: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.ADD: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.SUBTRACT: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.CONCAT: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.MULTIPLY: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.DIVIDE: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.REMAINDER: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.NOT: [ChildType['LEFT']],
NodeType.MINUS: [ChildType['LEFT']],
NodeType.PLUS: [ChildType['LEFT']],
NodeType.SUBSCRIPT: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.SLICE: [ChildType['LEFT'], ChildType['RLIST']],
NodeType.CALL: [ChildType['LEFT'], ChildType['RLIST']],
NodeType.DOT: [ChildType['LEFT'], ChildType['RIGHT']],
NodeType.NUMBER: [],
NodeType.STRING: [],
NodeType.LIST: [ChildType['LIST_VALUES']],
NodeType.DICT: [ChildType['DICT_ENTRIES']],
NodeType.NESTING: [ChildType['LEFT']],
NodeType.OPTION: [],
NodeType.IDENTIFIER: [],
NodeType.CURLYNAME: [ChildType['CURLYNAME_VALUES']],
NodeType.ENV: [],
NodeType.REG: [],
NodeType.CURLYNAMEPART: [],
NodeType.CURLYNAMEEXPR: [ChildType['CURLYNAMEEXPR_VALUES']],
NodeType.LAMBDA: [ChildType['RLIST'], ChildType['LEFT']],
}
class UnknownNodeTypeException(BaseException):
    """Signals that a node's type is missing from ChildNodeAccessorMap.

    NOTE(review): this derives from BaseException rather than Exception, so a
    plain ``except Exception`` will not catch it; kept as-is because callers
    may depend on that.
    """

    def __init__(self, node_type):
        self.node_type = node_type

    def __str__(self):
        return 'Unknown node type: `{node_type}`'.format(node_type=self.node_type)
# Module-level registry of extra traversal handlers.  traverse() invokes each
# registered handler for every node, letting plugins visit child nodes that
# the built-in ChildNodeAccessorMap does not know about.
_traverser_extensions = []


def register_traverser_extension(handler):
    """ Registers the specified function to traverse into extended child nodes.
    """
    _traverser_extensions.append(handler)
|
Kuniwak/vint
|
vint/linting/policy_set.py
|
PolicySet.update_by_config
|
python
|
def update_by_config(self, config_dict):
    """Rebuild the enabled-policy list from the given config dictionary.

    Policy names that do not exist are reported via
    _warn_unexistent_policy and skipped; the remaining policies are
    enabled or disabled according to the enabling map.
    """
    enabling_map = self._get_enabling_map(config_dict)

    self.enabled_policies = []
    for name, enabled in enabling_map.items():
        if not self._is_policy_exists(name):
            self._warn_unexistent_policy(name)
        elif enabled:
            self.enabled_policies.append(self._get_policy(name))
|
Update the policy set according to the config dictionary.
Expect the policy_enabling_map structure to be (represented by YAML):
- PolicyFoo:
enabled: True
- PolicyBar:
enabled: False
additional_field: 'is_ok'
|
train
|
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/linting/policy_set.py#L48-L68
|
[
"def _is_policy_exists(self, name):\n return name in self._all_policies_map\n",
"def _get_enabling_map(self, config_dict):\n severity = config_dict['cmdargs']['severity']\n policy_enabling_map = {}\n\n for policy_name, policy in self._all_policies_map.items():\n policy_enabling_map[policy_name] = is_level_enabled(policy.level, severity)\n\n prior_policy_enabling_map = config_dict['policies']\n\n for policy_name, policy in prior_policy_enabling_map.items():\n if 'enabled' in policy:\n policy_enabling_map[policy_name] = policy['enabled']\n\n return policy_enabling_map\n"
] |
class PolicySet(object):
    """Holds every known policy instance plus the currently enabled subset."""

    def __init__(self, policy_classes):
        self._all_policies_map = PolicySet.create_all_policies_map(policy_classes)
        self.enabled_policies = []

    @classmethod
    def create_all_policies_map(cls, policy_classes):
        """Instantiate each policy class, keyed by its class name."""
        return {klass.__name__: klass() for klass in policy_classes}

    def _is_policy_exists(self, name):
        return name in self._all_policies_map

    def _get_policy(self, name):
        return self._all_policies_map[name]

    def _warn_unexistent_policy(self, policy_name):
        logging.warning('Policy `{name}` is not defined'.format(
            name=policy_name))

    def _get_enabling_map(self, config_dict):
        """Map every policy name to its enabled flag.

        Severity (from cmdargs) decides the default; an explicit
        'enabled' entry in the config's policies section overrides it.
        """
        severity = config_dict['cmdargs']['severity']
        enabling_map = {
            name: is_level_enabled(policy.level, severity)
            for name, policy in self._all_policies_map.items()
        }

        for name, policy_config in config_dict['policies'].items():
            if 'enabled' in policy_config:
                enabling_map[name] = policy_config['enabled']

        return enabling_map

    def get_enabled_policies(self):
        """ Returns enabled policies. """
        return self.enabled_policies
|
Kuniwak/vint
|
vint/linting/cli.py
|
_build_cmdargs
|
python
|
def _build_cmdargs(argv):
    """Convert raw ``argv`` into the command line arguments dict.

    Takes argv as a parameter (instead of reading sys.argv) so the
    function stays pure; the result feeds usage display and
    vint.linting.env.build_environment.
    """
    arg_parser = _build_arg_parser()
    return vars(arg_parser.parse_args(argv[1:]))
|
Build command line arguments dict to use;
- displaying usages
- vint.linting.env.build_environment
This method takes an argv parameter to keep the function pure.
|
train
|
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/linting/cli.py#L79-L90
| null |
from typing import Dict, Any, List # noqa: F401
import sys
from argparse import ArgumentParser
from pathlib import Path
import pkg_resources
import logging
from vint.linting.linter import Linter
from vint.linting.env import build_environment
from vint.linting.config.config_container import ConfigContainer
from vint.linting.config.config_cmdargs_source import ConfigCmdargsSource
from vint.linting.config.config_default_source import ConfigDefaultSource
from vint.linting.config.config_global_source import ConfigGlobalSource
from vint.linting.config.config_project_source import ConfigProjectSource
from vint.linting.config.config_util import get_config_value
from vint.linting.lint_target import (
AbstractLintTarget,
LintTargetFile,
LintTargetBufferedStream,
CachedLintTarget,
)
from vint.linting.policy_set import PolicySet
from vint.linting.formatter.abstract_formatter import AbstractFormatter
from vint.linting.policy_registry import get_policy_classes
from vint.linting.formatter.formatter import Formatter
from vint.linting.formatter.json_formatter import JSONFormatter
from vint.linting.formatter.statistic_formatter import StatisticFormatter
_stdin_symbol = Path('-')
def start_cli():
    """CLI entry point: lint the requested targets and exit 0 (clean) or 1."""
    env = _build_env(sys.argv)
    _validate(env)
    _adjust_log_level(env)

    config_dict = _build_config_dict(env)
    violations = _lint_all(env, config_dict)

    # The parser is rebuilt here only to reuse its exit() helper.
    parser = _build_arg_parser()
    if len(violations) == 0:
        parser.exit(status=0)

    _print_violations(violations, config_dict)
    parser.exit(status=1)
def _validate(env):  # type: (Dict[str, Any]) -> None
    """Exit with status 1 (after logging an error) when lint targets are bad.

    Rejects: an empty target list, more than one "-" (stdin) target, and any
    path that does not exist or is not a regular file.
    """
    parser = _build_arg_parser()

    paths_to_lint = env['file_paths']
    if len(paths_to_lint) == 0:
        logging.error('nothing to check')
        parser.print_help()
        parser.exit(status=1)

    if paths_to_lint.count(_stdin_symbol) > 1:
        logging.error('number of "-" must be less than 2')
        parser.exit(status=1)

    # Only real file paths are checked for existence; "-" is stdin.
    for path_to_lint in filter(lambda path: path != _stdin_symbol, paths_to_lint):
        if not path_to_lint.exists() or not path_to_lint.is_file():
            logging.error('no such file or directory: `{path}`'.format(
                path=str(path_to_lint)))
            parser.exit(status=1)
def _build_env(argv):
    """Build the linting environment object from raw ``argv``.

    Accepts argv as a parameter (rather than touching sys.argv) so the
    function stays pure.
    """
    return build_environment(_build_cmdargs(argv))
def _build_arg_parser():
    """Create the ArgumentParser describing every vint CLI option."""
    parser = ArgumentParser(prog='vint', description='Lint Vim script')

    # NOTE: version=_get_version() is evaluated eagerly, at parser-build time.
    parser.add_argument('-v', '--version', action='version', version=_get_version())
    parser.add_argument('-V', '--verbose', action='store_const', const=True, help='output verbose message')
    parser.add_argument('-e', '--error', action='store_const', const=True, help='report only errors')
    parser.add_argument('-w', '--warning', action='store_const', const=True, help='report errors and warnings')
    parser.add_argument('-s', '--style-problem', action='store_const', const=True, help='report errors, warnings and style problems')
    parser.add_argument('-m', '--max-violations', type=int, help='limit max violations count')
    parser.add_argument('-c', '--color', action='store_const', const=True, help='colorize output when possible')
    parser.add_argument('--no-color', action='store_const', const=True, help='do not colorize output')
    parser.add_argument('-j', '--json', action='store_const', const=True, help='output json style')
    parser.add_argument('-t', '--stat', action='store_const', const=True, help='output statistic info')
    parser.add_argument('--enable-neovim', action='store_const', const=True, help='enable Neovim syntax')
    parser.add_argument('-f', '--format', help='set output format')
    parser.add_argument('--stdin-display-name', type=str, help='specify a file path that is used for reporting when linting standard inputs')
    parser.add_argument('files', nargs='*', help='file or directory path to lint')

    return parser
def _build_config_dict(env):  # type: (Dict[str, Any]) -> Dict[str, Any]
    """Build the merged config dict from default, global, project and
    cmdargs sources.

    Presumably later sources take precedence over earlier ones — confirm
    against ConfigContainer.
    """
    config = ConfigContainer(
        ConfigDefaultSource(env),
        ConfigGlobalSource(env),
        ConfigProjectSource(env),
        ConfigCmdargsSource(env),
    )

    return config.get_config_dict()
def _lint_all(env, config_dict):  # type: (Dict[str, Any], Dict[str, Any]) -> List[Dict[str, Any]]
    """Run one Linter over every target path and collect all violations."""
    linter = _build_linter(config_dict)

    violations = []
    for path in env['file_paths']:
        violations.extend(linter.lint(_build_lint_target(path, config_dict)))

    return violations
def _build_linter(config_dict):  # type: (Dict[str, Any]) -> Linter
    """Construct a Linter wired with every registered policy class."""
    policy_set = PolicySet(get_policy_classes())
    linter = Linter(policy_set, config_dict)
    return linter
def _print_violations(violations, config_dict):  # type: (List[Dict[str, Any]], Dict[str, Any]) -> None
    """Format the violations with the configured formatter and print them."""
    formatter = _build_formatter(config_dict)
    output = formatter.format_violations(violations)
    print(output)
def _build_formatter(config_dict):  # type: (Dict[str, Any]) -> AbstractFormatter
    """Pick the output formatter: JSON, statistics, or the plain default.

    ``--json`` wins over ``--stat`` when both flags are given.
    """
    if 'cmdargs' not in config_dict:
        return Formatter(config_dict)

    cmdargs = config_dict['cmdargs']
    if cmdargs.get('json'):
        return JSONFormatter()
    if cmdargs.get('stat'):
        return StatisticFormatter(config_dict)
    return Formatter(config_dict)
def _get_version():
    """Return the vint version string."""
    # Imported lazily inside the function — presumably to avoid import-time
    # cost or an import cycle; confirm before hoisting to module level.
    from ..__version__ import version
    return version
def _adjust_log_level(env):
    """Set the root logger to DEBUG when --verbose was given, else WARNING."""
    is_verbose = env['cmdargs'].get('verbose', False)
    logging.getLogger().setLevel(logging.DEBUG if is_verbose else logging.WARNING)
def _build_lint_target(path, config_dict):  # type: (Path, Dict[str, Any]) -> AbstractLintTarget
    """Wrap ``path`` in a cached lint target; the "-" path means stdin.

    For stdin targets, --stdin-display-name supplies the path used in
    reports.
    """
    if path == _stdin_symbol:
        stdin_alt_path = get_config_value(config_dict, ['cmdargs', 'stdin_display_name'])

        # NOTE: In Python 3, sys.stdin is a string not bytes. Then we can get bytes by sys.stdin.buffer.
        # But in Python 2, sys.stdin.buffer is not defined. But we can get bytes by sys.stdin directly.
        is_python_3 = hasattr(sys.stdin, 'buffer')

        if is_python_3:
            lint_target = LintTargetBufferedStream(
                alternate_path=Path(stdin_alt_path),
                buffered_io=sys.stdin.buffer
            )
        else:
            # NOTE: Python 2 on Windows opens sys.stdin in text mode, and
            # binary data that read from it becomes corrupted on \r\n
            # SEE: https://stackoverflow.com/questions/2850893/reading-binary-data-from-stdin/38939320#38939320
            if sys.platform == 'win32':
                # set sys.stdin to binary mode
                import os, msvcrt
                msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)

            lint_target = LintTargetBufferedStream(
                alternate_path=Path(stdin_alt_path),
                buffered_io=sys.stdin
            )
        return CachedLintTarget(lint_target)
    else:
        lint_target = LintTargetFile(path)
        return CachedLintTarget(lint_target)
|
Kuniwak/vint
|
vint/ast/parsing.py
|
Parser.parse
|
python
|
def parse(self, lint_target):  # type: (AbstractLintTarget) -> Dict[str, Any]
    """ Parse vim script file and return the AST. """
    decoder = Decoder(default_decoding_strategy)
    text = decoder.decode(lint_target.read())
    # Normalize CRLF so downstream parsing only ever sees LF.
    return self.parse_string(text.replace('\r\n', '\n'))
|
Parse vim script file and return the AST.
|
train
|
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/parsing.py#L19-L25
|
[
"def decode(self, bytes_seq):\n # type: (bytes) -> str\n strings = []\n\n for (loc, hunk) in _split_by_scriptencoding(bytes_seq):\n debug_hint_for_the_loc = dict()\n self.debug_hint[loc] = debug_hint_for_the_loc\n\n string = self.strategy.decode(hunk, debug_hint=debug_hint_for_the_loc)\n\n if string is None:\n raise EncodingDetectionError(self.debug_hint)\n\n strings.append(string)\n\n return ''.join(strings)\n",
"def read(self): # type: () -> bytes\n with self.path.open('rb') as f:\n return f.read()\n",
"def parse_string(self, string): # type: (str) -> Dict[str, Any]\n \"\"\" Parse vim script string and return the AST. \"\"\"\n lines = string.split('\\n')\n\n reader = vimlparser.StringReader(lines)\n parser = vimlparser.VimLParser(self._enable_neovim)\n ast = parser.parse(reader)\n\n # TOPLEVEL does not have a pos, but we need pos for all nodes\n ast['pos'] = {'col': 1, 'i': 0, 'lnum': 1}\n\n for plugin in self.plugins:\n plugin.process(ast)\n\n return ast\n"
] |
class Parser(object):
    """Vim script parser backed by vimlparser; registered plugins may attach
    extra attributes to the resulting AST via ``plugin.process(ast)``."""

    def __init__(self, plugins=None, enable_neovim=False):
        """ Initialize Parser with the specified plugins.
        The plugins can add attributes to the AST.
        """
        self.plugins = plugins if plugins else []
        self._enable_neovim = enable_neovim

    def parse_string(self, string):  # type: (str) -> Dict[str, Any]
        """ Parse vim script string and return the AST. """
        lines = string.split('\n')

        reader = vimlparser.StringReader(lines)
        parser = vimlparser.VimLParser(self._enable_neovim)
        ast = parser.parse(reader)

        # TOPLEVEL does not have a pos, but we need pos for all nodes
        ast['pos'] = {'col': 1, 'i': 0, 'lnum': 1}

        for plugin in self.plugins:
            plugin.process(ast)

        return ast

    def parse_redir(self, redir_cmd):
        """ Parse a command :redir content. """
        redir_cmd_str = redir_cmd['str']
        matched = re.match(r'redir?!?\s*(=>>?\s*)(\S+)', redir_cmd_str)
        if matched:
            redir_cmd_op = matched.group(1)
            redir_cmd_body = matched.group(2)
            arg_pos = redir_cmd['ea']['argpos']

            # Position of the "redir_cmd_body"
            start_pos = {
                'col': arg_pos['col'] + len(redir_cmd_op),
                'i': arg_pos['i'] + len(redir_cmd_op),
                'lnum': arg_pos['lnum'],
            }

            # NOTE: This is a hack to parse variable node.
            raw_ast = self.parse_string('echo ' + redir_cmd_body)

            # We need the left node of ECHO node
            redir_cmd_ast = raw_ast['body'][0]['list'][0]

            def adjust_position(node):
                pos = node['pos']
                # Care 1-based index and the length of "echo ".
                pos['col'] += start_pos['col'] - 1 - 5
                # Care the length of "echo ".
                pos['i'] += start_pos['i'] - 5
                # Care 1-based index
                pos['lnum'] += start_pos['lnum'] - 1

            traverse(redir_cmd_ast, on_enter=adjust_position)
            return redir_cmd_ast
        return None

    def parse_string_expr(self, string_expr_node):
        """ Parse a string node content. """
        string_expr_node_value = string_expr_node['value']
        string_expr_str = string_expr_node_value[1:-1]

        # Care escaped string literals
        if string_expr_node_value[0] == "'":
            string_expr_str = string_expr_str.replace("''", "'")
        else:
            string_expr_str = string_expr_str.replace('\\"', '"')

        # NOTE: This is a hack to parse expr1. See :help expr1
        raw_ast = self.parse_string('echo ' + string_expr_str)

        # We need the left node of ECHO node
        parsed_string_expr_nodes = raw_ast['body'][0]['list']

        start_pos = string_expr_node['pos']

        def adjust_position(node):
            pos = node['pos']
            # Care 1-based index and the length of "echo ".
            pos['col'] += start_pos['col'] - 1 - 5
            # Care the length of "echo ".
            pos['i'] += start_pos['i'] - 5
            # Care 1-based index
            pos['lnum'] += start_pos['lnum'] - 1

        for parsed_string_expr_node in parsed_string_expr_nodes:
            traverse(parsed_string_expr_node, on_enter=adjust_position)

        return parsed_string_expr_nodes
|
Kuniwak/vint
|
vint/ast/parsing.py
|
Parser.parse_string
|
python
|
def parse_string(self, string):  # type: (str) -> Dict[str, Any]
    """ Parse vim script string and return the AST. """
    reader = vimlparser.StringReader(string.split('\n'))
    ast = vimlparser.VimLParser(self._enable_neovim).parse(reader)

    # TOPLEVEL nodes carry no position; give the root one so every node has a pos.
    ast['pos'] = {'col': 1, 'i': 0, 'lnum': 1}

    for plugin in self.plugins:
        plugin.process(ast)

    return ast
|
Parse vim script string and return the AST.
|
train
|
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/parsing.py#L28-L42
|
[
"def parse(self, reader):\n self.reader = reader\n self.context = []\n toplevel = Node(NODE_TOPLEVEL)\n toplevel.pos = self.reader.getpos()\n toplevel.body = []\n self.push_context(toplevel)\n while self.reader.peek() != \"<EOF>\":\n self.parse_one_cmd()\n self.check_missing_endfunction(\"TOPLEVEL\", self.reader.getpos())\n self.check_missing_endif(\"TOPLEVEL\", self.reader.getpos())\n self.check_missing_endtry(\"TOPLEVEL\", self.reader.getpos())\n self.check_missing_endwhile(\"TOPLEVEL\", self.reader.getpos())\n self.check_missing_endfor(\"TOPLEVEL\", self.reader.getpos())\n self.pop_context()\n return toplevel\n"
] |
class Parser(object):
    """Vim script parser backed by vimlparser; registered plugins may attach
    extra attributes to the resulting AST via ``plugin.process(ast)``."""

    def __init__(self, plugins=None, enable_neovim=False):
        """ Initialize Parser with the specified plugins.
        The plugins can add attributes to the AST.
        """
        self.plugins = plugins if plugins else []
        self._enable_neovim = enable_neovim

    def parse(self, lint_target):  # type: (AbstractLintTarget) -> Dict[str, Any]
        """ Parse vim script file and return the AST. """
        decoder = Decoder(default_decoding_strategy)
        decoded = decoder.decode(lint_target.read())
        # Normalize CRLF so the parser only ever sees LF.
        decoded_and_lf_normalized = decoded.replace('\r\n', '\n')
        return self.parse_string(decoded_and_lf_normalized)

    def parse_redir(self, redir_cmd):
        """ Parse a command :redir content. """
        redir_cmd_str = redir_cmd['str']
        matched = re.match(r'redir?!?\s*(=>>?\s*)(\S+)', redir_cmd_str)
        if matched:
            redir_cmd_op = matched.group(1)
            redir_cmd_body = matched.group(2)
            arg_pos = redir_cmd['ea']['argpos']

            # Position of the "redir_cmd_body"
            start_pos = {
                'col': arg_pos['col'] + len(redir_cmd_op),
                'i': arg_pos['i'] + len(redir_cmd_op),
                'lnum': arg_pos['lnum'],
            }

            # NOTE: This is a hack to parse variable node.
            raw_ast = self.parse_string('echo ' + redir_cmd_body)

            # We need the left node of ECHO node
            redir_cmd_ast = raw_ast['body'][0]['list'][0]

            def adjust_position(node):
                pos = node['pos']
                # Care 1-based index and the length of "echo ".
                pos['col'] += start_pos['col'] - 1 - 5
                # Care the length of "echo ".
                pos['i'] += start_pos['i'] - 5
                # Care 1-based index
                pos['lnum'] += start_pos['lnum'] - 1

            traverse(redir_cmd_ast, on_enter=adjust_position)
            return redir_cmd_ast
        return None

    def parse_string_expr(self, string_expr_node):
        """ Parse a string node content. """
        string_expr_node_value = string_expr_node['value']
        string_expr_str = string_expr_node_value[1:-1]

        # Care escaped string literals
        if string_expr_node_value[0] == "'":
            string_expr_str = string_expr_str.replace("''", "'")
        else:
            string_expr_str = string_expr_str.replace('\\"', '"')

        # NOTE: This is a hack to parse expr1. See :help expr1
        raw_ast = self.parse_string('echo ' + string_expr_str)

        # We need the left node of ECHO node
        parsed_string_expr_nodes = raw_ast['body'][0]['list']

        start_pos = string_expr_node['pos']

        def adjust_position(node):
            pos = node['pos']
            # Care 1-based index and the length of "echo ".
            pos['col'] += start_pos['col'] - 1 - 5
            # Care the length of "echo ".
            pos['i'] += start_pos['i'] - 5
            # Care 1-based index
            pos['lnum'] += start_pos['lnum'] - 1

        for parsed_string_expr_node in parsed_string_expr_nodes:
            traverse(parsed_string_expr_node, on_enter=adjust_position)

        return parsed_string_expr_nodes
|
Kuniwak/vint
|
vint/ast/parsing.py
|
Parser.parse_redir
|
python
|
def parse_redir(self, redir_cmd):
    """Parse the redirect target of a ``:redir`` command node.

    Returns the parsed target AST node with positions mapped back onto
    the original command, or None when the command has no ``=>``/``=>>``
    variable target.
    """
    redir_cmd_str = redir_cmd['str']
    matched = re.match(r'redir?!?\s*(=>>?\s*)(\S+)', redir_cmd_str)
    if matched:
        redir_cmd_op = matched.group(1)
        redir_cmd_body = matched.group(2)
        arg_pos = redir_cmd['ea']['argpos']

        # Position of the "redir_cmd_body"
        start_pos = {
            'col': arg_pos['col'] + len(redir_cmd_op),
            'i': arg_pos['i'] + len(redir_cmd_op),
            'lnum': arg_pos['lnum'],
        }

        # NOTE: This is a hack to parse variable node.
        raw_ast = self.parse_string('echo ' + redir_cmd_body)

        # We need the left node of ECHO node
        redir_cmd_ast = raw_ast['body'][0]['list'][0]

        def adjust_position(node):
            pos = node['pos']
            # Care 1-based index and the length of "echo ".
            pos['col'] += start_pos['col'] - 1 - 5
            # Care the length of "echo ".
            pos['i'] += start_pos['i'] - 5
            # Care 1-based index
            pos['lnum'] += start_pos['lnum'] - 1

        traverse(redir_cmd_ast, on_enter=adjust_position)
        return redir_cmd_ast
    return None
|
Parse a command :redir content.
|
train
|
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/parsing.py#L45-L84
|
[
"def traverse(node, on_enter=None, on_leave=None):\n \"\"\" Traverses the specified Vim script AST node (depth first order).\n The on_enter/on_leave handler will be called with the specified node and\n the children. You can skip traversing child nodes by returning\n SKIP_CHILDREN.\n \"\"\"\n node_type = NodeType(node['type'])\n\n if node_type not in ChildNodeAccessorMap:\n raise UnknownNodeTypeException(node_type)\n\n if on_enter:\n should_traverse_children = on_enter(node) is not SKIP_CHILDREN\n else:\n should_traverse_children = True\n\n if should_traverse_children:\n for property_accessor in ChildNodeAccessorMap[node_type]:\n accessor_func = property_accessor['accessor']\n prop_name = property_accessor['property_name']\n\n accessor_func(lambda child_node: traverse(child_node, on_enter, on_leave),\n node[prop_name])\n\n for handler in _traverser_extensions:\n handler(node, on_enter=on_enter, on_leave=on_leave)\n\n if on_leave:\n on_leave(node)\n",
"def parse_string(self, string): # type: (str) -> Dict[str, Any]\n \"\"\" Parse vim script string and return the AST. \"\"\"\n lines = string.split('\\n')\n\n reader = vimlparser.StringReader(lines)\n parser = vimlparser.VimLParser(self._enable_neovim)\n ast = parser.parse(reader)\n\n # TOPLEVEL does not have a pos, but we need pos for all nodes\n ast['pos'] = {'col': 1, 'i': 0, 'lnum': 1}\n\n for plugin in self.plugins:\n plugin.process(ast)\n\n return ast\n"
] |
class Parser(object):
    """Vim script parser backed by vimlparser; registered plugins may attach
    extra attributes to the resulting AST via ``plugin.process(ast)``."""

    def __init__(self, plugins=None, enable_neovim=False):
        """ Initialize Parser with the specified plugins.
        The plugins can add attributes to the AST.
        """
        self.plugins = plugins if plugins else []
        self._enable_neovim = enable_neovim

    def parse(self, lint_target):  # type: (AbstractLintTarget) -> Dict[str, Any]
        """ Parse vim script file and return the AST. """
        decoder = Decoder(default_decoding_strategy)
        decoded = decoder.decode(lint_target.read())
        # Normalize CRLF so the parser only ever sees LF.
        decoded_and_lf_normalized = decoded.replace('\r\n', '\n')
        return self.parse_string(decoded_and_lf_normalized)

    def parse_string(self, string):  # type: (str) -> Dict[str, Any]
        """ Parse vim script string and return the AST. """
        lines = string.split('\n')

        reader = vimlparser.StringReader(lines)
        parser = vimlparser.VimLParser(self._enable_neovim)
        ast = parser.parse(reader)

        # TOPLEVEL does not have a pos, but we need pos for all nodes
        ast['pos'] = {'col': 1, 'i': 0, 'lnum': 1}

        for plugin in self.plugins:
            plugin.process(ast)

        return ast

    def parse_string_expr(self, string_expr_node):
        """ Parse a string node content. """
        string_expr_node_value = string_expr_node['value']
        string_expr_str = string_expr_node_value[1:-1]

        # Care escaped string literals
        if string_expr_node_value[0] == "'":
            string_expr_str = string_expr_str.replace("''", "'")
        else:
            string_expr_str = string_expr_str.replace('\\"', '"')

        # NOTE: This is a hack to parse expr1. See :help expr1
        raw_ast = self.parse_string('echo ' + string_expr_str)

        # We need the left node of ECHO node
        parsed_string_expr_nodes = raw_ast['body'][0]['list']

        start_pos = string_expr_node['pos']

        def adjust_position(node):
            pos = node['pos']
            # Care 1-based index and the length of "echo ".
            pos['col'] += start_pos['col'] - 1 - 5
            # Care the length of "echo ".
            pos['i'] += start_pos['i'] - 5
            # Care 1-based index
            pos['lnum'] += start_pos['lnum'] - 1

        for parsed_string_expr_node in parsed_string_expr_nodes:
            traverse(parsed_string_expr_node, on_enter=adjust_position)

        return parsed_string_expr_nodes
|
Kuniwak/vint
|
vint/ast/parsing.py
|
Parser.parse_string_expr
|
python
|
def parse_string_expr(self, string_expr_node):
    """Parse the expression held inside a string literal node.

    Returns the parsed expression nodes with positions mapped back into
    the original string literal. Presumably used for expressions embedded
    in strings (e.g. lambda-string contexts) — confirm against callers.
    """
    string_expr_node_value = string_expr_node['value']
    string_expr_str = string_expr_node_value[1:-1]

    # Care escaped string literals
    if string_expr_node_value[0] == "'":
        string_expr_str = string_expr_str.replace("''", "'")
    else:
        string_expr_str = string_expr_str.replace('\\"', '"')

    # NOTE: This is a hack to parse expr1. See :help expr1
    raw_ast = self.parse_string('echo ' + string_expr_str)

    # We need the left node of ECHO node
    parsed_string_expr_nodes = raw_ast['body'][0]['list']

    start_pos = string_expr_node['pos']

    def adjust_position(node):
        pos = node['pos']
        # Care 1-based index and the length of "echo ".
        pos['col'] += start_pos['col'] - 1 - 5
        # Care the length of "echo ".
        pos['i'] += start_pos['i'] - 5
        # Care 1-based index
        pos['lnum'] += start_pos['lnum'] - 1

    for parsed_string_expr_node in parsed_string_expr_nodes:
        traverse(parsed_string_expr_node, on_enter=adjust_position)

    return parsed_string_expr_nodes
|
Parse a string node content.
|
train
|
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/parsing.py#L87-L121
|
[
"def traverse(node, on_enter=None, on_leave=None):\n \"\"\" Traverses the specified Vim script AST node (depth first order).\n The on_enter/on_leave handler will be called with the specified node and\n the children. You can skip traversing child nodes by returning\n SKIP_CHILDREN.\n \"\"\"\n node_type = NodeType(node['type'])\n\n if node_type not in ChildNodeAccessorMap:\n raise UnknownNodeTypeException(node_type)\n\n if on_enter:\n should_traverse_children = on_enter(node) is not SKIP_CHILDREN\n else:\n should_traverse_children = True\n\n if should_traverse_children:\n for property_accessor in ChildNodeAccessorMap[node_type]:\n accessor_func = property_accessor['accessor']\n prop_name = property_accessor['property_name']\n\n accessor_func(lambda child_node: traverse(child_node, on_enter, on_leave),\n node[prop_name])\n\n for handler in _traverser_extensions:\n handler(node, on_enter=on_enter, on_leave=on_leave)\n\n if on_leave:\n on_leave(node)\n",
"def parse_string(self, string): # type: (str) -> Dict[str, Any]\n \"\"\" Parse vim script string and return the AST. \"\"\"\n lines = string.split('\\n')\n\n reader = vimlparser.StringReader(lines)\n parser = vimlparser.VimLParser(self._enable_neovim)\n ast = parser.parse(reader)\n\n # TOPLEVEL does not have a pos, but we need pos for all nodes\n ast['pos'] = {'col': 1, 'i': 0, 'lnum': 1}\n\n for plugin in self.plugins:\n plugin.process(ast)\n\n return ast\n"
] |
class Parser(object):
    """Vim script parser backed by vimlparser; registered plugins may attach
    extra attributes to the resulting AST via ``plugin.process(ast)``."""

    def __init__(self, plugins=None, enable_neovim=False):
        """ Initialize Parser with the specified plugins.
        The plugins can add attributes to the AST.
        """
        self.plugins = plugins if plugins else []
        self._enable_neovim = enable_neovim

    def parse(self, lint_target):  # type: (AbstractLintTarget) -> Dict[str, Any]
        """ Parse vim script file and return the AST. """
        decoder = Decoder(default_decoding_strategy)
        decoded = decoder.decode(lint_target.read())
        # Normalize CRLF so the parser only ever sees LF.
        decoded_and_lf_normalized = decoded.replace('\r\n', '\n')
        return self.parse_string(decoded_and_lf_normalized)

    def parse_string(self, string):  # type: (str) -> Dict[str, Any]
        """ Parse vim script string and return the AST. """
        lines = string.split('\n')

        reader = vimlparser.StringReader(lines)
        parser = vimlparser.VimLParser(self._enable_neovim)
        ast = parser.parse(reader)

        # TOPLEVEL does not have a pos, but we need pos for all nodes
        ast['pos'] = {'col': 1, 'i': 0, 'lnum': 1}

        for plugin in self.plugins:
            plugin.process(ast)

        return ast

    def parse_redir(self, redir_cmd):
        """ Parse a command :redir content. """
        redir_cmd_str = redir_cmd['str']
        matched = re.match(r'redir?!?\s*(=>>?\s*)(\S+)', redir_cmd_str)
        if matched:
            redir_cmd_op = matched.group(1)
            redir_cmd_body = matched.group(2)
            arg_pos = redir_cmd['ea']['argpos']

            # Position of the "redir_cmd_body"
            start_pos = {
                'col': arg_pos['col'] + len(redir_cmd_op),
                'i': arg_pos['i'] + len(redir_cmd_op),
                'lnum': arg_pos['lnum'],
            }

            # NOTE: This is a hack to parse variable node.
            raw_ast = self.parse_string('echo ' + redir_cmd_body)

            # We need the left node of ECHO node
            redir_cmd_ast = raw_ast['body'][0]['list'][0]

            def adjust_position(node):
                pos = node['pos']
                # Care 1-based index and the length of "echo ".
                pos['col'] += start_pos['col'] - 1 - 5
                # Care the length of "echo ".
                pos['i'] += start_pos['i'] - 5
                # Care 1-based index
                pos['lnum'] += start_pos['lnum'] - 1

            traverse(redir_cmd_ast, on_enter=adjust_position)
            return redir_cmd_ast
        return None
|
Kuniwak/vint
|
vint/ast/plugin/scope_plugin/scope_detector.py
|
is_builtin_variable
|
python
|
def is_builtin_variable(id_node):  # type: (Dict[str, Any]) -> bool
    """ Whether the specified node is a builtin identifier. """
    # Builtin variables are always IDENTIFIER nodes.
    if NodeType(id_node['type']) is not NodeType.IDENTIFIER:
        return False

    id_value = id_node['value']

    # Explicit builtin variables such as "v:count" or "v:char".
    if id_value.startswith('v:'):
        # TODO: Add unknown builtin flag
        return True

    if is_builtin_function(id_node):
        return True

    # "key" and "val" are builtin only inside map()/filter() lambda strings.
    if id_value in ('key', 'val'):
        return is_on_lambda_string_context(id_node)

    # Implicit builtin variables such as "count" or "char".
    return id_value in BuiltinVariablesCanHaveImplicitScope
|
Whether the specified node is a builtin identifier.
|
train
|
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/plugin/scope_plugin/scope_detector.py#L69-L90
| null |
from typing import Dict, Any, Optional # noqa: F401
from vint.ast.plugin.scope_plugin.scope import (
ScopeVisibility,
ExplicityOfScopeVisibility,
Scope,
)
from vint.ast.node_type import NodeType
from vint.ast.dictionary.builtins import (
BuiltinVariablesCanHaveImplicitScope,
BuiltinFunctions,
)
from vint.ast.plugin.scope_plugin.identifier_attribute import (
is_dynamic_identifier,
is_declarative_identifier,
is_function_identifier,
is_member_identifier,
is_on_lambda_string_context,
is_lambda_argument_identifier,
is_function_argument,
)
class ScopeVisibilityHint:
    """Pairs a detected scope visibility with how explicitly it was declared."""

    def __init__(self, scope_visibility, explicity):
        # type: (ScopeVisibility, ExplicityOfScopeVisibility) -> None
        self.scope_visibility = scope_visibility
        self.explicity = explicity
# Scope visibility implied by an explicit scope prefix on a *function
# declaration* identifier. Only 'g:' and 's:' map to valid visibilities
# here; every other prefix is INVALID for a function name.
FunctionDeclarationIdentifierScopePrefixToScopeVisibility = {
    'g:': ScopeVisibility.GLOBAL_LIKE,
    'b:': ScopeVisibility.INVALID,
    'w:': ScopeVisibility.INVALID,
    't:': ScopeVisibility.INVALID,
    's:': ScopeVisibility.SCRIPT_LOCAL,
    'l:': ScopeVisibility.INVALID,
    'a:': ScopeVisibility.INVALID,
    'v:': ScopeVisibility.INVALID,
}
# Scope visibility implied by an explicit scope prefix on an ordinary
# *variable* identifier (buffer/window/tab prefixes count as global-like).
VariableIdentifierScopePrefixToScopeVisibility = {
    'g:': ScopeVisibility.GLOBAL_LIKE,
    'b:': ScopeVisibility.GLOBAL_LIKE,
    'w:': ScopeVisibility.GLOBAL_LIKE,
    't:': ScopeVisibility.GLOBAL_LIKE,
    's:': ScopeVisibility.SCRIPT_LOCAL,
    'l:': ScopeVisibility.FUNCTION_LOCAL,
    'a:': ScopeVisibility.FUNCTION_LOCAL,
    'v:': ScopeVisibility.BUILTIN,
}
# Node types treated as global-like: environment variables ($VAR),
# options (&opt) and registers (@r).
GlobalLikeScopeVisibilityNodeTypes = {
    NodeType.ENV: True,
    NodeType.OPTION: True,
    NodeType.REG: True,
}

# Node types that behave like identifiers for scope analysis.
IdentifierLikeNodeTypes = {
    NodeType.IDENTIFIER: True,
    NodeType.ENV: True,
    NodeType.OPTION: True,
    NodeType.REG: True,
}
def is_builtin_function(id_node):  # type: (Dict[str, Any]) -> bool
    """ Whether the specified node is a builtin function name identifier.
    The given identifier should be a child node of NodeType.CALL.
    """
    # Builtin function names only ever appear as IDENTIFIER nodes.
    if NodeType(id_node['type']) is not NodeType.IDENTIFIER:
        return False

    id_value = id_node['value']

    if not is_function_identifier(id_node):
        # A bare name is a variable reference even when it shadows a builtin:
        #   let localtime = 0
        #   echo localtime " => 0
        #   echo localtime() " => 1420011455
        return False

    return id_value in BuiltinFunctions
def is_analyzable_identifier(node):  # type: (Dict[str, Any]) -> bool
    """ Whether the specified node is an analyzable identifier.
    An identifier-like node can be analyzed statically only when it is
    neither dynamic nor a member variable.
    Analyzable cases:
      - let s:var = 0
      - function! Func()
      - echo s:var
    Unanalyzable cases:
      - let s:my_{var} = 0
      - function! dict.Func()
      - echo s:my_{var}
    """
    return not is_dynamic_identifier(node) and not is_member_identifier(node)
def is_analyzable_declarative_identifier(node):  # type: (Dict[str, Any]) -> bool
    """ Whether the specified node is an analyzable declarative identifier.
    Node declarative-identifier-like is analyzable if it is not dynamic
    and not a member variable, because we can do static scope analysis.
    Analyzable cases:
      - let s:var = 0
      - function! Func()
    Unanalyzable cases:
      - let s:my_{var} = 0
      - function! dict.Func()
    """
    return is_declarative_identifier(node) and is_analyzable_identifier(node)
def detect_possible_scope_visibility(node, context_scope):  # type: (Dict[str, Any], Scope) -> ScopeVisibilityHint
    """ Returns a *possible* variable visibility by the specified node.
    The "possible" means that we can not determine a scope visibility of lambda arguments until reachability check.
    """
    node_type = NodeType(node['type'])

    if not is_analyzable_identifier(node):
        return ScopeVisibilityHint(
            ScopeVisibility.UNANALYZABLE,
            ExplicityOfScopeVisibility.UNANALYZABLE
        )

    if node_type is NodeType.IDENTIFIER:
        return _detect_possible_identifier_scope_visibility(node, context_scope)

    # ENV/OPTION/REG nodes (see GlobalLikeScopeVisibilityNodeTypes) are
    # always treated as global-like.
    if node_type in GlobalLikeScopeVisibilityNodeTypes:
        return ScopeVisibilityHint(
            ScopeVisibility.GLOBAL_LIKE,
            ExplicityOfScopeVisibility.IMPLICIT_BUT_CONSTRAINED
        )

    return ScopeVisibilityHint(
        ScopeVisibility.UNANALYZABLE,
        ExplicityOfScopeVisibility.UNANALYZABLE
    )
def _detect_possible_identifier_scope_visibility(id_node, context_scope):
    # type: (Dict[str, Any], Scope) -> ScopeVisibilityHint
    """ Detect a possible scope visibility of the given identifier node.

    Checks are ordered from strongest to weakest evidence: an explicit
    scope prefix wins; then the special identifier kinds (function
    argument, builtin function, builtin variable, function identifier);
    finally the implicit resolution that depends on the context scope.
    """
    explicit_scope_visibility = _get_explicit_scope_visibility(id_node)
    if explicit_scope_visibility is not None:
        # Vim allow `g:` as a function name prefix but it is not recommended.
        # SEE: https://github.com/Kuniwak/vint/pull/136
        is_unrecommended_explicit = is_function_identifier(id_node) and _is_just_global(id_node)
        if is_unrecommended_explicit:
            return ScopeVisibilityHint(
                explicit_scope_visibility,
                ExplicityOfScopeVisibility.UNRECOMMENDED_EXPLICIT
            )

        return ScopeVisibilityHint(
            explicit_scope_visibility,
            ExplicityOfScopeVisibility.EXPLICIT
        )

    if is_function_argument(id_node):
        # Function arguments can not have any explicit scope prefix.
        return ScopeVisibilityHint(
            ScopeVisibility.FUNCTION_LOCAL,
            ExplicityOfScopeVisibility.IMPLICIT_BUT_CONSTRAINED
        )

    if is_builtin_function(id_node):
        # Builtin functions can not have any scope prefix.
        return ScopeVisibilityHint(
            ScopeVisibility.BUILTIN,
            ExplicityOfScopeVisibility.IMPLICIT_BUT_CONSTRAINED
        )

    if is_builtin_variable(id_node):
        # Implicit scope variable will be resolved as a builtin variable if it
        # has a same name to Vim builtin variables.
        return ScopeVisibilityHint(
            ScopeVisibility.BUILTIN,
            ExplicityOfScopeVisibility.IMPLICIT
        )

    if is_function_identifier(id_node):
        # Functions can have the scope visibility only explicit global or
        # implicit global or explicit script local. So a function have implicit
        # scope visibility is always a global function.
        #
        # And the explicity should be implicit. Vim allow `g:` but it is not recommended.
        # SEE: https://github.com/Kuniwak/vint/pull/136
        return ScopeVisibilityHint(
            ScopeVisibility.GLOBAL_LIKE,
            ExplicityOfScopeVisibility.IMPLICIT_BUT_CONSTRAINED
        )

    if not context_scope:
        # We can not detect implicit scope visibility if context scope is not
        # specified.
        return ScopeVisibilityHint(
            ScopeVisibility.UNANALYZABLE,
            ExplicityOfScopeVisibility.UNANALYZABLE
        )

    current_scope_visibility = context_scope.scope_visibility

    # A lambda argument declaration or the references can not have any explicit scope prefix.
    if current_scope_visibility is ScopeVisibility.LAMBDA:
        if is_lambda_argument_identifier(id_node):
            # It can not have any explicit scope prefix.
            explicity = ExplicityOfScopeVisibility.IMPLICIT_BUT_CONSTRAINED
        else:
            # We can not detect the scope of an implicit variable until
            # we know whether the variable can reach to a lambda argument or not.
            # If it can reach to a lambda argument, then it is IMPLICIT_BUT_CONSTRAINED otherwise IMPLICIT.
            explicity = ExplicityOfScopeVisibility.IMPLICIT_OR_LAMBDA
    else:
        explicity = ExplicityOfScopeVisibility.IMPLICIT

    if current_scope_visibility is ScopeVisibility.SCRIPT_LOCAL:
        # Implicit scope variable will be resolved as a global variable when
        # current scope is script local.
        return ScopeVisibilityHint(
            ScopeVisibility.GLOBAL_LIKE,
            explicity
        )

    # Otherwise be a function local variable.
    return ScopeVisibilityHint(
        ScopeVisibility.FUNCTION_LOCAL,
        explicity
    )
def _get_explicit_scope_visibility(id_node):  # type: (Dict[str, Any]) -> Optional[ScopeVisibility]
    """ Return the scope visibility for an explicit scope prefix, or None
    when the identifier has no recognized prefix.

    See :help internal-variables
    """
    prefix = id_node['value'][0:2]

    # Function declarations accept a different (smaller) prefix set than
    # ordinary variables.
    if is_function_identifier(id_node) and is_declarative_identifier(id_node):
        prefix_table = FunctionDeclarationIdentifierScopePrefixToScopeVisibility
    else:
        prefix_table = VariableIdentifierScopePrefixToScopeVisibility

    return prefix_table.get(prefix)
def _is_just_global(id_node): # type: (Dict[str, Any]) -> bool
# See :help internal-variables
return id_node['value'][0:2] == 'g:'
|
Kuniwak/vint
|
vint/ast/plugin/scope_plugin/scope_detector.py
|
is_builtin_function
|
python
|
def is_builtin_function(id_node): # type: (Dict[str, Any]) -> bool
# Builtin functions are always IDENTIFIER.
if NodeType(id_node['type']) is not NodeType.IDENTIFIER:
return False
id_value = id_node['value']
if not is_function_identifier(id_node):
return False
# There are difference between a function identifier and variable
# identifier:
#
# let localtime = 0
# echo localtime " => 0
# echo localtime() " => 1420011455
return id_value in BuiltinFunctions
|
Whether the specified node is a builtin function name identifier.
The given identifier should be a child node of NodeType.CALL.
|
train
|
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/plugin/scope_plugin/scope_detector.py#L93-L112
| null |
from typing import Dict, Any, Optional # noqa: F401
from vint.ast.plugin.scope_plugin.scope import (
ScopeVisibility,
ExplicityOfScopeVisibility,
Scope,
)
from vint.ast.node_type import NodeType
from vint.ast.dictionary.builtins import (
BuiltinVariablesCanHaveImplicitScope,
BuiltinFunctions,
)
from vint.ast.plugin.scope_plugin.identifier_attribute import (
is_dynamic_identifier,
is_declarative_identifier,
is_function_identifier,
is_member_identifier,
is_on_lambda_string_context,
is_lambda_argument_identifier,
is_function_argument,
)
class ScopeVisibilityHint:
    """ A value object pairing a detected scope visibility with how
    explicitly that visibility was declared.

    Attributes:
        scope_visibility: a ScopeVisibility member describing where the
            identifier is visible.
        explicity: an ExplicityOfScopeVisibility member describing whether
            the visibility was spelled out with a scope prefix.
    """
    def __init__(self, scope_visibility, explicity):
        # type: (ScopeVisibility, ExplicityOfScopeVisibility) -> None
        self.scope_visibility = scope_visibility
        self.explicity = explicity

    def __repr__(self):  # type: () -> str
        # Aid debugging/log output; instances previously printed only as an
        # opaque object address.
        return 'ScopeVisibilityHint(scope_visibility={0!r}, explicity={1!r})'.format(
            self.scope_visibility, self.explicity)
FunctionDeclarationIdentifierScopePrefixToScopeVisibility = {
'g:': ScopeVisibility.GLOBAL_LIKE,
'b:': ScopeVisibility.INVALID,
'w:': ScopeVisibility.INVALID,
't:': ScopeVisibility.INVALID,
's:': ScopeVisibility.SCRIPT_LOCAL,
'l:': ScopeVisibility.INVALID,
'a:': ScopeVisibility.INVALID,
'v:': ScopeVisibility.INVALID,
}
VariableIdentifierScopePrefixToScopeVisibility = {
'g:': ScopeVisibility.GLOBAL_LIKE,
'b:': ScopeVisibility.GLOBAL_LIKE,
'w:': ScopeVisibility.GLOBAL_LIKE,
't:': ScopeVisibility.GLOBAL_LIKE,
's:': ScopeVisibility.SCRIPT_LOCAL,
'l:': ScopeVisibility.FUNCTION_LOCAL,
'a:': ScopeVisibility.FUNCTION_LOCAL,
'v:': ScopeVisibility.BUILTIN,
}
GlobalLikeScopeVisibilityNodeTypes = {
NodeType.ENV: True,
NodeType.OPTION: True,
NodeType.REG: True,
}
IdentifierLikeNodeTypes = {
NodeType.IDENTIFIER: True,
NodeType.ENV: True,
NodeType.OPTION: True,
NodeType.REG: True,
}
def is_builtin_variable(id_node):  # type: (Dict[str, Any]) -> bool
    """ Whether the specified node is a builtin identifier. """
    # Builtin variables always appear as IDENTIFIER nodes.
    if NodeType(id_node['type']) is not NodeType.IDENTIFIER:
        return False

    name = id_node['value']

    if name.startswith('v:'):
        # Explicitly prefixed builtin variable such as "v:count" or "v:char".
        # TODO: Add unknown builtin flag
        return True

    if is_builtin_function(id_node):
        return True

    if name in ('key', 'val'):
        # "key" and "val" are builtins only inside the string expression
        # passed to map() or filter().
        return is_on_lambda_string_context(id_node)

    # Implicitly scoped builtin variable such as "count" or "char".
    return name in BuiltinVariablesCanHaveImplicitScope
def is_analyzable_identifier(node): # type: (Dict[str, Any]) -> bool
""" Whether the specified node is an analyzable identifier.
Node declarative-identifier-like is analyzable if it is not dynamic
and not a member variable, because we can do static scope analysis.
Analyzable cases:
- let s:var = 0
- function! Func()
- echo s:var
Unanalyzable cases:
- let s:my_{var} = 0
- function! dict.Func()
- echo s:my_{var}
"""
return not (is_dynamic_identifier(node) or is_member_identifier(node))
def is_analyzable_declarative_identifier(node): # type: (Dict[str, Any]) -> bool
""" Whether the specified node is an analyzable declarative identifier.
Node declarative-identifier-like is analyzable if it is not dynamic
and not a member variable, because we can do static scope analysis.
Analyzable cases:
- let s:var = 0
- function! Func()
Unanalyzable cases:
- let s:my_{var} = 0
- function! dict.Func()
"""
return is_declarative_identifier(node) and is_analyzable_identifier(node)
def detect_possible_scope_visibility(node, context_scope): # type: (Dict[str, Any], Scope) -> ScopeVisibilityHint
""" Returns a *possible* variable visibility by the specified node.
The "possible" means that we can not determine a scope visibility of lambda arguments until reachability check.
"""
node_type = NodeType(node['type'])
if not is_analyzable_identifier(node):
return ScopeVisibilityHint(
ScopeVisibility.UNANALYZABLE,
ExplicityOfScopeVisibility.UNANALYZABLE
)
if node_type is NodeType.IDENTIFIER:
return _detect_possible_identifier_scope_visibility(node, context_scope)
if node_type in GlobalLikeScopeVisibilityNodeTypes:
return ScopeVisibilityHint(
ScopeVisibility.GLOBAL_LIKE,
ExplicityOfScopeVisibility.IMPLICIT_BUT_CONSTRAINED
)
return ScopeVisibilityHint(
ScopeVisibility.UNANALYZABLE,
ExplicityOfScopeVisibility.UNANALYZABLE
)
def _detect_possible_identifier_scope_visibility(id_node, context_scope):
# type: (Dict[str, Any], Scope) -> ScopeVisibilityHint
explicit_scope_visibility = _get_explicit_scope_visibility(id_node)
if explicit_scope_visibility is not None:
# Vim allow `g:` as a function name prefix but it is not recommended.
# SEE: https://github.com/Kuniwak/vint/pull/136
is_unrecommended_explicit = is_function_identifier(id_node) and _is_just_global(id_node)
if is_unrecommended_explicit:
return ScopeVisibilityHint(
explicit_scope_visibility,
ExplicityOfScopeVisibility.UNRECOMMENDED_EXPLICIT
)
return ScopeVisibilityHint(
explicit_scope_visibility,
ExplicityOfScopeVisibility.EXPLICIT
)
if is_function_argument(id_node):
# Function arguments can not have any explicit scope prefix.
return ScopeVisibilityHint(
ScopeVisibility.FUNCTION_LOCAL,
ExplicityOfScopeVisibility.IMPLICIT_BUT_CONSTRAINED
)
if is_builtin_function(id_node):
# Builtin functions can not have any scope prefix.
return ScopeVisibilityHint(
ScopeVisibility.BUILTIN,
ExplicityOfScopeVisibility.IMPLICIT_BUT_CONSTRAINED
)
if is_builtin_variable(id_node):
# Implicit scope variable will be resolved as a builtin variable if it
# has a same name to Vim builtin variables.
return ScopeVisibilityHint(
ScopeVisibility.BUILTIN,
ExplicityOfScopeVisibility.IMPLICIT
)
if is_function_identifier(id_node):
# Functions can have the scope visibility only explicit global or
# implicit global or explicit script local. So a function have implicit
# scope visibility is always a global function.
#
# And the explicity should be implicit. Vim allow `g:` but it is not recommended.
# SEE: https://github.com/Kuniwak/vint/pull/136
return ScopeVisibilityHint(
ScopeVisibility.GLOBAL_LIKE,
ExplicityOfScopeVisibility.IMPLICIT_BUT_CONSTRAINED
)
if not context_scope:
# We can not detect implicit scope visibility if context scope is not
# specified.
return ScopeVisibilityHint(
ScopeVisibility.UNANALYZABLE,
ExplicityOfScopeVisibility.UNANALYZABLE
)
current_scope_visibility = context_scope.scope_visibility
# A lambda argument declaration or the references can not have any explicit scope prefix.
if current_scope_visibility is ScopeVisibility.LAMBDA:
if is_lambda_argument_identifier(id_node):
# It can not have any explicit scope prefix.
explicity = ExplicityOfScopeVisibility.IMPLICIT_BUT_CONSTRAINED
else:
# We can not detect the scope of an implicit variable until
# we know whether the variable can reach to a lambda argument or not.
# If it can reach to a lambda argument, then it is IMPLICIT_BUT_CONSTRAINED otherwise IMPLICIT.
explicity = ExplicityOfScopeVisibility.IMPLICIT_OR_LAMBDA
else:
explicity = ExplicityOfScopeVisibility.IMPLICIT
if current_scope_visibility is ScopeVisibility.SCRIPT_LOCAL:
# Implicit scope variable will be resolved as a global variable when
# current scope is script local.
return ScopeVisibilityHint(
ScopeVisibility.GLOBAL_LIKE,
explicity
)
# Otherwise be a function local variable.
return ScopeVisibilityHint(
ScopeVisibility.FUNCTION_LOCAL,
explicity
)
def _get_explicit_scope_visibility(id_node): # type: (Dict[str, Any]) -> Optional[ScopeVisibility]
# See :help internal-variables
scope_prefix = id_node['value'][0:2]
if is_function_identifier(id_node) and is_declarative_identifier(id_node):
return FunctionDeclarationIdentifierScopePrefixToScopeVisibility.get(scope_prefix)
else:
return VariableIdentifierScopePrefixToScopeVisibility.get(scope_prefix)
def _is_just_global(id_node): # type: (Dict[str, Any]) -> bool
# See :help internal-variables
return id_node['value'][0:2] == 'g:'
|
Kuniwak/vint
|
vint/ast/plugin/scope_plugin/scope_detector.py
|
detect_possible_scope_visibility
|
python
|
def detect_possible_scope_visibility(node, context_scope): # type: (Dict[str, Any], Scope) -> ScopeVisibilityHint
node_type = NodeType(node['type'])
if not is_analyzable_identifier(node):
return ScopeVisibilityHint(
ScopeVisibility.UNANALYZABLE,
ExplicityOfScopeVisibility.UNANALYZABLE
)
if node_type is NodeType.IDENTIFIER:
return _detect_possible_identifier_scope_visibility(node, context_scope)
if node_type in GlobalLikeScopeVisibilityNodeTypes:
return ScopeVisibilityHint(
ScopeVisibility.GLOBAL_LIKE,
ExplicityOfScopeVisibility.IMPLICIT_BUT_CONSTRAINED
)
return ScopeVisibilityHint(
ScopeVisibility.UNANALYZABLE,
ExplicityOfScopeVisibility.UNANALYZABLE
)
|
Returns a *possible* variable visibility by the specified node.
The "possible" means that we can not determine a scope visibility of lambda arguments until reachability check.
|
train
|
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/plugin/scope_plugin/scope_detector.py#L150-L174
|
[
"def is_analyzable_identifier(node): # type: (Dict[str, Any]) -> bool\n \"\"\" Whether the specified node is an analyzable identifier.\n\n Node declarative-identifier-like is analyzable if it is not dynamic\n and not a member variable, because we can do static scope analysis.\n\n Analyzable cases:\n - let s:var = 0\n - function! Func()\n - echo s:var\n\n Unanalyzable cases:\n - let s:my_{var} = 0\n - function! dict.Func()\n - echo s:my_{var}\n \"\"\"\n return not (is_dynamic_identifier(node) or is_member_identifier(node))\n"
] |
from typing import Dict, Any, Optional # noqa: F401
from vint.ast.plugin.scope_plugin.scope import (
ScopeVisibility,
ExplicityOfScopeVisibility,
Scope,
)
from vint.ast.node_type import NodeType
from vint.ast.dictionary.builtins import (
BuiltinVariablesCanHaveImplicitScope,
BuiltinFunctions,
)
from vint.ast.plugin.scope_plugin.identifier_attribute import (
is_dynamic_identifier,
is_declarative_identifier,
is_function_identifier,
is_member_identifier,
is_on_lambda_string_context,
is_lambda_argument_identifier,
is_function_argument,
)
class ScopeVisibilityHint:
def __init__(self, scope_visibility, explicity):
# type: (ScopeVisibility, ExplicityOfScopeVisibility) -> None
self.scope_visibility = scope_visibility
self.explicity = explicity
FunctionDeclarationIdentifierScopePrefixToScopeVisibility = {
'g:': ScopeVisibility.GLOBAL_LIKE,
'b:': ScopeVisibility.INVALID,
'w:': ScopeVisibility.INVALID,
't:': ScopeVisibility.INVALID,
's:': ScopeVisibility.SCRIPT_LOCAL,
'l:': ScopeVisibility.INVALID,
'a:': ScopeVisibility.INVALID,
'v:': ScopeVisibility.INVALID,
}
VariableIdentifierScopePrefixToScopeVisibility = {
'g:': ScopeVisibility.GLOBAL_LIKE,
'b:': ScopeVisibility.GLOBAL_LIKE,
'w:': ScopeVisibility.GLOBAL_LIKE,
't:': ScopeVisibility.GLOBAL_LIKE,
's:': ScopeVisibility.SCRIPT_LOCAL,
'l:': ScopeVisibility.FUNCTION_LOCAL,
'a:': ScopeVisibility.FUNCTION_LOCAL,
'v:': ScopeVisibility.BUILTIN,
}
GlobalLikeScopeVisibilityNodeTypes = {
NodeType.ENV: True,
NodeType.OPTION: True,
NodeType.REG: True,
}
IdentifierLikeNodeTypes = {
NodeType.IDENTIFIER: True,
NodeType.ENV: True,
NodeType.OPTION: True,
NodeType.REG: True,
}
def is_builtin_variable(id_node): # type: (Dict[str, Any]) -> bool
""" Whether the specified node is a builtin identifier. """
# Builtin variables are always IDENTIFIER.
if NodeType(id_node['type']) is not NodeType.IDENTIFIER:
return False
id_value = id_node['value']
if id_value.startswith('v:'):
# It is an explicit builtin variable such as: "v:count", "v:char"
# TODO: Add unknown builtin flag
return True
if is_builtin_function(id_node):
return True
if id_value in ['key', 'val']:
# These builtin variable names are available on only map() or filter().
return is_on_lambda_string_context(id_node)
# It is an implicit builtin variable such as: "count", "char"
return id_value in BuiltinVariablesCanHaveImplicitScope
def is_builtin_function(id_node):  # type: (Dict[str, Any]) -> bool
    """ Whether the specified node is a builtin function name identifier.
    The given identifier should be a child node of NodeType.CALL.
    """
    # Builtin functions are always IDENTIFIER.
    if NodeType(id_node['type']) is not NodeType.IDENTIFIER:
        return False

    id_value = id_node['value']

    if not is_function_identifier(id_node):
        return False

    # NOTE: There is a difference between a function identifier and a
    # variable identifier that share the same name:
    #
    #   let localtime = 0
    #   echo localtime " => 0
    #   echo localtime() " => 1420011455
    return id_value in BuiltinFunctions
def is_analyzable_identifier(node): # type: (Dict[str, Any]) -> bool
""" Whether the specified node is an analyzable identifier.
Node declarative-identifier-like is analyzable if it is not dynamic
and not a member variable, because we can do static scope analysis.
Analyzable cases:
- let s:var = 0
- function! Func()
- echo s:var
Unanalyzable cases:
- let s:my_{var} = 0
- function! dict.Func()
- echo s:my_{var}
"""
return not (is_dynamic_identifier(node) or is_member_identifier(node))
def is_analyzable_declarative_identifier(node): # type: (Dict[str, Any]) -> bool
""" Whether the specified node is an analyzable declarative identifier.
Node declarative-identifier-like is analyzable if it is not dynamic
and not a member variable, because we can do static scope analysis.
Analyzable cases:
- let s:var = 0
- function! Func()
Unanalyzable cases:
- let s:my_{var} = 0
- function! dict.Func()
"""
return is_declarative_identifier(node) and is_analyzable_identifier(node)
def _detect_possible_identifier_scope_visibility(id_node, context_scope):
# type: (Dict[str, Any], Scope) -> ScopeVisibilityHint
explicit_scope_visibility = _get_explicit_scope_visibility(id_node)
if explicit_scope_visibility is not None:
# Vim allow `g:` as a function name prefix but it is not recommended.
# SEE: https://github.com/Kuniwak/vint/pull/136
is_unrecommended_explicit = is_function_identifier(id_node) and _is_just_global(id_node)
if is_unrecommended_explicit:
return ScopeVisibilityHint(
explicit_scope_visibility,
ExplicityOfScopeVisibility.UNRECOMMENDED_EXPLICIT
)
return ScopeVisibilityHint(
explicit_scope_visibility,
ExplicityOfScopeVisibility.EXPLICIT
)
if is_function_argument(id_node):
# Function arguments can not have any explicit scope prefix.
return ScopeVisibilityHint(
ScopeVisibility.FUNCTION_LOCAL,
ExplicityOfScopeVisibility.IMPLICIT_BUT_CONSTRAINED
)
if is_builtin_function(id_node):
# Builtin functions can not have any scope prefix.
return ScopeVisibilityHint(
ScopeVisibility.BUILTIN,
ExplicityOfScopeVisibility.IMPLICIT_BUT_CONSTRAINED
)
if is_builtin_variable(id_node):
# Implicit scope variable will be resolved as a builtin variable if it
# has a same name to Vim builtin variables.
return ScopeVisibilityHint(
ScopeVisibility.BUILTIN,
ExplicityOfScopeVisibility.IMPLICIT
)
if is_function_identifier(id_node):
# Functions can have the scope visibility only explicit global or
# implicit global or explicit script local. So a function have implicit
# scope visibility is always a global function.
#
# And the explicity should be implicit. Vim allow `g:` but it is not recommended.
# SEE: https://github.com/Kuniwak/vint/pull/136
return ScopeVisibilityHint(
ScopeVisibility.GLOBAL_LIKE,
ExplicityOfScopeVisibility.IMPLICIT_BUT_CONSTRAINED
)
if not context_scope:
# We can not detect implicit scope visibility if context scope is not
# specified.
return ScopeVisibilityHint(
ScopeVisibility.UNANALYZABLE,
ExplicityOfScopeVisibility.UNANALYZABLE
)
current_scope_visibility = context_scope.scope_visibility
# A lambda argument declaration or the references can not have any explicit scope prefix.
if current_scope_visibility is ScopeVisibility.LAMBDA:
if is_lambda_argument_identifier(id_node):
# It can not have any explicit scope prefix.
explicity = ExplicityOfScopeVisibility.IMPLICIT_BUT_CONSTRAINED
else:
# We can not detect the scope of an implicit variable until
# we know whether the variable can reach to a lambda argument or not.
# If it can reach to a lambda argument, then it is IMPLICIT_BUT_CONSTRAINED otherwise IMPLICIT.
explicity = ExplicityOfScopeVisibility.IMPLICIT_OR_LAMBDA
else:
explicity = ExplicityOfScopeVisibility.IMPLICIT
if current_scope_visibility is ScopeVisibility.SCRIPT_LOCAL:
# Implicit scope variable will be resolved as a global variable when
# current scope is script local.
return ScopeVisibilityHint(
ScopeVisibility.GLOBAL_LIKE,
explicity
)
# Otherwise be a function local variable.
return ScopeVisibilityHint(
ScopeVisibility.FUNCTION_LOCAL,
explicity
)
def _get_explicit_scope_visibility(id_node): # type: (Dict[str, Any]) -> Optional[ScopeVisibility]
# See :help internal-variables
scope_prefix = id_node['value'][0:2]
if is_function_identifier(id_node) and is_declarative_identifier(id_node):
return FunctionDeclarationIdentifierScopePrefixToScopeVisibility.get(scope_prefix)
else:
return VariableIdentifierScopePrefixToScopeVisibility.get(scope_prefix)
def _is_just_global(id_node): # type: (Dict[str, Any]) -> bool
# See :help internal-variables
return id_node['value'][0:2] == 'g:'
|
Kuniwak/vint
|
vint/ast/plugin/scope_plugin/identifier_classifier.py
|
IdentifierClassifier.attach_identifier_attributes
|
python
|
def attach_identifier_attributes(self, ast):  # type: (Dict[str, Any]) -> Dict[str, Any]
    """ Attach identifier-classification attributes to the given AST.

    The AST is pre-processed by RedirAssignmentParser (":redir =>"
    assignment targets) and CallNodeParser (string expressions passed to
    map()/filter()/call()/function()), then every node is visited and
    classified by _enter_handler.

    Returns the `ast` argument itself, not the pre-processed variants;
    presumably the parsers and the handler annotate nodes in place — TODO
    confirm against RedirAssignmentParser/CallNodeParser.
    """
    redir_assignment_parser = RedirAssignmentParser()
    ast_with_parsed_redir = redir_assignment_parser.process(ast)

    map_and_filter_parser = CallNodeParser()
    ast_with_parse_map_and_filter_and_redir = \
        map_and_filter_parser.process(ast_with_parsed_redir)

    traverse(
        ast_with_parse_map_and_filter_and_redir,
        on_enter=lambda node: self._enter_handler(
            node,
            # The traversal starts outside of any lambda context.
            is_on_lambda_str=None,
            is_on_lambda_body=None,
        )
    )

    return ast
|
Attach 5 flags to the AST.
- is dynamic: True if the identifier name can be determined by static analysis.
- is member: True if the identifier is a member of a subscription/dot/slice node.
- is declaring: True if the identifier is used to declare.
- is autoload: True if the identifier is declared with autoload.
- is function: True if the identifier is a function. Vim distinguish
between function identifiers and variable identifiers.
- is declarative parameter: True if the identifier is a declarative
parameter. For example, the identifier "param" in Func(param) is a
declarative parameter.
- is on string expression context: True if the variable is on the
string expression context. The string expression context is the
string content on the 2nd argument of the map or filter function.
- is lambda argument: True if the identifier is a lambda argument.
|
train
|
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/plugin/scope_plugin/identifier_classifier.py#L118-L150
|
[
"def traverse(node, on_enter=None, on_leave=None):\n \"\"\" Traverses the specified Vim script AST node (depth first order).\n The on_enter/on_leave handler will be called with the specified node and\n the children. You can skip traversing child nodes by returning\n SKIP_CHILDREN.\n \"\"\"\n node_type = NodeType(node['type'])\n\n if node_type not in ChildNodeAccessorMap:\n raise UnknownNodeTypeException(node_type)\n\n if on_enter:\n should_traverse_children = on_enter(node) is not SKIP_CHILDREN\n else:\n should_traverse_children = True\n\n if should_traverse_children:\n for property_accessor in ChildNodeAccessorMap[node_type]:\n accessor_func = property_accessor['accessor']\n prop_name = property_accessor['property_name']\n\n accessor_func(lambda child_node: traverse(child_node, on_enter, on_leave),\n node[prop_name])\n\n for handler in _traverser_extensions:\n handler(node, on_enter=on_enter, on_leave=on_leave)\n\n if on_leave:\n on_leave(node)\n",
"def process(self, ast):\n def enter_handler(node):\n node_type = NodeType(node['type'])\n if node_type is not NodeType.CALL:\n return\n\n called_function_identifier = node['left']\n\n # The name node type of \"map\" or \"filter\" or \"call\" are always IDENTIFIER.\n if NodeType(called_function_identifier['type']) is not NodeType.IDENTIFIER:\n return\n\n called_function_identifier_value = called_function_identifier.get('value')\n\n if called_function_identifier_value in ['map', 'filter']:\n # Analyze second argument of \"map\" or \"filter\" if the node type is STRING.\n self._attach_string_expr_content_to_map_or_func(node)\n elif called_function_identifier_value in ['call', 'function']:\n # Analyze first argument of \"call\" or \"function\" if the node type is STRING.\n self._attach_string_expr_content_to_call_or_function(node)\n\n traverse(ast, on_enter=enter_handler)\n\n return ast\n",
"def process(self, ast):\n def enter_handler(node):\n node_type = NodeType(node['type'])\n if node_type is not NodeType.EXCMD:\n return\n\n is_redir_command = node['ea']['cmd'].get('name') == 'redir'\n if not is_redir_command:\n return\n\n redir_cmd_str = node['str']\n is_redir_assignment = '=>' in redir_cmd_str\n if not is_redir_assignment:\n return\n\n parser = Parser()\n redir_content_node = parser.parse_redir(node)\n node[REDIR_CONTENT] = redir_content_node\n\n traverse(ast, on_enter=enter_handler)\n\n return ast\n"
] |
class IdentifierClassifier(object):
""" A class for identifier classifiers.
This class classify nodes by 5 flags:
- is dynamic: True if the identifier name can be determined by static analysis.
- is member: True if the identifier is a member of a subscription/dot/slice node.
- is declaring: True if the identifier is used to declare.
- is autoload: True if the identifier is declared with autoload.
- is function: True if the identifier is a function. Vim distinguish
between function identifiers and variable identifiers.
- is declarative parameter: True if the identifier is a declarative
parameter. For example, the identifier "param" in Func(param) is a
declarative parameter.
"""
class IdentifierCollector(object):
""" A class for identifier node collectors.
Only static and not member nodes will be collected and the nodes will
be grouped by 2 types; declaring or referencing.
"""
def __init__(self):
self._static_referencing_identifiers = None # type: List[Dict[str, Any]]
self._static_declaring_identifiers = None # type: List[Dict[str, Any]]
def collect_identifiers(self, ast): # type: (Dict[str, Any]) -> CollectedIdentifiers
self._static_referencing_identifiers = []
self._static_declaring_identifiers = []
# TODO: Make more performance efficiency.
traverse(ast, on_enter=self._enter_handler)
return CollectedIdentifiers(
self._static_declaring_identifiers,
self._static_referencing_identifiers
)
def _enter_handler(self, node):
if not _is_identifier_like_node(node):
return
# FIXME: Dynamic identifiers should be returned and it should be filtered by the caller.
if _is_dynamic_identifier(node) or _is_member_identifier(node) or _is_variadic_symbol(node):
return
if _is_declarative_identifier(node):
self._static_declaring_identifiers.append(node)
else:
self._static_referencing_identifiers.append(node)
def _enter_handler(self, node, is_on_lambda_body, is_on_lambda_str):
node_type = NodeType(node['type'])
if node_type in IdentifierTerminateNodeTypes:
# Attach identifier attributes to all IdentifierTerminateNodeTypes.
self._enter_identifier_terminate_node(
node,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body,
)
if node_type in AccessorLikeNodeTypes:
self._pre_mark_accessor_children(
node,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body,
)
if node_type in DeclarativeNodeTypes:
self._enter_declarative_node(
node,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body,
)
if node_type is NodeType.CALL:
self._enter_call_node(
node,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body
)
if node_type is NodeType.DELFUNCTION:
self._enter_delfunction_node(
node,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body
)
if node_type is NodeType.STRING:
self._enter_string_node(
node,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body
)
if node_type is NodeType.LAMBDA:
return self._enter_lambda_node(
node,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body,
)
def _pre_mark_accessor_children(self, node, is_on_lambda_body, is_on_lambda_str):
node_type = NodeType(node['type'])
dict_node = node['left']
if NodeType(dict_node['type']) in AccessorLikeNodeTypes:
self._pre_mark_accessor_children(
dict_node,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body,
)
if node_type is NodeType.SLICE:
for member_node in node['rlist']:
# In VimLParser spec, an empty array means null.
# list[1:] => {rlist: [node, []]}
if type(member_node) is list:
continue
if NodeType(member_node['type']) is NodeType.IDENTIFIER:
# Only the identifier should be flagged as a member that
# the variable is an accessor for a list or dictionary.
# For example, the variable that is "l:end" in list[0 : l:end]
# is not accessor for the symbol table of the variable "list",
# but it is a variable symbol table accessor.
continue
self._pre_mark_member_node(
member_node,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body,
)
return
member_node = node['right']
if node_type is NodeType.SUBSCRIPT:
if NodeType(member_node['type']) is NodeType.IDENTIFIER:
# Only the identifier should be flagged as a member that
# the variable is an accessor for a list or dictionary.
# For example, the variable that is "l:key" in dict[l:key]
# is not accessor for the symbol table of the variable "dict",
# but it is a variable symbol table accessor.
return
self._pre_mark_member_node(
member_node,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body,
)
def _pre_mark_member_node(self, member_node, is_on_lambda_body, is_on_lambda_str):
    """ Flag the given node as a member accessor (e.g. the right-hand side
    of a DOT node) when its type is identifier-terminate or an analyzable
    subscript child; any other node type is left untouched. """
    member_node_type = NodeType(member_node['type'])
    if member_node_type in IdentifierTerminateNodeTypes or \
            member_node_type in AnalyzableSubScriptChildNodeTypes:
        _set_identifier_attribute(
            member_node,
            is_member=True,
            is_on_lambda_str=is_on_lambda_str,
            is_on_lambda_body=is_on_lambda_body,
        )
def _enter_identifier_like_node(self, node, is_on_lambda_body, is_on_lambda_str, is_declarative=None,
is_function=None, is_declarative_parameter=None):
node_type = NodeType(node['type'])
if node_type in AccessorLikeNodeTypes:
id_like_node = node
self._enter_accessor_node(
id_like_node,
is_declarative=is_declarative,
is_function=is_function,
is_declarative_parameter=is_declarative_parameter,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body,
)
return
if node_type in IdentifierTerminateNodeTypes:
id_like_node = node
self._enter_identifier_terminate_node(
id_like_node,
is_declarative=is_declarative,
is_function=is_function,
is_declarative_parameter=is_declarative_parameter,
is_on_lambda_body=is_on_lambda_body,
is_on_lambda_str=is_on_lambda_str,
)
return
def _enter_function_node(self, func_node, is_on_lambda_body, is_on_lambda_str):
# Function node has declarative identifiers as the function name and
# the parameter names.
# Function name is in the left.
func_name_node = func_node['left']
self._enter_identifier_like_node(
func_name_node,
is_declarative=True,
is_function=True,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body,
)
# Function parameter names are in the r_list.
func_param_nodes = func_node['rlist']
for func_param_node in func_param_nodes:
self._enter_identifier_like_node(
func_param_node,
is_declarative_parameter=True,
is_declarative=True,
is_on_lambda_body=is_on_lambda_body,
is_on_lambda_str=is_on_lambda_str,
)
def _enter_delfunction_node(self, delfunc_node, is_on_lambda_body, is_on_lambda_str):
    """ Mark the function name operand (the "left" child) of a
    :delfunction node as a function identifier; is_declarative is left
    unset because the command removes rather than declares the function. """
    func_name_node = delfunc_node['left']
    self._enter_identifier_like_node(
        func_name_node,
        is_function=True,
        is_on_lambda_body=is_on_lambda_body,
        is_on_lambda_str=is_on_lambda_str
    )
def _enter_curlyname_node(self, curlyname_node, is_on_lambda_body, is_on_lambda_str,
is_declarative=None, is_function=None, is_declarative_parameter=None):
# Curlyname node can have a dynamic name. For example:
# let s:var = 'VAR'
# let my_{s:var} = 0
_set_identifier_attribute(
curlyname_node,
is_dynamic=True,
is_declarative=is_declarative,
is_function=is_function,
is_declarative_parameter=is_declarative_parameter,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body,
)
def _enter_identifier_terminate_node(self, id_term_node, is_on_lambda_body, is_on_lambda_str, is_declarative=None,
is_function=None, is_declarative_parameter=None, is_lambda_argument=None):
node_type = NodeType(id_term_node['type'])
if node_type is NodeType.CURLYNAME:
self._enter_curlyname_node(
id_term_node,
is_on_lambda_body=is_on_lambda_body,
is_on_lambda_str=is_on_lambda_str,
is_declarative=is_declarative,
is_function=is_function,
is_declarative_parameter=is_declarative_parameter,
)
return
is_autoload = '#' in id_term_node['value']
is_variadic = id_term_node['value'] == '...'
_set_identifier_attribute(
id_term_node,
is_lambda_argument=is_lambda_argument,
is_on_lambda_body=is_on_lambda_body,
is_on_lambda_str=is_on_lambda_str,
is_declarative=is_declarative,
is_autoload=is_autoload,
is_function=is_function,
is_declarative_parameter=is_declarative_parameter,
is_variadic=is_variadic,
)
def _enter_accessor_node(self, accessor_node, is_on_lambda_body, is_on_lambda_str, is_declarative=None,
is_function=None, is_declarative_parameter=None):
accessor_node_type = NodeType(accessor_node['type'])
if accessor_node_type is NodeType.DOT:
_set_identifier_attribute(
accessor_node['right'],
is_declarative=is_declarative,
is_dynamic=False,
is_function=is_function,
is_declarative_parameter=is_declarative_parameter,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body,
)
return
if accessor_node_type is NodeType.SUBSCRIPT:
subscript_right_type = NodeType(accessor_node['right']['type'])
# We can do static analysis NodeType.SUBSCRIPT such as:
# let object['name'] = 0
#
# but we cannot do it in other cases such as:
# let object[var] = 0
is_dynamic = subscript_right_type not in AnalyzableSubScriptChildNodeTypes
if not is_dynamic:
_set_identifier_attribute(
accessor_node['right'],
is_declarative=is_declarative,
is_dynamic=False,
is_function=is_function,
is_declarative_parameter=is_declarative_parameter,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body,
)
return
if accessor_node_type is NodeType.SLICE:
for elem_node in accessor_node['rlist']:
if type(elem_node) is list:
# In VimLParser spec, an empty array means null.
# list[1:] => {rlist: [node, []]}
continue
elem_node_type = NodeType(elem_node['type'])
# We can do static analysis NodeType.SLICE such as:
# let object[0:1] = 0
#
# but we cannot do it in other cases such as:
# let object[0:var] = 0
is_dynamic = elem_node_type not in AnalyzableSubScriptChildNodeTypes
# In the following case, 0 is a declarative but var is not declarative.
# It is more like a reference.
# let object[0:var] = 0
is_declarative = elem_node_type in AnalyzableSubScriptChildNodeTypes
_set_identifier_attribute(
elem_node,
is_declarative=is_declarative,
is_dynamic=is_dynamic,
is_function=is_function,
is_declarative_parameter=is_declarative_parameter,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body,
)
return
raise Exception()
def _enter_let_node(self, let_node, is_on_lambda_body, is_on_lambda_str):
# Only "=" operator can be used as declaration.
if let_node['op'] != '=':
return
self._enter_assignment_node(
let_node,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body,
)
def _enter_for_node(self, for_node, is_on_lambda_body, is_on_lambda_str):
self._enter_assignment_node(
for_node,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body,
)
def _enter_assignment_node(self, node, is_on_lambda_body, is_on_lambda_str):
# In VimLParser spec, an empty array means null.
#
# | Normal assignment | Destructuring assignment |
# |:--------------------:|:---------------------------:|
# | node['left'] == Node | node['left'] == [] |
# | node['list'] == [] | node['list'] == [Node, ...] |
left_node = node['left']
is_destructuring_assignment = type(left_node) is list
if is_destructuring_assignment:
for elem_node in node['list']:
self._enter_identifier_like_node(
elem_node,
is_declarative=True,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body,
)
else:
self._enter_identifier_like_node(
left_node,
is_declarative=True,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body,
)
rest_node = node['rest']
has_rest = type(rest_node) is not list
if has_rest:
self._enter_identifier_like_node(
rest_node,
is_declarative=True,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body,
)
def _enter_declarative_node(self, node, is_on_lambda_body, is_on_lambda_str):
node_type = NodeType(node['type'])
if node_type is NodeType.FUNCTION:
self._enter_function_node(
node,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body,
)
elif node_type is NodeType.LET:
self._enter_let_node(
node,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body,
)
elif node_type is NodeType.FOR:
self._enter_for_node(
node,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body
)
elif node_type is NodeType.EXCMD:
self._enter_excmd_node(
node,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body,
)
def _enter_call_node(self, call_node, is_on_lambda_body, is_on_lambda_str):
called_func_node = call_node['left']
self._enter_identifier_like_node(
called_func_node,
is_function=True,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body,
)
def _enter_string_node(self, string_node, is_on_lambda_body, is_on_lambda_str):
# Classify the 2nd argument node of "map" and "filter" call when the node type is STRING.
lambda_string_expr_content_nodes = get_lambda_string_expr_content(string_node)
if lambda_string_expr_content_nodes is not None:
self._enter_lambda_str_expr_content_node(
lambda_string_expr_content_nodes,
is_on_lambda_body=is_on_lambda_body
)
# Classify the 1st argument node of "call" and "function" call when the node type is STRING.
func_ref_expr_content_nodes = get_function_reference_string_expr_content(string_node)
if func_ref_expr_content_nodes is not None:
self._enter_func_ref_str_expr_content_node(
func_ref_expr_content_nodes,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body
)
return SKIP_CHILDREN
def _enter_lambda_str_expr_content_node(self, lambda_string_expr_content_nodes, is_on_lambda_body):
for string_expr_content_node in lambda_string_expr_content_nodes:
traverse(
string_expr_content_node,
on_enter=lambda node: self._enter_handler(
node,
is_on_lambda_str=True,
is_on_lambda_body=is_on_lambda_body,
)
)
def _enter_func_ref_str_expr_content_node(self, func_ref_id_nodes, is_on_lambda_str, is_on_lambda_body):
for func_ref_id_node in func_ref_id_nodes:
self._enter_identifier_terminate_node(
func_ref_id_node,
is_function=True,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body
)
def _enter_excmd_node(self, cmd_node, is_on_lambda_body, is_on_lambda_str):
# Care an assignment by using command ":redir"
redir_content_node = get_redir_content(cmd_node)
if not redir_content_node:
return
self._enter_identifier_like_node(
redir_content_node,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body,
is_declarative=True
)
def _enter_lambda_node(self, lambda_node, is_on_lambda_body, is_on_lambda_str):
# Function parameter names are in the r_list.
lambda_argument_nodes = lambda_node['rlist']
for lambda_argument_node in lambda_argument_nodes:
self._enter_identifier_terminate_node(
lambda_argument_node,
is_declarative=True,
is_lambda_argument=True,
is_on_lambda_str=is_on_lambda_str,
is_on_lambda_body=is_on_lambda_body,
)
# Traversing on lambda body context.
traverse(
lambda_node['left'],
on_enter=lambda node: self._enter_handler(
node,
is_on_lambda_body=True,
is_on_lambda_str=is_on_lambda_str,
)
)
# NOTE: Traversing to the lambda args and children was continued by the above traversing.
return SKIP_CHILDREN
|
Kuniwak/vint
|
vint/linting/policy/abstract_policy.py
|
AbstractPolicy.create_violation_report
|
python
|
def create_violation_report(self, node, lint_context):
return {
'name': self.name,
'level': self.level,
'description': self.description,
'reference': self.reference,
'position': {
'line': node['pos']['lnum'],
'column': node['pos']['col'],
'path': lint_context['lint_target'].path,
},
}
|
Returns a violation report for the node.
|
train
|
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/linting/policy/abstract_policy.py#L22-L34
| null |
class AbstractPolicy(object):
description = None
reference = None
level = None
def __init__(self):
self.name = self.__class__.__name__
def listen_node_types(self):
""" Listening node type.
is_valid will be called when a linter visit the listening node type.
"""
return []
def is_valid(self, node, lint_context):
""" Whether the specified node is valid for the policy. """
return True
def get_policy_config(self, lint_context):
"""
Returns a config of the concrete policy. For example, a config of ProhibitSomethingEvil is located on
config.policies.ProhibitSomethingEvil.
"""
policy_config = lint_context['config']\
.get('policies', {})\
.get(self.__class__.__name__, {})
return policy_config
def get_violation_if_found(self, node, lint_context):
""" Returns a violation if the node is invalid. """
if self.is_valid(node, lint_context):
return None
return self.create_violation_report(node, lint_context)
def get_policy_options(self, lint_context):
policy_section = lint_context['config'].get('policies', {})
return policy_section.get(self.name, {})
|
Kuniwak/vint
|
vint/linting/policy/abstract_policy.py
|
AbstractPolicy.get_policy_config
|
python
|
def get_policy_config(self, lint_context):
policy_config = lint_context['config']\
.get('policies', {})\
.get(self.__class__.__name__, {})
return policy_config
|
Returns a config of the concrete policy. For example, a config of ProhibitSomethingEvil is located on
config.policies.ProhibitSomethingEvil.
|
train
|
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/linting/policy/abstract_policy.py#L37-L46
| null |
class AbstractPolicy(object):
description = None
reference = None
level = None
def __init__(self):
self.name = self.__class__.__name__
def listen_node_types(self):
""" Listening node type.
is_valid will be called when a linter visit the listening node type.
"""
return []
def is_valid(self, node, lint_context):
""" Whether the specified node is valid for the policy. """
return True
def create_violation_report(self, node, lint_context):
""" Returns a violation report for the node. """
return {
'name': self.name,
'level': self.level,
'description': self.description,
'reference': self.reference,
'position': {
'line': node['pos']['lnum'],
'column': node['pos']['col'],
'path': lint_context['lint_target'].path,
},
}
def get_violation_if_found(self, node, lint_context):
""" Returns a violation if the node is invalid. """
if self.is_valid(node, lint_context):
return None
return self.create_violation_report(node, lint_context)
def get_policy_options(self, lint_context):
policy_section = lint_context['config'].get('policies', {})
return policy_section.get(self.name, {})
|
Kuniwak/vint
|
vint/linting/policy/abstract_policy.py
|
AbstractPolicy.get_violation_if_found
|
python
|
def get_violation_if_found(self, node, lint_context):
if self.is_valid(node, lint_context):
return None
return self.create_violation_report(node, lint_context)
|
Returns a violation if the node is invalid.
|
train
|
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/linting/policy/abstract_policy.py#L49-L54
|
[
"def is_valid(self, node, lint_context):\n \"\"\" Whether the specified node is valid for the policy. \"\"\"\n return True\n"
] |
class AbstractPolicy(object):
description = None
reference = None
level = None
def __init__(self):
self.name = self.__class__.__name__
def listen_node_types(self):
""" Listening node type.
is_valid will be called when a linter visit the listening node type.
"""
return []
def is_valid(self, node, lint_context):
""" Whether the specified node is valid for the policy. """
return True
def create_violation_report(self, node, lint_context):
""" Returns a violation report for the node. """
return {
'name': self.name,
'level': self.level,
'description': self.description,
'reference': self.reference,
'position': {
'line': node['pos']['lnum'],
'column': node['pos']['col'],
'path': lint_context['lint_target'].path,
},
}
def get_policy_config(self, lint_context):
"""
Returns a config of the concrete policy. For example, a config of ProhibitSomethingEvil is located on
config.policies.ProhibitSomethingEvil.
"""
policy_config = lint_context['config']\
.get('policies', {})\
.get(self.__class__.__name__, {})
return policy_config
def get_policy_options(self, lint_context):
policy_section = lint_context['config'].get('policies', {})
return policy_section.get(self.name, {})
|
Kuniwak/vint
|
vint/bootstrap.py
|
import_all_policies
|
python
|
def import_all_policies():
pkg_name = _get_policy_package_name_for_test()
pkg_path_list = pkg_name.split('.')
pkg_path = str(Path(_get_vint_root(), *pkg_path_list).resolve())
for _, module_name, is_pkg in pkgutil.iter_modules([pkg_path]):
if not is_pkg:
module_fqn = pkg_name + '.' + module_name
logging.debug('Loading the policy module: `{fqn}`'.format(fqn=module_fqn))
importlib.import_module(module_fqn)
|
Import all policies that were registered by vint.linting.policy_registry.
Dynamic policy importing is comprised of the 3 steps
1. Try to import all policy modules (then we can't know what policies exist)
2. In policy module, register itself by using vint.linting.policy_registry
3. After all policies registered by itself, we can get policy classes
|
train
|
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/bootstrap.py#L24-L41
|
[
"def _get_policy_package_name_for_test():\n \"\"\" Test hook method that returns a package name for policy modules. \"\"\"\n return 'vint.linting.policy'\n",
"def _get_vint_root():\n return Path(__file__).parent.parent\n"
] |
import importlib
import pkgutil
from pathlib import Path
from vint.linting.cli import start_cli
import logging
LOG_FORMAT = 'vint %(levelname)s: %(message)s'
def init_logger():
logging.basicConfig(format=LOG_FORMAT)
def init_linter():
import_all_policies()
def init_cli():
start_cli()
def _get_vint_root():
return Path(__file__).parent.parent
def _get_policy_package_name_for_test():
""" Test hook method that returns a package name for policy modules. """
return 'vint.linting.policy'
|
Kuniwak/vint
|
vint/ast/plugin/scope_plugin/scope_linker.py
|
ScopeLinker.process
|
python
|
def process(self, ast): # type: (Dict[str, Any]) -> None
id_classifier = IdentifierClassifier()
attached_ast = id_classifier.attach_identifier_attributes(ast)
# We are already in script local scope.
self._scope_tree_builder.enter_new_scope(ScopeVisibility.SCRIPT_LOCAL)
traverse(attached_ast,
on_enter=self._enter_handler,
on_leave=self._leave_handler)
self.scope_tree = self._scope_tree_builder.get_global_scope()
self.link_registry = self._scope_tree_builder.link_registry
|
Build a scope tree and links between scopes and identifiers by the
specified ast. You can access the built scope tree and the built links
by .scope_tree and .link_registry.
|
train
|
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/plugin/scope_plugin/scope_linker.py#L326-L342
|
[
"def traverse(node, on_enter=None, on_leave=None):\n \"\"\" Traverses the specified Vim script AST node (depth first order).\n The on_enter/on_leave handler will be called with the specified node and\n the children. You can skip traversing child nodes by returning\n SKIP_CHILDREN.\n \"\"\"\n node_type = NodeType(node['type'])\n\n if node_type not in ChildNodeAccessorMap:\n raise UnknownNodeTypeException(node_type)\n\n if on_enter:\n should_traverse_children = on_enter(node) is not SKIP_CHILDREN\n else:\n should_traverse_children = True\n\n if should_traverse_children:\n for property_accessor in ChildNodeAccessorMap[node_type]:\n accessor_func = property_accessor['accessor']\n prop_name = property_accessor['property_name']\n\n accessor_func(lambda child_node: traverse(child_node, on_enter, on_leave),\n node[prop_name])\n\n for handler in _traverser_extensions:\n handler(node, on_enter=on_enter, on_leave=on_leave)\n\n if on_leave:\n on_leave(node)\n",
"def attach_identifier_attributes(self, ast): # type: (Dict[str, Any]) -> Dict[str, Any]\n \"\"\" Attach 5 flags to the AST.\n\n - is dynamic: True if the identifier name can be determined by static analysis.\n - is member: True if the identifier is a member of a subscription/dot/slice node.\n - is declaring: True if the identifier is used to declare.\n - is autoload: True if the identifier is declared with autoload.\n - is function: True if the identifier is a function. Vim distinguish\n between function identifiers and variable identifiers.\n - is declarative parameter: True if the identifier is a declarative\n parameter. For example, the identifier \"param\" in Func(param) is a\n declarative parameter.\n - is on string expression context: True if the variable is on the\n string expression context. The string expression context is the\n string content on the 2nd argument of the map or filter function.\n - is lambda argument: True if the identifier is a lambda argument.\n \"\"\"\n redir_assignment_parser = RedirAssignmentParser()\n ast_with_parsed_redir = redir_assignment_parser.process(ast)\n\n map_and_filter_parser = CallNodeParser()\n ast_with_parse_map_and_filter_and_redir = \\\n map_and_filter_parser.process(ast_with_parsed_redir)\n\n traverse(\n ast_with_parse_map_and_filter_and_redir,\n on_enter=lambda node: self._enter_handler(\n node,\n is_on_lambda_str=None,\n is_on_lambda_body=None,\n )\n )\n return ast\n",
"def enter_new_scope(self, scope_visibility): # type: (ScopeVisibility) -> None\n current_scope = self.get_current_scope()\n new_scope = Scope(scope_visibility)\n self._add_symbol_table_variables(new_scope)\n\n # Build a lexical scope chain\n current_scope.child_scopes.append(new_scope)\n self._scope_stack.append(new_scope)\n",
"def get_global_scope(self): # type: () -> Scope\n return self._scope_stack[0]\n"
] |
class ScopeLinker(object):
""" A class for scope linkers.
The class link identifiers in the given AST node and the scopes where the
identifier will be declared or referenced.
"""
class ScopeTreeBuilder(object):
""" A class for event-driven builders to build a scope tree.
The class interest to scope-level events rather than AST-level events.
"""
def __init__(self):
self.link_registry = ScopeLinker.ScopeLinkRegistry()
global_scope = Scope(ScopeVisibility.GLOBAL_LIKE)
self._scope_stack = [global_scope]
self._add_symbol_table_variables(global_scope)
def enter_new_scope(self, scope_visibility): # type: (ScopeVisibility) -> None
current_scope = self.get_current_scope()
new_scope = Scope(scope_visibility)
self._add_symbol_table_variables(new_scope)
# Build a lexical scope chain
current_scope.child_scopes.append(new_scope)
self._scope_stack.append(new_scope)
def leave_current_scope(self): # type: () -> None
self._scope_stack.pop()
def get_global_scope(self): # type: () -> Scope
return self._scope_stack[0]
def get_script_local_scope(self): # type: () -> Scope
return self._scope_stack[1]
def get_current_scope(self): # type: () -> Scope
return self._scope_stack[-1]
def handle_new_parameter_found(self, id_node, is_lambda_argument): # type: (Dict[str, Any], bool) -> None
current_scope = self.get_current_scope()
self._add_parameter(current_scope, id_node, is_lambda_argument)
def handle_new_range_parameters_found(self): # type: () -> None
# We can access "a:firstline" and "a:lastline" if the function is
# declared with an attribute "range". See :func-range
firstline_node = _create_virtual_identifier_node('firstline')
lastline_node = _create_virtual_identifier_node('lastline')
current_scope = self.get_current_scope()
self._add_parameter(current_scope, firstline_node, is_explicit_lambda_argument=False)
self._add_parameter(current_scope, lastline_node, is_explicit_lambda_argument=False)
def handle_new_dict_parameter_found(self): # type: () -> None
# We can access "l:self" is declared with an attribute "dict".
# See :help self
current_scope = self.get_current_scope()
self._add_self_variable(current_scope)
def handle_new_parameters_list_and_length_found(self): # type: () -> None
# We can always access a:0 and a:000
# See :help internal-variables
param_length_node = _create_virtual_identifier_node('0')
param_list_node = _create_virtual_identifier_node('000')
current_scope = self.get_current_scope()
self._add_parameter(current_scope, param_length_node, is_explicit_lambda_argument=False)
self._add_parameter(current_scope, param_list_node, is_explicit_lambda_argument=False)
def handle_new_index_parameters_found(self, params_number): # type: (int) -> None
current_scope = self.get_current_scope()
# Max parameters number is 20. See :help E740
for variadic_index in range(20 - params_number):
# Variadic parameters named 1-based index.
variadic_param = _create_virtual_identifier_node(str(variadic_index + 1))
self._add_parameter(current_scope, variadic_param, is_explicit_lambda_argument=False)
def handle_new_variable_found(self, id_node): # type: (Dict[str, Any]) -> None
current_scope = self.get_current_scope()
scope_visibility_hint = detect_possible_scope_visibility(id_node, current_scope)
if scope_visibility_hint.scope_visibility is ScopeVisibility.UNANALYZABLE \
or scope_visibility_hint.scope_visibility is ScopeVisibility.INVALID:
# We can not do anything
return
is_function = is_function_identifier(id_node)
if is_builtin_variable(id_node):
self._add_builtin_variable(id_node,
is_function=is_function,
explicity=scope_visibility_hint.explicity)
return
objective_scope = self._get_objective_scope(id_node)
self._add_variable(objective_scope,
id_node,
is_function,
scope_visibility_hint.explicity)
def _get_objective_scope(self, node): # type: (Dict[str, Any]) -> Scope
current_scope = self.get_current_scope()
scope_visibility_hint = detect_possible_scope_visibility(
node, current_scope)
scope_visibility = scope_visibility_hint.scope_visibility
if scope_visibility is ScopeVisibility.GLOBAL_LIKE:
return self.get_global_scope()
if scope_visibility is ScopeVisibility.SCRIPT_LOCAL:
return self.get_script_local_scope()
# It is FUNCTION_LOCAL or LAMBDA scope
return current_scope
def handle_referencing_identifier_found(self, node): # type: (Dict[str, Any]) -> None
current_scope = self.get_current_scope()
self.link_registry.link_identifier_to_context_scope(node, current_scope)
def _add_parameter(self, objective_scope, id_node, is_explicit_lambda_argument):
# type: (Scope, Dict[str, Any], bool) -> None
if is_explicit_lambda_argument:
# Explicit lambda arguments can not have any explicit scope prefix.
variable_name = id_node['value']
explicity = ExplicityOfScopeVisibility.IMPLICIT_BUT_CONSTRAINED
else:
variable_name = 'a:' + id_node['value']
explicity = ExplicityOfScopeVisibility.EXPLICIT
self._register_variable(
objective_scope,
variable_name,
id_node,
explicity=explicity,
is_function=False,
is_builtin=False,
is_explicit_lambda_argument=is_explicit_lambda_argument
)
def _add_self_variable(self, objective_scope): # type: (Scope) -> None
variable_name = remove_optional_scope_prefix('l:self')
virtual_node = _create_virtual_identifier_node(variable_name)
self._register_variable(
objective_scope,
variable_name,
virtual_node,
explicity=ExplicityOfScopeVisibility.EXPLICIT,
is_function=False,
is_builtin=False,
is_explicit_lambda_argument=False
)
def _add_variable(self, objective_scope, id_node, is_function, explicity):
# type: (Scope, Dict[str, Any], bool, ExplicityOfScopeVisibility) -> None
variable_name = remove_optional_scope_prefix(id_node['value'])
self._register_variable(
objective_scope,
variable_name,
id_node,
explicity,
is_function,
is_builtin=False,
is_explicit_lambda_argument=False
)
def _add_builtin_variable(self, id_node, explicity, is_function):
# type: (Dict[str, Any], ExplicityOfScopeVisibility, bool) -> None
variable_name = remove_optional_scope_prefix(id_node['value'])
self._register_variable(
self.get_global_scope(),
variable_name,
id_node,
explicity,
is_function,
is_builtin=True,
is_explicit_lambda_argument=False
)
def _add_symbol_table_variables(self, objective_scope): # type: (Scope) -> None
# We can always access any symbol tables such as: "g:", "s:", "l:".
# See :help internal-variables
scope_visibility = objective_scope.scope_visibility
symbol_table_variable_names = SymbolTableVariableNames[scope_visibility]
for symbol_table_variable_name in symbol_table_variable_names:
virtual_node = _create_virtual_identifier_node(symbol_table_variable_name)
self._register_variable(
objective_scope,
symbol_table_variable_name,
virtual_node,
# NOTE: Symbol table always have scope prefix.
explicity=ExplicityOfScopeVisibility.EXPLICIT,
is_builtin=False,
is_function=False,
is_explicit_lambda_argument=False
)
def _register_variable(self, objective_scope, variable_name, node, explicity, is_function, is_builtin, is_explicit_lambda_argument):
# type: (Scope, str, Dict[str, Any], ExplicityOfScopeVisibility, bool, bool, bool) -> None
variable = VariableDeclaration(
explicity,
is_builtin,
is_explicit_lambda_argument
)
if is_function:
objective_variable_list = objective_scope.functions
else:
objective_variable_list = objective_scope.variables
same_name_variables = objective_variable_list.setdefault(variable_name, [])
same_name_variables.append(variable)
self.link_registry.link_variable_to_declarative_identifier(variable, node)
current_scope = self.get_current_scope()
self.link_registry.link_identifier_to_context_scope(node, current_scope)
class ScopeLinkRegistry(object):
""" A class for registry services for links between scopes and
identifiers.
"""
def __init__(self):
self._vars_to_declarative_ids_map = {} # type: Dict[int, Dict[str, Any]]
self._ids_to_scopes_map = {} # type: Dict[int, Scope]
def link_variable_to_declarative_identifier(self, variable, declaring_id_node):
# type: (VariableDeclaration, Dict[str, Any]) -> None
self._vars_to_declarative_ids_map[id(variable)] = declaring_id_node
def get_declarative_identifier_by_variable(self, variable): # type: (VariableDeclaration) -> Dict[str, Any]
variable_id = id(variable)
return self._vars_to_declarative_ids_map.get(variable_id)
def link_identifier_to_context_scope(self, decl_or_ref_id_node, scope): # type: (Dict[str, Any], Scope) -> None
""" Link declarative identifier node or reference identifier node to
the lexical context scope that the identifier is presented at. """
node_id = id(decl_or_ref_id_node)
self._ids_to_scopes_map[node_id] = scope
def get_context_scope_by_identifier(self, decl_or_ref_id_node): # type: (Dict[str, Any]) -> Scope
""" Return the lexical context scope that the identifier is presented at by
a declarative identifier node or a reference identifier node """
node_id = id(decl_or_ref_id_node)
return self._ids_to_scopes_map.get(node_id)
def __init__(self):
self.scope_tree = None # type: Union[Scope, None]
self.link_registry = None # type: ScopeLinker.ScopeLinkRegistry
self._scope_tree_builder = ScopeLinker.ScopeTreeBuilder()
def _find_variable_like_nodes(self, node): # type: (Dict[str, Any]) -> None
if not is_analyzable_identifier(node):
return
if is_analyzable_declarative_identifier(node):
self._scope_tree_builder.handle_new_variable_found(node)
return
self._scope_tree_builder.handle_referencing_identifier_found(node)
def _enter_handler(self, node): # type: (Dict[str, Any]) -> None
node_type = NodeType(node['type'])
if node_type is NodeType.FUNCTION:
return self._handle_function_node(node)
elif node_type is NodeType.LAMBDA:
return self._handle_lambda_node(node)
self._find_variable_like_nodes(node)
def _handle_function_node(self, func_node): # type: (Dict[str, Any]) -> None
# We should interrupt traversing, because a node of the function
# name should be added to the parent scope before the current
# scope switched to a new scope of the function.
# We approach to it by the following 5 steps.
# 1. Add the function to the current scope
# 2. Create a new scope of the function
# 3. The current scope point to the new scope
# 4. Add parameters to the new scope
# 5. Add variables in the function body to the new scope
# 1. Add the function to the current scope
func_name_node = func_node['left']
traverse(func_name_node, on_enter=self._find_variable_like_nodes)
# 2. Create a new scope of the function
# 3. The current scope point to the new scope
self._scope_tree_builder.enter_new_scope(ScopeVisibility.FUNCTION_LOCAL)
has_variadic = False
# 4. Add parameters to the new scope
param_nodes = func_node['rlist']
for param_node in param_nodes:
if param_node['value'] == '...':
has_variadic = True
else:
# the param_node type is always NodeType.IDENTIFIER
self._scope_tree_builder.handle_new_parameter_found(param_node, is_lambda_argument=False)
# We can always access a:0, a:000
self._scope_tree_builder.handle_new_parameters_list_and_length_found()
# In a variadic function, we can access a:1 ... a:n
# (n = 20 - explicit parameters length). See :help a:0
if has_variadic:
# -1 means ignore '...'
self._scope_tree_builder.handle_new_index_parameters_found(len(param_nodes) - 1)
# We can access "a:firstline" and "a:lastline" if the function is
# declared with an attribute "range". See :func-range
attr = func_node['attr']
is_declared_with_range = attr['range'] is not 0
if is_declared_with_range:
self._scope_tree_builder.handle_new_range_parameters_found()
# We can access "l:self" is declared with an attribute "dict" or
# the function is a member of a dict. See :help self
is_declared_with_dict = attr['dict'] is not 0 \
or NodeType(func_name_node['type']) in FunctionNameNodesDeclaringVariableSelf
if is_declared_with_dict:
self._scope_tree_builder.handle_new_dict_parameter_found()
# 5. Add variables in the function body to the new scope
func_body_nodes = func_node['body']
for func_body_node in func_body_nodes:
traverse(func_body_node,
on_enter=self._enter_handler,
on_leave=self._leave_handler)
# Skip child nodes traversing
return SKIP_CHILDREN
def _handle_lambda_node(self, lambda_node): # type: (Dict[str, Any]) -> Optional[str]
# This method do the following 4 steps:
# 1. Create a new scope of the lambda
# 2. The current scope point to the new scope
# 3. Add parameters to the new scope
# 4. Add variables in the function body to the new scope
# 1. Create a new scope of the function
# 2. The current scope point to the new scope
self._scope_tree_builder.enter_new_scope(ScopeVisibility.LAMBDA)
# 3. Add parameters to the new scope
has_variadic_symbol = False
param_nodes = lambda_node['rlist']
for param_node in param_nodes:
if param_node['value'] == '...':
has_variadic_symbol = True
else:
# the param_node type is always NodeType.IDENTIFIER
self._scope_tree_builder.handle_new_parameter_found(param_node, is_lambda_argument=True)
# We can access a:0 and a:000 when the number of arguments is less than actual parameters.
self._scope_tree_builder.handle_new_parameters_list_and_length_found()
# In the context of lambda, we can access a:1 ... a:n when the number of arguments is less than actual parameters.
# XXX: We can not know what a:N we can access by static analysis, so we assume it is 20.
if has_variadic_symbol:
lambda_args_len = len(param_nodes) - 1
else:
lambda_args_len = len(param_nodes)
self._scope_tree_builder.handle_new_index_parameters_found(lambda_args_len)
# 4. Add variables in the function body to the new scope
traverse(lambda_node['left'],
on_enter=self._enter_handler,
on_leave=self._leave_handler)
# Skip child nodes traversing
return SKIP_CHILDREN
def _leave_handler(self, node):  # type: (Dict[str, Any]) -> None
    """Close the scope that was opened for a FUNCTION or LAMBDA node."""
    leaving_type = NodeType(node['type'])
    # Both node kinds opened a scope on enter; leave it symmetrically.
    if leaving_type in (NodeType.FUNCTION, NodeType.LAMBDA):
        self._scope_tree_builder.leave_current_scope()
|
emre/storm
|
storm/kommandr.py
|
prog.command
|
python
|
def command(self, *args, **kwargs):
    """Convenient decorator simply creates corresponding command.

    Supports both bare usage (``@command``) and parameterized usage
    (``@command('name', ...)``); the latter returns a decorator that
    forwards the extra arguments to ``_generate_command``.
    """
    # Bare usage: the sole positional argument is the decorated function.
    # NOTE: the original check used collections.Callable, which was
    # removed in Python 3.10; the callable() builtin is the compatible
    # equivalent.
    if len(args) == 1 and callable(args[0]):
        return self._generate_command(args[0])

    # Parameterized usage: defer generation until the function arrives.
    def _command(func):
        return self._generate_command(func, *args, **kwargs)
    return _command
|
Convenience decorator that simply creates the corresponding command
|
train
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/kommandr.py#L96-L103
| null |
class prog(object):
"""Class to hold an isolated command namespace"""
_COMMAND_FLAG = '_command'
_POSITIONAL = type('_positional', (object,), {})
def __init__(self, **kwargs):
"""Constructor
:param version: program version
:param type: str
:param **kwargs: keyword arguments those passed through to
:py:class:``argparse.ArgumentParser`` constructor
:param type: dict
"""
kwargs.update({
'formatter_class': argparse.RawTextHelpFormatter,
'epilog': "storm is a command line tool to manage ssh connections.\n"
"get more information at: github.com/emre/storm",
})
self.parser = argparse.ArgumentParser(**kwargs)
self.parser.register('action', 'parsers', AliasedSubParsersAction)
self.parser.formatter_class.width = 300
self.parser.add_argument(
'-v',
'--version',
action='version',
version=__version__
)
self.subparsers = self.parser.add_subparsers(
title="commands", metavar="COMMAND"
)
self.subparsers.required = True
def arg(self, arg_name, *args, **kwargs):
"""Decorator function configures any arg by given ``arg_name`` with
supplied ``args`` and ``kwargs`` passing them transparently to
:py:func:``argparse.ArgumentParser.add_argument`` function
:param arg_name: arg name to configure
:param type: str
"""
def wrapper(func):
if not getattr(func, 'argopts', None):
func.argopts = {}
func.argopts[arg_name] = (args, kwargs)
return func
return wrapper
def _generate_command(self, func, name=None, **kwargs):
"""Generates a command parser for given func.
:param func: func to generate related command parser
:param type: function
:param name: command name
:param type: str
:param **kwargs: keyword arguments those passed through to
:py:class:``argparse.ArgumentParser.add_parser``
:param type: dict
"""
func_pointer = name or func.__name__
storm_config = get_storm_config()
aliases, additional_kwarg = None, None
if 'aliases' in storm_config:
for command, alias_list in \
six.iteritems(storm_config.get("aliases")):
if func_pointer == command:
aliases = alias_list
break
func_help = func.__doc__ and func.__doc__.strip()
subparser = self.subparsers.add_parser(name or func.__name__,
aliases=aliases,
help=func_help)
spec = inspect.getargspec(func)
opts = reversed(list(izip_longest(reversed(spec.args or []),
reversed(spec.defaults or []),
fillvalue=self._POSITIONAL())))
for k, v in opts:
argopts = getattr(func, 'argopts', {})
args, kwargs = argopts.get(k, ([], {}))
args = list(args)
is_positional = isinstance(v, self._POSITIONAL)
options = [arg for arg in args if arg.startswith('-')]
if isinstance(v, list):
kwargs.update({
'action': 'append',
})
if is_positional:
if options:
args = options
kwargs.update({'required': True, 'dest': k})
else:
args = [k]
else:
args = options or ['--%s' % k]
kwargs.update({'default': v, 'dest': k})
arg = subparser.add_argument(*args, **kwargs)
subparser.set_defaults(**{self._COMMAND_FLAG: func})
return func
def execute(self, arg_list):
"""Main function to parse and dispatch commands by given ``arg_list``
:param arg_list: all arguments provided by the command line
:param type: list
"""
arg_map = self.parser.parse_args(arg_list).__dict__
command = arg_map.pop(self._COMMAND_FLAG)
return command(**arg_map)
def __call__(self):
"""Calls :py:func:``execute`` with :py:class:``sys.argv`` excluding
script name which comes first.
"""
self.execute(sys.argv[1:])
|
emre/storm
|
storm/kommandr.py
|
prog.arg
|
python
|
def arg(self, arg_name, *args, **kwargs):
    """Record add_argument() options for one parameter of a command.

    The returned decorator stashes ``args``/``kwargs`` on the decorated
    function under ``func.argopts[arg_name]``; they are later forwarded
    verbatim to :py:func:`argparse.ArgumentParser.add_argument`.

    :param arg_name: name of the parameter to configure
    :param type: str
    """
    def wrapper(func):
        # First decoration on this function: create the registry.
        # (The truthiness guard also replaces a falsy existing value,
        # matching the original behavior.)
        existing = getattr(func, 'argopts', None)
        if not existing:
            func.argopts = {}
        func.argopts[arg_name] = (args, kwargs)
        return func
    return wrapper
|
Decorator function configures any arg by given ``arg_name`` with
supplied ``args`` and ``kwargs`` passing them transparently to
:py:func:``argparse.ArgumentParser.add_argument`` function
:param arg_name: arg name to configure
:param type: str
|
train
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/kommandr.py#L105-L119
| null |
class prog(object):
"""Class to hold an isolated command namespace"""
_COMMAND_FLAG = '_command'
_POSITIONAL = type('_positional', (object,), {})
def __init__(self, **kwargs):
"""Constructor
:param version: program version
:param type: str
:param **kwargs: keyword arguments those passed through to
:py:class:``argparse.ArgumentParser`` constructor
:param type: dict
"""
kwargs.update({
'formatter_class': argparse.RawTextHelpFormatter,
'epilog': "storm is a command line tool to manage ssh connections.\n"
"get more information at: github.com/emre/storm",
})
self.parser = argparse.ArgumentParser(**kwargs)
self.parser.register('action', 'parsers', AliasedSubParsersAction)
self.parser.formatter_class.width = 300
self.parser.add_argument(
'-v',
'--version',
action='version',
version=__version__
)
self.subparsers = self.parser.add_subparsers(
title="commands", metavar="COMMAND"
)
self.subparsers.required = True
def command(self, *args, **kwargs):
"""Convenient decorator simply creates corresponding command"""
if len(args) == 1 and isinstance(args[0], collections.Callable):
return self._generate_command(args[0])
else:
def _command(func):
return self._generate_command(func, *args, **kwargs)
return _command
def _generate_command(self, func, name=None, **kwargs):
"""Generates a command parser for given func.
:param func: func to generate related command parser
:param type: function
:param name: command name
:param type: str
:param **kwargs: keyword arguments those passed through to
:py:class:``argparse.ArgumentParser.add_parser``
:param type: dict
"""
func_pointer = name or func.__name__
storm_config = get_storm_config()
aliases, additional_kwarg = None, None
if 'aliases' in storm_config:
for command, alias_list in \
six.iteritems(storm_config.get("aliases")):
if func_pointer == command:
aliases = alias_list
break
func_help = func.__doc__ and func.__doc__.strip()
subparser = self.subparsers.add_parser(name or func.__name__,
aliases=aliases,
help=func_help)
spec = inspect.getargspec(func)
opts = reversed(list(izip_longest(reversed(spec.args or []),
reversed(spec.defaults or []),
fillvalue=self._POSITIONAL())))
for k, v in opts:
argopts = getattr(func, 'argopts', {})
args, kwargs = argopts.get(k, ([], {}))
args = list(args)
is_positional = isinstance(v, self._POSITIONAL)
options = [arg for arg in args if arg.startswith('-')]
if isinstance(v, list):
kwargs.update({
'action': 'append',
})
if is_positional:
if options:
args = options
kwargs.update({'required': True, 'dest': k})
else:
args = [k]
else:
args = options or ['--%s' % k]
kwargs.update({'default': v, 'dest': k})
arg = subparser.add_argument(*args, **kwargs)
subparser.set_defaults(**{self._COMMAND_FLAG: func})
return func
def execute(self, arg_list):
"""Main function to parse and dispatch commands by given ``arg_list``
:param arg_list: all arguments provided by the command line
:param type: list
"""
arg_map = self.parser.parse_args(arg_list).__dict__
command = arg_map.pop(self._COMMAND_FLAG)
return command(**arg_map)
def __call__(self):
"""Calls :py:func:``execute`` with :py:class:``sys.argv`` excluding
script name which comes first.
"""
self.execute(sys.argv[1:])
|
emre/storm
|
storm/kommandr.py
|
prog._generate_command
|
python
|
def _generate_command(self, func, name=None, **kwargs):
"""Build an argparse sub-parser for *func* and register it as a command.

Parameters are derived from the function signature: arguments without a
default become positionals, the rest become ``--flags`` carrying that
default. Options previously stashed by the @arg decorator
(``func.argopts``) override the generated add_argument() call. Returns
*func* unchanged so this can be used from a decorator.
"""
# Aliases for this command may be declared in the user's storm config.
func_pointer = name or func.__name__
storm_config = get_storm_config()
# NOTE(review): additional_kwarg is assigned but never used below.
aliases, additional_kwarg = None, None
if 'aliases' in storm_config:
for command, alias_list in \
six.iteritems(storm_config.get("aliases")):
if func_pointer == command:
aliases = alias_list
break
# The function docstring doubles as the command's help text.
func_help = func.__doc__ and func.__doc__.strip()
subparser = self.subparsers.add_parser(name or func.__name__,
aliases=aliases,
help=func_help)
# Pair each parameter with its default, aligning from the right so
# parameters without a default get a _POSITIONAL sentinel instead.
# NOTE(review): inspect.getargspec is deprecated and removed in
# Python 3.11 — confirm the supported interpreter range.
spec = inspect.getargspec(func)
opts = reversed(list(izip_longest(reversed(spec.args or []),
reversed(spec.defaults or []),
fillvalue=self._POSITIONAL())))
for k, v in opts:
# Options registered via the @arg decorator take precedence.
argopts = getattr(func, 'argopts', {})
args, kwargs = argopts.get(k, ([], {}))
args = list(args)
is_positional = isinstance(v, self._POSITIONAL)
options = [arg for arg in args if arg.startswith('-')]
# A list default means the flag may be repeated ('append' action).
if isinstance(v, list):
kwargs.update({
'action': 'append',
})
# No default value: required argument. Use the @arg-supplied flags
# if any were given, otherwise expose it as a bare positional.
if is_positional:
if options:
args = options
kwargs.update({'required': True, 'dest': k})
else:
args = [k]
else:
args = options or ['--%s' % k]
kwargs.update({'default': v, 'dest': k})
arg = subparser.add_argument(*args, **kwargs)
# Remember which function to dispatch when this sub-command is chosen.
subparser.set_defaults(**{self._COMMAND_FLAG: func})
return func
|
Generates a command parser for given func.
:param func: func to generate related command parser
:param type: function
:param name: command name
:param type: str
:param **kwargs: keyword arguments those passed through to
:py:class:``argparse.ArgumentParser.add_parser``
:param type: dict
|
train
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/kommandr.py#L121-L177
| null |
class prog(object):
"""Class to hold an isolated command namespace"""
_COMMAND_FLAG = '_command'
_POSITIONAL = type('_positional', (object,), {})
def __init__(self, **kwargs):
"""Constructor
:param version: program version
:param type: str
:param **kwargs: keyword arguments those passed through to
:py:class:``argparse.ArgumentParser`` constructor
:param type: dict
"""
kwargs.update({
'formatter_class': argparse.RawTextHelpFormatter,
'epilog': "storm is a command line tool to manage ssh connections.\n"
"get more information at: github.com/emre/storm",
})
self.parser = argparse.ArgumentParser(**kwargs)
self.parser.register('action', 'parsers', AliasedSubParsersAction)
self.parser.formatter_class.width = 300
self.parser.add_argument(
'-v',
'--version',
action='version',
version=__version__
)
self.subparsers = self.parser.add_subparsers(
title="commands", metavar="COMMAND"
)
self.subparsers.required = True
def command(self, *args, **kwargs):
"""Convenient decorator simply creates corresponding command"""
if len(args) == 1 and isinstance(args[0], collections.Callable):
return self._generate_command(args[0])
else:
def _command(func):
return self._generate_command(func, *args, **kwargs)
return _command
def arg(self, arg_name, *args, **kwargs):
"""Decorator function configures any arg by given ``arg_name`` with
supplied ``args`` and ``kwargs`` passing them transparently to
:py:func:``argparse.ArgumentParser.add_argument`` function
:param arg_name: arg name to configure
:param type: str
"""
def wrapper(func):
if not getattr(func, 'argopts', None):
func.argopts = {}
func.argopts[arg_name] = (args, kwargs)
return func
return wrapper
def execute(self, arg_list):
"""Main function to parse and dispatch commands by given ``arg_list``
:param arg_list: all arguments provided by the command line
:param type: list
"""
arg_map = self.parser.parse_args(arg_list).__dict__
command = arg_map.pop(self._COMMAND_FLAG)
return command(**arg_map)
def __call__(self):
"""Calls :py:func:``execute`` with :py:class:``sys.argv`` excluding
script name which comes first.
"""
self.execute(sys.argv[1:])
|
emre/storm
|
storm/kommandr.py
|
prog.execute
|
python
|
def execute(self, arg_list):
    """Parse ``arg_list`` and dispatch to the selected command function.

    :param arg_list: all arguments provided by the command line
    :param type: list
    :returns: whatever the dispatched command returns
    """
    parsed = vars(self.parser.parse_args(arg_list))
    # The sub-parser stored the handler under the command flag key;
    # everything left over is the handler's keyword arguments.
    handler = parsed.pop(self._COMMAND_FLAG)
    return handler(**parsed)
|
Main function to parse and dispatch commands by given ``arg_list``
:param arg_list: all arguments provided by the command line
:param type: list
|
train
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/kommandr.py#L179-L188
| null |
class prog(object):
"""Class to hold an isolated command namespace"""
_COMMAND_FLAG = '_command'
_POSITIONAL = type('_positional', (object,), {})
def __init__(self, **kwargs):
"""Constructor
:param version: program version
:param type: str
:param **kwargs: keyword arguments those passed through to
:py:class:``argparse.ArgumentParser`` constructor
:param type: dict
"""
kwargs.update({
'formatter_class': argparse.RawTextHelpFormatter,
'epilog': "storm is a command line tool to manage ssh connections.\n"
"get more information at: github.com/emre/storm",
})
self.parser = argparse.ArgumentParser(**kwargs)
self.parser.register('action', 'parsers', AliasedSubParsersAction)
self.parser.formatter_class.width = 300
self.parser.add_argument(
'-v',
'--version',
action='version',
version=__version__
)
self.subparsers = self.parser.add_subparsers(
title="commands", metavar="COMMAND"
)
self.subparsers.required = True
def command(self, *args, **kwargs):
"""Convenient decorator simply creates corresponding command"""
if len(args) == 1 and isinstance(args[0], collections.Callable):
return self._generate_command(args[0])
else:
def _command(func):
return self._generate_command(func, *args, **kwargs)
return _command
def arg(self, arg_name, *args, **kwargs):
"""Decorator function configures any arg by given ``arg_name`` with
supplied ``args`` and ``kwargs`` passing them transparently to
:py:func:``argparse.ArgumentParser.add_argument`` function
:param arg_name: arg name to configure
:param type: str
"""
def wrapper(func):
if not getattr(func, 'argopts', None):
func.argopts = {}
func.argopts[arg_name] = (args, kwargs)
return func
return wrapper
def _generate_command(self, func, name=None, **kwargs):
"""Generates a command parser for given func.
:param func: func to generate related command parser
:param type: function
:param name: command name
:param type: str
:param **kwargs: keyword arguments those passed through to
:py:class:``argparse.ArgumentParser.add_parser``
:param type: dict
"""
func_pointer = name or func.__name__
storm_config = get_storm_config()
aliases, additional_kwarg = None, None
if 'aliases' in storm_config:
for command, alias_list in \
six.iteritems(storm_config.get("aliases")):
if func_pointer == command:
aliases = alias_list
break
func_help = func.__doc__ and func.__doc__.strip()
subparser = self.subparsers.add_parser(name or func.__name__,
aliases=aliases,
help=func_help)
spec = inspect.getargspec(func)
opts = reversed(list(izip_longest(reversed(spec.args or []),
reversed(spec.defaults or []),
fillvalue=self._POSITIONAL())))
for k, v in opts:
argopts = getattr(func, 'argopts', {})
args, kwargs = argopts.get(k, ([], {}))
args = list(args)
is_positional = isinstance(v, self._POSITIONAL)
options = [arg for arg in args if arg.startswith('-')]
if isinstance(v, list):
kwargs.update({
'action': 'append',
})
if is_positional:
if options:
args = options
kwargs.update({'required': True, 'dest': k})
else:
args = [k]
else:
args = options or ['--%s' % k]
kwargs.update({'default': v, 'dest': k})
arg = subparser.add_argument(*args, **kwargs)
subparser.set_defaults(**{self._COMMAND_FLAG: func})
return func
def __call__(self):
"""Calls :py:func:``execute`` with :py:class:``sys.argv`` excluding
script name which comes first.
"""
self.execute(sys.argv[1:])
|
emre/storm
|
storm/parsers/ssh_uri_parser.py
|
parse
|
python
|
def parse(uri, user=None, port=22):
    """Split an ssh-style connection string into (user, host, port).

    Examples::

        root@google.com         -> ('root', 'google.com', 22)
        noreply@facebook.com:22 -> ('noreply', 'facebook.com', 22)
        facebook.com:3306       -> ($USER, 'facebook.com', 3306)
        twitter.com             -> ($USER, 'twitter.com', 22)

    Defaults: port 22; user from getpass.getuser().

    :raises ValueError: if the port portion is not numeric
    """
    uri = uri.strip()

    # Fall back to the local login name when no user was supplied.
    user = user or getpass.getuser()

    # An explicit "user@" prefix overrides the default user.
    if '@' in uri:
        user = uri.partition('@')[0]

    # A ":port" suffix overrides the default port; it must be numeric.
    if ':' in uri:
        try:
            port = int(uri.rsplit(':', 1)[-1])
        except ValueError:
            raise ValueError("port must be numeric.")

    # The host is what remains after stripping the port and user parts.
    host = re.sub(".*@", "", re.sub(":.*", "", uri))

    return (user, host, port)
|
parses ssh connection uri-like sentences.
ex:
- root@google.com -> (root, google.com, 22)
- noreply@facebook.com:22 -> (noreply, facebook.com, 22)
- facebook.com:3306 -> ($USER, facebook.com, 3306)
- twitter.com -> ($USER, twitter.com, 22)
default port: 22
default user: $USER (getpass.getuser())
|
train
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/parsers/ssh_uri_parser.py#L7-L47
| null |
# -*- coding: utf-8 -*-
import getpass
import re
|
emre/storm
|
storm/__main__.py
|
add
|
python
|
def add(name, connection_uri, id_file="", o=[], config=None):
    """
    Adds a new entry to sshconfig.
    """
    storm_ = get_storm_instance(config)
    try:
        # "@" is reserved for the user@host URI syntax, not entry names.
        if '@' in name:
            raise ValueError('invalid value: "@" cannot be used in name.')

        user, host, port = parse(
            connection_uri,
            user=get_default("user", storm_.defaults),
            port=get_default("port", storm_.defaults)
        )
        storm_.add_entry(name, host, user, port, id_file, o)

        success = get_formatted_message(
            '{0} added to your ssh config. you can connect '
            'it by typing "ssh {0}".'.format(name),
            'success')
        print(success)
    except ValueError as error:
        print(get_formatted_message(error, 'error'), file=sys.stderr)
        sys.exit(1)
|
Adds a new entry to sshconfig.
|
train
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L34-L63
|
[
"def parse(uri, user=None, port=22):\n \"\"\"\n parses ssh connection uri-like sentences.\n ex:\n - root@google.com -> (root, google.com, 22)\n - noreply@facebook.com:22 -> (noreply, facebook.com, 22)\n - facebook.com:3306 -> ($USER, facebook.com, 3306)\n - twitter.com -> ($USER, twitter.com, 22)\n\n default port: 22\n default user: $USER (getpass.getuser())\n \"\"\"\n\n uri = uri.strip()\n\n if not user:\n user = getpass.getuser()\n\n # get user\n if '@' in uri:\n user = uri.split(\"@\")[0]\n\n # get port\n if ':' in uri:\n port = uri.split(\":\")[-1]\n\n try:\n port = int(port)\n except ValueError:\n raise ValueError(\"port must be numeric.\")\n\n # get host\n uri = re.sub(\":.*\", \"\", uri)\n uri = re.sub(\".*@\", \"\", uri)\n host = uri\n\n return (\n user,\n host,\n port,\n )\n",
"def get_default(key, defaults={}):\n\n if key == 'port':\n return defaults.get(\"port\", DEFAULT_PORT)\n\n if key == 'user':\n return defaults.get(\"user\", DEFAULT_USER)\n\n return defaults.get(key)",
"def get_formatted_message(message, format_type):\n\n # required for CLI test suite. see tests.py\n if 'TESTMODE' in os.environ and not isinstance(message, ValueError):\n for color_code in COLOR_CODES:\n message = message.replace(color_code, \"\")\n\n return \"{0} {1}\".format(format_type, message)\n\n format_typed = fixed_width(format_type, 8)\n all_message = \"\"\n message = \" %s\" % message\n\n if format_type == 'error':\n all_message = colored(format_typed, 'white', 'on_red')\n if format_type == 'success':\n all_message = colored(format_typed, 'white', 'on_green')\n\n return all_message + message\n",
"def get_storm_instance(config_file=None):\n return Storm(config_file)\n"
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
try:
import __builtin__ as builtins
except ImportError:
import builtins
from storm import Storm
from storm.parsers.ssh_uri_parser import parse
from storm.utils import (get_formatted_message, colored)
from storm.kommandr import *
from storm.defaults import get_default
from storm import __version__
import sys
def get_storm_instance(config_file=None):
return Storm(config_file)
@command('version')
def version():
"""
prints the working storm(ssh) version.
"""
print(__version__)
@command('add')
@command('clone')
def clone(name, clone_name, config=None):
"""
Clone an entry to the sshconfig.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, clone_name)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(clone_name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('move')
def move(name, entry_name, config=None):
"""
Move an entry to the sshconfig.
"""
storm_ = get_storm_instance(config)
try:
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, entry_name, keep_original=False)
print(
get_formatted_message(
'{0} moved in ssh config. you can '
'connect it by typing "ssh {0}".'.format(
entry_name
),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('edit')
def edit(name, connection_uri, id_file="", o=[], config=None):
"""
Edits the related entry in ssh config.
"""
storm_ = get_storm_instance(config)
try:
if ',' in name:
name = " ".join(name.split(","))
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.edit_entry(name, host, user, port, id_file, o)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('update')
def update(name, connection_uri="", id_file="", o=[], config=None):
"""
Enhanced version of the edit command featuring multiple
edits using regular expressions to match entries
"""
storm_ = get_storm_instance(config)
settings = {}
if id_file != "":
settings['identityfile'] = id_file
for option in o:
k, v = option.split("=")
settings[k] = v
try:
storm_.update_entry(name, **settings)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('delete')
def delete(name, config=None):
"""
Deletes a single host.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_entry(name)
print(
get_formatted_message(
'hostname "{0}" deleted successfully.'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('list')
def list(config=None):
"""
Lists all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
result = colored('Listing entries:', 'white', attrs=["bold", ]) + "\n\n"
result_stack = ""
for host in storm_.list_entries(True):
if host.get("type") == 'entry':
if not host.get("host") == "*":
result += " {0} -> {1}@{2}:{3}".format(
colored(host["host"], 'green', attrs=["bold", ]),
host.get("options").get(
"user", get_default("user", storm_.defaults)
),
host.get("options").get(
"hostname", "[hostname_not_specified]"
),
host.get("options").get(
"port", get_default("port", storm_.defaults)
)
)
extra = False
for key, value in six.iteritems(host.get("options")):
if not key in ["user", "hostname", "port"]:
if not extra:
custom_options = colored(
'\n\t[custom options] ', 'white'
)
result += " {0}".format(custom_options)
extra = True
if isinstance(value, collections.Sequence):
if isinstance(value, builtins.list):
value = ",".join(value)
result += "{0}={1} ".format(key, value)
if extra:
result = result[0:-1]
result += "\n\n"
else:
result_stack = colored(
" (*) General options: \n", "green", attrs=["bold",]
)
for key, value in six.iteritems(host.get("options")):
if isinstance(value, type([])):
result_stack += "\t {0}: ".format(
colored(key, "magenta")
)
result_stack += ', '.join(value)
result_stack += "\n"
else:
result_stack += "\t {0}: {1}\n".format(
colored(key, "magenta"),
value,
)
result_stack = result_stack[0:-1] + "\n"
result += result_stack
print(get_formatted_message(result, ""))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('search')
def search(search_text, config=None):
"""
Searches entries by given search text.
"""
storm_ = get_storm_instance(config)
try:
results = storm_.search_host(search_text)
if len(results) == 0:
print ('no results found.')
if len(results) > 0:
message = 'Listing results for {0}:\n'.format(search_text)
message += "".join(results)
print(message)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('delete_all')
def delete_all(config=None):
"""
Deletes all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_all_entries()
print(get_formatted_message('all entries deleted.', 'success'))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('backup')
def backup(target_file, config=None):
"""
Backups the main ssh configuration into target file.
"""
storm_ = get_storm_instance(config)
try:
storm_.backup(target_file)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('web')
@arg('port', nargs='?', default=9002, type=int)
@arg('theme', nargs='?', default="modern", choices=['modern', 'black', 'storm'])
@arg('debug', action='store_true', default=False)
def web(port, debug=False, theme="modern", ssh_config=None):
"""Starts the web UI."""
from storm import web as _web
_web.run(port, debug, theme, ssh_config)
if __name__ == '__main__':
sys.exit(main())
|
emre/storm
|
storm/__main__.py
|
clone
|
python
|
def clone(name, clone_name, config=None):
    """
    Clone an entry to the sshconfig.
    """
    storm_ = get_storm_instance(config)
    try:
        # Entry names may not contain "@" (reserved for user@host URIs).
        if '@' in name:
            raise ValueError('invalid value: "@" cannot be used in name.')

        storm_.clone_entry(name, clone_name)

        message = get_formatted_message(
            '{0} added to your ssh config. you can connect '
            'it by typing "ssh {0}".'.format(clone_name),
            'success')
        print(message)
    except ValueError as error:
        print(get_formatted_message(error, 'error'), file=sys.stderr)
        sys.exit(1)
|
Clone an entry in the ssh config under a new name.
|
train
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L67-L90
|
[
"def get_formatted_message(message, format_type):\n\n # required for CLI test suite. see tests.py\n if 'TESTMODE' in os.environ and not isinstance(message, ValueError):\n for color_code in COLOR_CODES:\n message = message.replace(color_code, \"\")\n\n return \"{0} {1}\".format(format_type, message)\n\n format_typed = fixed_width(format_type, 8)\n all_message = \"\"\n message = \" %s\" % message\n\n if format_type == 'error':\n all_message = colored(format_typed, 'white', 'on_red')\n if format_type == 'success':\n all_message = colored(format_typed, 'white', 'on_green')\n\n return all_message + message\n",
"def get_storm_instance(config_file=None):\n return Storm(config_file)\n",
"def clone_entry(self, name, clone_name, keep_original=True):\n host = self.is_host_in(name, return_match=True)\n if not host:\n raise ValueError(ERRORS[\"not_found\"].format(name))\n\n # check if an entry with the clone name already exists \n if name == clone_name \\\n or self.is_host_in(clone_name, return_match=True) is not None:\n raise ValueError(ERRORS[\"already_in\"].format(clone_name))\n\n self.ssh_config.add_host(clone_name, host.get('options'))\n if not keep_original:\n self.ssh_config.delete_host(name)\n self.ssh_config.write_to_ssh_config()\n\n return True\n"
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
try:
import __builtin__ as builtins
except ImportError:
import builtins
from storm import Storm
from storm.parsers.ssh_uri_parser import parse
from storm.utils import (get_formatted_message, colored)
from storm.kommandr import *
from storm.defaults import get_default
from storm import __version__
import sys
def get_storm_instance(config_file=None):
return Storm(config_file)
@command('version')
def version():
"""
prints the working storm(ssh) version.
"""
print(__version__)
@command('add')
def add(name, connection_uri, id_file="", o=[], config=None):
"""
Adds a new entry to sshconfig.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.add_entry(name, host, user, port, id_file, o)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('clone')
@command('move')
def move(name, entry_name, config=None):
"""
Move an entry to the sshconfig.
"""
storm_ = get_storm_instance(config)
try:
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, entry_name, keep_original=False)
print(
get_formatted_message(
'{0} moved in ssh config. you can '
'connect it by typing "ssh {0}".'.format(
entry_name
),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('edit')
def edit(name, connection_uri, id_file="", o=[], config=None):
"""
Edits the related entry in ssh config.
"""
storm_ = get_storm_instance(config)
try:
if ',' in name:
name = " ".join(name.split(","))
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.edit_entry(name, host, user, port, id_file, o)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('update')
def update(name, connection_uri="", id_file="", o=[], config=None):
"""
Enhanced version of the edit command featuring multiple
edits using regular expressions to match entries
"""
storm_ = get_storm_instance(config)
settings = {}
if id_file != "":
settings['identityfile'] = id_file
for option in o:
k, v = option.split("=")
settings[k] = v
try:
storm_.update_entry(name, **settings)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('delete')
def delete(name, config=None):
"""
Deletes a single host.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_entry(name)
print(
get_formatted_message(
'hostname "{0}" deleted successfully.'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('list')
def list(config=None):
"""
Lists all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
result = colored('Listing entries:', 'white', attrs=["bold", ]) + "\n\n"
result_stack = ""
for host in storm_.list_entries(True):
if host.get("type") == 'entry':
if not host.get("host") == "*":
result += " {0} -> {1}@{2}:{3}".format(
colored(host["host"], 'green', attrs=["bold", ]),
host.get("options").get(
"user", get_default("user", storm_.defaults)
),
host.get("options").get(
"hostname", "[hostname_not_specified]"
),
host.get("options").get(
"port", get_default("port", storm_.defaults)
)
)
extra = False
for key, value in six.iteritems(host.get("options")):
if not key in ["user", "hostname", "port"]:
if not extra:
custom_options = colored(
'\n\t[custom options] ', 'white'
)
result += " {0}".format(custom_options)
extra = True
if isinstance(value, collections.Sequence):
if isinstance(value, builtins.list):
value = ",".join(value)
result += "{0}={1} ".format(key, value)
if extra:
result = result[0:-1]
result += "\n\n"
else:
result_stack = colored(
" (*) General options: \n", "green", attrs=["bold",]
)
for key, value in six.iteritems(host.get("options")):
if isinstance(value, type([])):
result_stack += "\t {0}: ".format(
colored(key, "magenta")
)
result_stack += ', '.join(value)
result_stack += "\n"
else:
result_stack += "\t {0}: {1}\n".format(
colored(key, "magenta"),
value,
)
result_stack = result_stack[0:-1] + "\n"
result += result_stack
print(get_formatted_message(result, ""))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('search')
def search(search_text, config=None):
"""
Searches entries by given search text.
"""
storm_ = get_storm_instance(config)
try:
results = storm_.search_host(search_text)
if len(results) == 0:
print ('no results found.')
if len(results) > 0:
message = 'Listing results for {0}:\n'.format(search_text)
message += "".join(results)
print(message)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('delete_all')
def delete_all(config=None):
"""
Deletes all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_all_entries()
print(get_formatted_message('all entries deleted.', 'success'))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('backup')
def backup(target_file, config=None):
"""
Backups the main ssh configuration into target file.
"""
storm_ = get_storm_instance(config)
try:
storm_.backup(target_file)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('web')
@arg('port', nargs='?', default=9002, type=int)
@arg('theme', nargs='?', default="modern", choices=['modern', 'black', 'storm'])
@arg('debug', action='store_true', default=False)
def web(port, debug=False, theme="modern", ssh_config=None):
"""Starts the web UI."""
from storm import web as _web
_web.run(port, debug, theme, ssh_config)
if __name__ == '__main__':
sys.exit(main())
|
emre/storm
|
storm/__main__.py
|
move
|
python
|
def move(name, entry_name, config=None):
storm_ = get_storm_instance(config)
try:
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, entry_name, keep_original=False)
print(
get_formatted_message(
'{0} moved in ssh config. you can '
'connect it by typing "ssh {0}".'.format(
entry_name
),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
|
Move an entry to the sshconfig.
|
train
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L93-L117
|
[
"def get_formatted_message(message, format_type):\n\n # required for CLI test suite. see tests.py\n if 'TESTMODE' in os.environ and not isinstance(message, ValueError):\n for color_code in COLOR_CODES:\n message = message.replace(color_code, \"\")\n\n return \"{0} {1}\".format(format_type, message)\n\n format_typed = fixed_width(format_type, 8)\n all_message = \"\"\n message = \" %s\" % message\n\n if format_type == 'error':\n all_message = colored(format_typed, 'white', 'on_red')\n if format_type == 'success':\n all_message = colored(format_typed, 'white', 'on_green')\n\n return all_message + message\n",
"def get_storm_instance(config_file=None):\n return Storm(config_file)\n",
"def clone_entry(self, name, clone_name, keep_original=True):\n host = self.is_host_in(name, return_match=True)\n if not host:\n raise ValueError(ERRORS[\"not_found\"].format(name))\n\n # check if an entry with the clone name already exists \n if name == clone_name \\\n or self.is_host_in(clone_name, return_match=True) is not None:\n raise ValueError(ERRORS[\"already_in\"].format(clone_name))\n\n self.ssh_config.add_host(clone_name, host.get('options'))\n if not keep_original:\n self.ssh_config.delete_host(name)\n self.ssh_config.write_to_ssh_config()\n\n return True\n"
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
try:
import __builtin__ as builtins
except ImportError:
import builtins
from storm import Storm
from storm.parsers.ssh_uri_parser import parse
from storm.utils import (get_formatted_message, colored)
from storm.kommandr import *
from storm.defaults import get_default
from storm import __version__
import sys
def get_storm_instance(config_file=None):
return Storm(config_file)
@command('version')
def version():
"""
prints the working storm(ssh) version.
"""
print(__version__)
@command('add')
def add(name, connection_uri, id_file="", o=[], config=None):
"""
Adds a new entry to sshconfig.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.add_entry(name, host, user, port, id_file, o)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('clone')
def clone(name, clone_name, config=None):
"""
Clone an entry to the sshconfig.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, clone_name)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(clone_name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('move')
@command('edit')
def edit(name, connection_uri, id_file="", o=[], config=None):
"""
Edits the related entry in ssh config.
"""
storm_ = get_storm_instance(config)
try:
if ',' in name:
name = " ".join(name.split(","))
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.edit_entry(name, host, user, port, id_file, o)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('update')
def update(name, connection_uri="", id_file="", o=[], config=None):
"""
Enhanced version of the edit command featuring multiple
edits using regular expressions to match entries
"""
storm_ = get_storm_instance(config)
settings = {}
if id_file != "":
settings['identityfile'] = id_file
for option in o:
k, v = option.split("=")
settings[k] = v
try:
storm_.update_entry(name, **settings)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('delete')
def delete(name, config=None):
"""
Deletes a single host.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_entry(name)
print(
get_formatted_message(
'hostname "{0}" deleted successfully.'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('list')
def list(config=None):
"""
Lists all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
result = colored('Listing entries:', 'white', attrs=["bold", ]) + "\n\n"
result_stack = ""
for host in storm_.list_entries(True):
if host.get("type") == 'entry':
if not host.get("host") == "*":
result += " {0} -> {1}@{2}:{3}".format(
colored(host["host"], 'green', attrs=["bold", ]),
host.get("options").get(
"user", get_default("user", storm_.defaults)
),
host.get("options").get(
"hostname", "[hostname_not_specified]"
),
host.get("options").get(
"port", get_default("port", storm_.defaults)
)
)
extra = False
for key, value in six.iteritems(host.get("options")):
if not key in ["user", "hostname", "port"]:
if not extra:
custom_options = colored(
'\n\t[custom options] ', 'white'
)
result += " {0}".format(custom_options)
extra = True
if isinstance(value, collections.Sequence):
if isinstance(value, builtins.list):
value = ",".join(value)
result += "{0}={1} ".format(key, value)
if extra:
result = result[0:-1]
result += "\n\n"
else:
result_stack = colored(
" (*) General options: \n", "green", attrs=["bold",]
)
for key, value in six.iteritems(host.get("options")):
if isinstance(value, type([])):
result_stack += "\t {0}: ".format(
colored(key, "magenta")
)
result_stack += ', '.join(value)
result_stack += "\n"
else:
result_stack += "\t {0}: {1}\n".format(
colored(key, "magenta"),
value,
)
result_stack = result_stack[0:-1] + "\n"
result += result_stack
print(get_formatted_message(result, ""))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('search')
def search(search_text, config=None):
"""
Searches entries by given search text.
"""
storm_ = get_storm_instance(config)
try:
results = storm_.search_host(search_text)
if len(results) == 0:
print ('no results found.')
if len(results) > 0:
message = 'Listing results for {0}:\n'.format(search_text)
message += "".join(results)
print(message)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('delete_all')
def delete_all(config=None):
"""
Deletes all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_all_entries()
print(get_formatted_message('all entries deleted.', 'success'))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('backup')
def backup(target_file, config=None):
"""
Backups the main ssh configuration into target file.
"""
storm_ = get_storm_instance(config)
try:
storm_.backup(target_file)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('web')
@arg('port', nargs='?', default=9002, type=int)
@arg('theme', nargs='?', default="modern", choices=['modern', 'black', 'storm'])
@arg('debug', action='store_true', default=False)
def web(port, debug=False, theme="modern", ssh_config=None):
"""Starts the web UI."""
from storm import web as _web
_web.run(port, debug, theme, ssh_config)
if __name__ == '__main__':
sys.exit(main())
|
emre/storm
|
storm/__main__.py
|
edit
|
python
|
def edit(name, connection_uri, id_file="", o=[], config=None):
storm_ = get_storm_instance(config)
try:
if ',' in name:
name = " ".join(name.split(","))
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.edit_entry(name, host, user, port, id_file, o)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
|
Edits the related entry in ssh config.
|
train
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L120-L143
|
[
"def parse(uri, user=None, port=22):\n \"\"\"\n parses ssh connection uri-like sentences.\n ex:\n - root@google.com -> (root, google.com, 22)\n - noreply@facebook.com:22 -> (noreply, facebook.com, 22)\n - facebook.com:3306 -> ($USER, facebook.com, 3306)\n - twitter.com -> ($USER, twitter.com, 22)\n\n default port: 22\n default user: $USER (getpass.getuser())\n \"\"\"\n\n uri = uri.strip()\n\n if not user:\n user = getpass.getuser()\n\n # get user\n if '@' in uri:\n user = uri.split(\"@\")[0]\n\n # get port\n if ':' in uri:\n port = uri.split(\":\")[-1]\n\n try:\n port = int(port)\n except ValueError:\n raise ValueError(\"port must be numeric.\")\n\n # get host\n uri = re.sub(\":.*\", \"\", uri)\n uri = re.sub(\".*@\", \"\", uri)\n host = uri\n\n return (\n user,\n host,\n port,\n )\n",
"def get_default(key, defaults={}):\n\n if key == 'port':\n return defaults.get(\"port\", DEFAULT_PORT)\n\n if key == 'user':\n return defaults.get(\"user\", DEFAULT_USER)\n\n return defaults.get(key)",
"def get_formatted_message(message, format_type):\n\n # required for CLI test suite. see tests.py\n if 'TESTMODE' in os.environ and not isinstance(message, ValueError):\n for color_code in COLOR_CODES:\n message = message.replace(color_code, \"\")\n\n return \"{0} {1}\".format(format_type, message)\n\n format_typed = fixed_width(format_type, 8)\n all_message = \"\"\n message = \" %s\" % message\n\n if format_type == 'error':\n all_message = colored(format_typed, 'white', 'on_red')\n if format_type == 'success':\n all_message = colored(format_typed, 'white', 'on_green')\n\n return all_message + message\n",
"def get_storm_instance(config_file=None):\n return Storm(config_file)\n",
"def edit_entry(self, name, host, user, port, id_file, custom_options=[]):\n if not self.is_host_in(name):\n raise ValueError(ERRORS[\"not_found\"].format(name))\n\n options = self.get_options(host, user, port, id_file, custom_options)\n self.ssh_config.update_host(name, options, use_regex=False)\n self.ssh_config.write_to_ssh_config()\n\n return True\n"
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
try:
import __builtin__ as builtins
except ImportError:
import builtins
from storm import Storm
from storm.parsers.ssh_uri_parser import parse
from storm.utils import (get_formatted_message, colored)
from storm.kommandr import *
from storm.defaults import get_default
from storm import __version__
import sys
def get_storm_instance(config_file=None):
return Storm(config_file)
@command('version')
def version():
"""
prints the working storm(ssh) version.
"""
print(__version__)
@command('add')
def add(name, connection_uri, id_file="", o=[], config=None):
"""
Adds a new entry to sshconfig.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.add_entry(name, host, user, port, id_file, o)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('clone')
def clone(name, clone_name, config=None):
"""
Clone an entry to the sshconfig.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, clone_name)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(clone_name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('move')
def move(name, entry_name, config=None):
"""
Move an entry to the sshconfig.
"""
storm_ = get_storm_instance(config)
try:
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, entry_name, keep_original=False)
print(
get_formatted_message(
'{0} moved in ssh config. you can '
'connect it by typing "ssh {0}".'.format(
entry_name
),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('edit')
@command('update')
def update(name, connection_uri="", id_file="", o=[], config=None):
"""
Enhanced version of the edit command featuring multiple
edits using regular expressions to match entries
"""
storm_ = get_storm_instance(config)
settings = {}
if id_file != "":
settings['identityfile'] = id_file
for option in o:
k, v = option.split("=")
settings[k] = v
try:
storm_.update_entry(name, **settings)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('delete')
def delete(name, config=None):
"""
Deletes a single host.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_entry(name)
print(
get_formatted_message(
'hostname "{0}" deleted successfully.'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('list')
def list(config=None):
"""
Lists all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
result = colored('Listing entries:', 'white', attrs=["bold", ]) + "\n\n"
result_stack = ""
for host in storm_.list_entries(True):
if host.get("type") == 'entry':
if not host.get("host") == "*":
result += " {0} -> {1}@{2}:{3}".format(
colored(host["host"], 'green', attrs=["bold", ]),
host.get("options").get(
"user", get_default("user", storm_.defaults)
),
host.get("options").get(
"hostname", "[hostname_not_specified]"
),
host.get("options").get(
"port", get_default("port", storm_.defaults)
)
)
extra = False
for key, value in six.iteritems(host.get("options")):
if not key in ["user", "hostname", "port"]:
if not extra:
custom_options = colored(
'\n\t[custom options] ', 'white'
)
result += " {0}".format(custom_options)
extra = True
if isinstance(value, collections.Sequence):
if isinstance(value, builtins.list):
value = ",".join(value)
result += "{0}={1} ".format(key, value)
if extra:
result = result[0:-1]
result += "\n\n"
else:
result_stack = colored(
" (*) General options: \n", "green", attrs=["bold",]
)
for key, value in six.iteritems(host.get("options")):
if isinstance(value, type([])):
result_stack += "\t {0}: ".format(
colored(key, "magenta")
)
result_stack += ', '.join(value)
result_stack += "\n"
else:
result_stack += "\t {0}: {1}\n".format(
colored(key, "magenta"),
value,
)
result_stack = result_stack[0:-1] + "\n"
result += result_stack
print(get_formatted_message(result, ""))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('search')
def search(search_text, config=None):
"""
Searches entries by given search text.
"""
storm_ = get_storm_instance(config)
try:
results = storm_.search_host(search_text)
if len(results) == 0:
print ('no results found.')
if len(results) > 0:
message = 'Listing results for {0}:\n'.format(search_text)
message += "".join(results)
print(message)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('delete_all')
def delete_all(config=None):
"""
Deletes all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_all_entries()
print(get_formatted_message('all entries deleted.', 'success'))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('backup')
def backup(target_file, config=None):
"""
Backups the main ssh configuration into target file.
"""
storm_ = get_storm_instance(config)
try:
storm_.backup(target_file)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('web')
@arg('port', nargs='?', default=9002, type=int)
@arg('theme', nargs='?', default="modern", choices=['modern', 'black', 'storm'])
@arg('debug', action='store_true', default=False)
def web(port, debug=False, theme="modern", ssh_config=None):
"""Starts the web UI."""
from storm import web as _web
_web.run(port, debug, theme, ssh_config)
if __name__ == '__main__':
sys.exit(main())
|
emre/storm
|
storm/__main__.py
|
update
|
python
|
def update(name, connection_uri="", id_file="", o=[], config=None):
storm_ = get_storm_instance(config)
settings = {}
if id_file != "":
settings['identityfile'] = id_file
for option in o:
k, v = option.split("=")
settings[k] = v
try:
storm_.update_entry(name, **settings)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
|
Enhanced version of the edit command featuring multiple
edits using regular expressions to match entries
|
train
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L146-L169
|
[
"def get_formatted_message(message, format_type):\n\n # required for CLI test suite. see tests.py\n if 'TESTMODE' in os.environ and not isinstance(message, ValueError):\n for color_code in COLOR_CODES:\n message = message.replace(color_code, \"\")\n\n return \"{0} {1}\".format(format_type, message)\n\n format_typed = fixed_width(format_type, 8)\n all_message = \"\"\n message = \" %s\" % message\n\n if format_type == 'error':\n all_message = colored(format_typed, 'white', 'on_red')\n if format_type == 'success':\n all_message = colored(format_typed, 'white', 'on_green')\n\n return all_message + message\n",
"def get_storm_instance(config_file=None):\n return Storm(config_file)\n"
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
try:
import __builtin__ as builtins
except ImportError:
import builtins
from storm import Storm
from storm.parsers.ssh_uri_parser import parse
from storm.utils import (get_formatted_message, colored)
from storm.kommandr import *
from storm.defaults import get_default
from storm import __version__
import sys
def get_storm_instance(config_file=None):
return Storm(config_file)
@command('version')
def version():
"""
prints the working storm(ssh) version.
"""
print(__version__)
@command('add')
def add(name, connection_uri, id_file="", o=[], config=None):
"""
Adds a new entry to sshconfig.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.add_entry(name, host, user, port, id_file, o)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('clone')
def clone(name, clone_name, config=None):
"""
Clone an entry to the sshconfig.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, clone_name)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(clone_name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('move')
def move(name, entry_name, config=None):
"""
Move an entry to the sshconfig.
"""
storm_ = get_storm_instance(config)
try:
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, entry_name, keep_original=False)
print(
get_formatted_message(
'{0} moved in ssh config. you can '
'connect it by typing "ssh {0}".'.format(
entry_name
),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('edit')
def edit(name, connection_uri, id_file="", o=[], config=None):
"""
Edits the related entry in ssh config.
"""
storm_ = get_storm_instance(config)
try:
if ',' in name:
name = " ".join(name.split(","))
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.edit_entry(name, host, user, port, id_file, o)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('update')
@command('delete')
def delete(name, config=None):
"""
Deletes a single host.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_entry(name)
print(
get_formatted_message(
'hostname "{0}" deleted successfully.'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('list')
def list(config=None):
"""
Lists all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
result = colored('Listing entries:', 'white', attrs=["bold", ]) + "\n\n"
result_stack = ""
for host in storm_.list_entries(True):
if host.get("type") == 'entry':
if not host.get("host") == "*":
result += " {0} -> {1}@{2}:{3}".format(
colored(host["host"], 'green', attrs=["bold", ]),
host.get("options").get(
"user", get_default("user", storm_.defaults)
),
host.get("options").get(
"hostname", "[hostname_not_specified]"
),
host.get("options").get(
"port", get_default("port", storm_.defaults)
)
)
extra = False
for key, value in six.iteritems(host.get("options")):
if not key in ["user", "hostname", "port"]:
if not extra:
custom_options = colored(
'\n\t[custom options] ', 'white'
)
result += " {0}".format(custom_options)
extra = True
if isinstance(value, collections.Sequence):
if isinstance(value, builtins.list):
value = ",".join(value)
result += "{0}={1} ".format(key, value)
if extra:
result = result[0:-1]
result += "\n\n"
else:
result_stack = colored(
" (*) General options: \n", "green", attrs=["bold",]
)
for key, value in six.iteritems(host.get("options")):
if isinstance(value, type([])):
result_stack += "\t {0}: ".format(
colored(key, "magenta")
)
result_stack += ', '.join(value)
result_stack += "\n"
else:
result_stack += "\t {0}: {1}\n".format(
colored(key, "magenta"),
value,
)
result_stack = result_stack[0:-1] + "\n"
result += result_stack
print(get_formatted_message(result, ""))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('search')
def search(search_text, config=None):
"""
Searches entries by given search text.
"""
storm_ = get_storm_instance(config)
try:
results = storm_.search_host(search_text)
if len(results) == 0:
print ('no results found.')
if len(results) > 0:
message = 'Listing results for {0}:\n'.format(search_text)
message += "".join(results)
print(message)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('delete_all')
def delete_all(config=None):
"""
Deletes all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_all_entries()
print(get_formatted_message('all entries deleted.', 'success'))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('backup')
def backup(target_file, config=None):
"""
Backups the main ssh configuration into target file.
"""
storm_ = get_storm_instance(config)
try:
storm_.backup(target_file)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('web')
@arg('port', nargs='?', default=9002, type=int)
@arg('theme', nargs='?', default="modern", choices=['modern', 'black', 'storm'])
@arg('debug', action='store_true', default=False)
def web(port, debug=False, theme="modern", ssh_config=None):
"""Starts the web UI."""
from storm import web as _web
_web.run(port, debug, theme, ssh_config)
if __name__ == '__main__':
sys.exit(main())
|
emre/storm
|
storm/__main__.py
|
delete
|
python
|
def delete(name, config=None):
storm_ = get_storm_instance(config)
try:
storm_.delete_entry(name)
print(
get_formatted_message(
'hostname "{0}" deleted successfully.'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
|
Deletes a single host.
|
train
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L172-L187
|
[
"def get_formatted_message(message, format_type):\n\n # required for CLI test suite. see tests.py\n if 'TESTMODE' in os.environ and not isinstance(message, ValueError):\n for color_code in COLOR_CODES:\n message = message.replace(color_code, \"\")\n\n return \"{0} {1}\".format(format_type, message)\n\n format_typed = fixed_width(format_type, 8)\n all_message = \"\"\n message = \" %s\" % message\n\n if format_type == 'error':\n all_message = colored(format_typed, 'white', 'on_red')\n if format_type == 'success':\n all_message = colored(format_typed, 'white', 'on_green')\n\n return all_message + message\n",
"def get_storm_instance(config_file=None):\n return Storm(config_file)\n",
"def delete_entry(self, name):\n self.ssh_config.delete_host(name)\n self.ssh_config.write_to_ssh_config()\n\n return True\n"
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
try:
import __builtin__ as builtins
except ImportError:
import builtins
from storm import Storm
from storm.parsers.ssh_uri_parser import parse
from storm.utils import (get_formatted_message, colored)
from storm.kommandr import *
from storm.defaults import get_default
from storm import __version__
import sys
def get_storm_instance(config_file=None):
return Storm(config_file)
@command('version')
def version():
"""
prints the working storm(ssh) version.
"""
print(__version__)
@command('add')
def add(name, connection_uri, id_file="", o=[], config=None):
"""
Adds a new entry to sshconfig.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.add_entry(name, host, user, port, id_file, o)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('clone')
def clone(name, clone_name, config=None):
"""
Clone an entry to the sshconfig.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, clone_name)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(clone_name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('move')
def move(name, entry_name, config=None):
"""
Move an entry to the sshconfig.
"""
storm_ = get_storm_instance(config)
try:
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, entry_name, keep_original=False)
print(
get_formatted_message(
'{0} moved in ssh config. you can '
'connect it by typing "ssh {0}".'.format(
entry_name
),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('edit')
def edit(name, connection_uri, id_file="", o=[], config=None):
"""
Edits the related entry in ssh config.
"""
storm_ = get_storm_instance(config)
try:
if ',' in name:
name = " ".join(name.split(","))
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.edit_entry(name, host, user, port, id_file, o)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('update')
def update(name, connection_uri="", id_file="", o=[], config=None):
"""
Enhanced version of the edit command featuring multiple
edits using regular expressions to match entries
"""
storm_ = get_storm_instance(config)
settings = {}
if id_file != "":
settings['identityfile'] = id_file
for option in o:
k, v = option.split("=")
settings[k] = v
try:
storm_.update_entry(name, **settings)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('delete')
@command('list')
def list(config=None):
"""
Lists all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
result = colored('Listing entries:', 'white', attrs=["bold", ]) + "\n\n"
result_stack = ""
for host in storm_.list_entries(True):
if host.get("type") == 'entry':
if not host.get("host") == "*":
result += " {0} -> {1}@{2}:{3}".format(
colored(host["host"], 'green', attrs=["bold", ]),
host.get("options").get(
"user", get_default("user", storm_.defaults)
),
host.get("options").get(
"hostname", "[hostname_not_specified]"
),
host.get("options").get(
"port", get_default("port", storm_.defaults)
)
)
extra = False
for key, value in six.iteritems(host.get("options")):
if not key in ["user", "hostname", "port"]:
if not extra:
custom_options = colored(
'\n\t[custom options] ', 'white'
)
result += " {0}".format(custom_options)
extra = True
if isinstance(value, collections.Sequence):
if isinstance(value, builtins.list):
value = ",".join(value)
result += "{0}={1} ".format(key, value)
if extra:
result = result[0:-1]
result += "\n\n"
else:
result_stack = colored(
" (*) General options: \n", "green", attrs=["bold",]
)
for key, value in six.iteritems(host.get("options")):
if isinstance(value, type([])):
result_stack += "\t {0}: ".format(
colored(key, "magenta")
)
result_stack += ', '.join(value)
result_stack += "\n"
else:
result_stack += "\t {0}: {1}\n".format(
colored(key, "magenta"),
value,
)
result_stack = result_stack[0:-1] + "\n"
result += result_stack
print(get_formatted_message(result, ""))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('search')
def search(search_text, config=None):
"""
Searches entries by given search text.
"""
storm_ = get_storm_instance(config)
try:
results = storm_.search_host(search_text)
if len(results) == 0:
print ('no results found.')
if len(results) > 0:
message = 'Listing results for {0}:\n'.format(search_text)
message += "".join(results)
print(message)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('delete_all')
def delete_all(config=None):
"""
Deletes all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_all_entries()
print(get_formatted_message('all entries deleted.', 'success'))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('backup')
def backup(target_file, config=None):
"""
Backups the main ssh configuration into target file.
"""
storm_ = get_storm_instance(config)
try:
storm_.backup(target_file)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('web')
@arg('port', nargs='?', default=9002, type=int)
@arg('theme', nargs='?', default="modern", choices=['modern', 'black', 'storm'])
@arg('debug', action='store_true', default=False)
def web(port, debug=False, theme="modern", ssh_config=None):
"""Starts the web UI."""
from storm import web as _web
_web.run(port, debug, theme, ssh_config)
if __name__ == '__main__':
sys.exit(main())
|
emre/storm
|
storm/__main__.py
|
list
|
python
|
def list(config=None):
storm_ = get_storm_instance(config)
try:
result = colored('Listing entries:', 'white', attrs=["bold", ]) + "\n\n"
result_stack = ""
for host in storm_.list_entries(True):
if host.get("type") == 'entry':
if not host.get("host") == "*":
result += " {0} -> {1}@{2}:{3}".format(
colored(host["host"], 'green', attrs=["bold", ]),
host.get("options").get(
"user", get_default("user", storm_.defaults)
),
host.get("options").get(
"hostname", "[hostname_not_specified]"
),
host.get("options").get(
"port", get_default("port", storm_.defaults)
)
)
extra = False
for key, value in six.iteritems(host.get("options")):
if not key in ["user", "hostname", "port"]:
if not extra:
custom_options = colored(
'\n\t[custom options] ', 'white'
)
result += " {0}".format(custom_options)
extra = True
if isinstance(value, collections.Sequence):
if isinstance(value, builtins.list):
value = ",".join(value)
result += "{0}={1} ".format(key, value)
if extra:
result = result[0:-1]
result += "\n\n"
else:
result_stack = colored(
" (*) General options: \n", "green", attrs=["bold",]
)
for key, value in six.iteritems(host.get("options")):
if isinstance(value, type([])):
result_stack += "\t {0}: ".format(
colored(key, "magenta")
)
result_stack += ', '.join(value)
result_stack += "\n"
else:
result_stack += "\t {0}: {1}\n".format(
colored(key, "magenta"),
value,
)
result_stack = result_stack[0:-1] + "\n"
result += result_stack
print(get_formatted_message(result, ""))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
|
Lists all hosts from ssh config.
|
train
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L190-L258
|
[
"def get_formatted_message(message, format_type):\n\n # required for CLI test suite. see tests.py\n if 'TESTMODE' in os.environ and not isinstance(message, ValueError):\n for color_code in COLOR_CODES:\n message = message.replace(color_code, \"\")\n\n return \"{0} {1}\".format(format_type, message)\n\n format_typed = fixed_width(format_type, 8)\n all_message = \"\"\n message = \" %s\" % message\n\n if format_type == 'error':\n all_message = colored(format_typed, 'white', 'on_red')\n if format_type == 'success':\n all_message = colored(format_typed, 'white', 'on_green')\n\n return all_message + message\n",
"def get_storm_instance(config_file=None):\n return Storm(config_file)\n"
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
try:
import __builtin__ as builtins
except ImportError:
import builtins
from storm import Storm
from storm.parsers.ssh_uri_parser import parse
from storm.utils import (get_formatted_message, colored)
from storm.kommandr import *
from storm.defaults import get_default
from storm import __version__
import sys
def get_storm_instance(config_file=None):
return Storm(config_file)
@command('version')
def version():
"""
prints the working storm(ssh) version.
"""
print(__version__)
@command('add')
def add(name, connection_uri, id_file="", o=[], config=None):
"""
Adds a new entry to sshconfig.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.add_entry(name, host, user, port, id_file, o)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('clone')
def clone(name, clone_name, config=None):
"""
Clone an entry to the sshconfig.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, clone_name)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(clone_name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('move')
def move(name, entry_name, config=None):
"""
Move an entry to the sshconfig.
"""
storm_ = get_storm_instance(config)
try:
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, entry_name, keep_original=False)
print(
get_formatted_message(
'{0} moved in ssh config. you can '
'connect it by typing "ssh {0}".'.format(
entry_name
),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('edit')
def edit(name, connection_uri, id_file="", o=[], config=None):
"""
Edits the related entry in ssh config.
"""
storm_ = get_storm_instance(config)
try:
if ',' in name:
name = " ".join(name.split(","))
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.edit_entry(name, host, user, port, id_file, o)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('update')
def update(name, connection_uri="", id_file="", o=[], config=None):
"""
Enhanced version of the edit command featuring multiple
edits using regular expressions to match entries
"""
storm_ = get_storm_instance(config)
settings = {}
if id_file != "":
settings['identityfile'] = id_file
for option in o:
k, v = option.split("=")
settings[k] = v
try:
storm_.update_entry(name, **settings)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('delete')
def delete(name, config=None):
"""
Deletes a single host.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_entry(name)
print(
get_formatted_message(
'hostname "{0}" deleted successfully.'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('list')
def list(config=None):
"""
Lists all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
result = colored('Listing entries:', 'white', attrs=["bold", ]) + "\n\n"
result_stack = ""
for host in storm_.list_entries(True):
if host.get("type") == 'entry':
if not host.get("host") == "*":
result += " {0} -> {1}@{2}:{3}".format(
colored(host["host"], 'green', attrs=["bold", ]),
host.get("options").get(
"user", get_default("user", storm_.defaults)
),
host.get("options").get(
"hostname", "[hostname_not_specified]"
),
host.get("options").get(
"port", get_default("port", storm_.defaults)
)
)
extra = False
for key, value in six.iteritems(host.get("options")):
if not key in ["user", "hostname", "port"]:
if not extra:
custom_options = colored(
'\n\t[custom options] ', 'white'
)
result += " {0}".format(custom_options)
extra = True
if isinstance(value, collections.Sequence):
if isinstance(value, builtins.list):
value = ",".join(value)
result += "{0}={1} ".format(key, value)
if extra:
result = result[0:-1]
result += "\n\n"
else:
result_stack = colored(
" (*) General options: \n", "green", attrs=["bold",]
)
for key, value in six.iteritems(host.get("options")):
if isinstance(value, type([])):
result_stack += "\t {0}: ".format(
colored(key, "magenta")
)
result_stack += ', '.join(value)
result_stack += "\n"
else:
result_stack += "\t {0}: {1}\n".format(
colored(key, "magenta"),
value,
)
result_stack = result_stack[0:-1] + "\n"
result += result_stack
print(get_formatted_message(result, ""))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('search')
def search(search_text, config=None):
"""
Searches entries by given search text.
"""
storm_ = get_storm_instance(config)
try:
results = storm_.search_host(search_text)
if len(results) == 0:
print ('no results found.')
if len(results) > 0:
message = 'Listing results for {0}:\n'.format(search_text)
message += "".join(results)
print(message)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('delete_all')
def delete_all(config=None):
"""
Deletes all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_all_entries()
print(get_formatted_message('all entries deleted.', 'success'))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('backup')
def backup(target_file, config=None):
"""
Backups the main ssh configuration into target file.
"""
storm_ = get_storm_instance(config)
try:
storm_.backup(target_file)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('web')
@arg('port', nargs='?', default=9002, type=int)
@arg('theme', nargs='?', default="modern", choices=['modern', 'black', 'storm'])
@arg('debug', action='store_true', default=False)
def web(port, debug=False, theme="modern", ssh_config=None):
"""Starts the web UI."""
from storm import web as _web
_web.run(port, debug, theme, ssh_config)
if __name__ == '__main__':
sys.exit(main())
|
emre/storm
|
storm/__main__.py
|
search
|
python
|
def search(search_text, config=None):
storm_ = get_storm_instance(config)
try:
results = storm_.search_host(search_text)
if len(results) == 0:
print ('no results found.')
if len(results) > 0:
message = 'Listing results for {0}:\n'.format(search_text)
message += "".join(results)
print(message)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
|
Searches entries by given search text.
|
train
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L261-L278
|
[
"def get_formatted_message(message, format_type):\n\n # required for CLI test suite. see tests.py\n if 'TESTMODE' in os.environ and not isinstance(message, ValueError):\n for color_code in COLOR_CODES:\n message = message.replace(color_code, \"\")\n\n return \"{0} {1}\".format(format_type, message)\n\n format_typed = fixed_width(format_type, 8)\n all_message = \"\"\n message = \" %s\" % message\n\n if format_type == 'error':\n all_message = colored(format_typed, 'white', 'on_red')\n if format_type == 'success':\n all_message = colored(format_typed, 'white', 'on_green')\n\n return all_message + message\n",
"def get_storm_instance(config_file=None):\n return Storm(config_file)\n"
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
try:
import __builtin__ as builtins
except ImportError:
import builtins
from storm import Storm
from storm.parsers.ssh_uri_parser import parse
from storm.utils import (get_formatted_message, colored)
from storm.kommandr import *
from storm.defaults import get_default
from storm import __version__
import sys
def get_storm_instance(config_file=None):
return Storm(config_file)
@command('version')
def version():
"""
prints the working storm(ssh) version.
"""
print(__version__)
@command('add')
def add(name, connection_uri, id_file="", o=[], config=None):
"""
Adds a new entry to sshconfig.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.add_entry(name, host, user, port, id_file, o)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('clone')
def clone(name, clone_name, config=None):
"""
Clone an entry to the sshconfig.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, clone_name)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(clone_name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('move')
def move(name, entry_name, config=None):
"""
Move an entry to the sshconfig.
"""
storm_ = get_storm_instance(config)
try:
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, entry_name, keep_original=False)
print(
get_formatted_message(
'{0} moved in ssh config. you can '
'connect it by typing "ssh {0}".'.format(
entry_name
),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('edit')
def edit(name, connection_uri, id_file="", o=[], config=None):
"""
Edits the related entry in ssh config.
"""
storm_ = get_storm_instance(config)
try:
if ',' in name:
name = " ".join(name.split(","))
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.edit_entry(name, host, user, port, id_file, o)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('update')
def update(name, connection_uri="", id_file="", o=[], config=None):
"""
Enhanced version of the edit command featuring multiple
edits using regular expressions to match entries
"""
storm_ = get_storm_instance(config)
settings = {}
if id_file != "":
settings['identityfile'] = id_file
for option in o:
k, v = option.split("=")
settings[k] = v
try:
storm_.update_entry(name, **settings)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('delete')
def delete(name, config=None):
"""
Deletes a single host.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_entry(name)
print(
get_formatted_message(
'hostname "{0}" deleted successfully.'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('list')
def list(config=None):
"""
Lists all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
result = colored('Listing entries:', 'white', attrs=["bold", ]) + "\n\n"
result_stack = ""
for host in storm_.list_entries(True):
if host.get("type") == 'entry':
if not host.get("host") == "*":
result += " {0} -> {1}@{2}:{3}".format(
colored(host["host"], 'green', attrs=["bold", ]),
host.get("options").get(
"user", get_default("user", storm_.defaults)
),
host.get("options").get(
"hostname", "[hostname_not_specified]"
),
host.get("options").get(
"port", get_default("port", storm_.defaults)
)
)
extra = False
for key, value in six.iteritems(host.get("options")):
if not key in ["user", "hostname", "port"]:
if not extra:
custom_options = colored(
'\n\t[custom options] ', 'white'
)
result += " {0}".format(custom_options)
extra = True
if isinstance(value, collections.Sequence):
if isinstance(value, builtins.list):
value = ",".join(value)
result += "{0}={1} ".format(key, value)
if extra:
result = result[0:-1]
result += "\n\n"
else:
result_stack = colored(
" (*) General options: \n", "green", attrs=["bold",]
)
for key, value in six.iteritems(host.get("options")):
if isinstance(value, type([])):
result_stack += "\t {0}: ".format(
colored(key, "magenta")
)
result_stack += ', '.join(value)
result_stack += "\n"
else:
result_stack += "\t {0}: {1}\n".format(
colored(key, "magenta"),
value,
)
result_stack = result_stack[0:-1] + "\n"
result += result_stack
print(get_formatted_message(result, ""))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('search')
@command('delete_all')
def delete_all(config=None):
"""
Deletes all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_all_entries()
print(get_formatted_message('all entries deleted.', 'success'))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('backup')
def backup(target_file, config=None):
"""
Backups the main ssh configuration into target file.
"""
storm_ = get_storm_instance(config)
try:
storm_.backup(target_file)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('web')
@arg('port', nargs='?', default=9002, type=int)
@arg('theme', nargs='?', default="modern", choices=['modern', 'black', 'storm'])
@arg('debug', action='store_true', default=False)
def web(port, debug=False, theme="modern", ssh_config=None):
"""Starts the web UI."""
from storm import web as _web
_web.run(port, debug, theme, ssh_config)
if __name__ == '__main__':
sys.exit(main())
|
emre/storm
|
storm/__main__.py
|
delete_all
|
python
|
def delete_all(config=None):
storm_ = get_storm_instance(config)
try:
storm_.delete_all_entries()
print(get_formatted_message('all entries deleted.', 'success'))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
|
Deletes all hosts from ssh config.
|
train
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L281-L292
|
[
"def get_formatted_message(message, format_type):\n\n # required for CLI test suite. see tests.py\n if 'TESTMODE' in os.environ and not isinstance(message, ValueError):\n for color_code in COLOR_CODES:\n message = message.replace(color_code, \"\")\n\n return \"{0} {1}\".format(format_type, message)\n\n format_typed = fixed_width(format_type, 8)\n all_message = \"\"\n message = \" %s\" % message\n\n if format_type == 'error':\n all_message = colored(format_typed, 'white', 'on_red')\n if format_type == 'success':\n all_message = colored(format_typed, 'white', 'on_green')\n\n return all_message + message\n",
"def get_storm_instance(config_file=None):\n return Storm(config_file)\n",
"def delete_all_entries(self):\n self.ssh_config.delete_all_hosts()\n\n return True\n"
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
try:
import __builtin__ as builtins
except ImportError:
import builtins
from storm import Storm
from storm.parsers.ssh_uri_parser import parse
from storm.utils import (get_formatted_message, colored)
from storm.kommandr import *
from storm.defaults import get_default
from storm import __version__
import sys
def get_storm_instance(config_file=None):
return Storm(config_file)
@command('version')
def version():
"""
prints the working storm(ssh) version.
"""
print(__version__)
@command('add')
def add(name, connection_uri, id_file="", o=[], config=None):
"""
Adds a new entry to sshconfig.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.add_entry(name, host, user, port, id_file, o)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('clone')
def clone(name, clone_name, config=None):
"""
Clone an entry to the sshconfig.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, clone_name)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(clone_name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('move')
def move(name, entry_name, config=None):
"""
Move an entry to the sshconfig.
"""
storm_ = get_storm_instance(config)
try:
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, entry_name, keep_original=False)
print(
get_formatted_message(
'{0} moved in ssh config. you can '
'connect it by typing "ssh {0}".'.format(
entry_name
),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('edit')
def edit(name, connection_uri, id_file="", o=[], config=None):
"""
Edits the related entry in ssh config.
"""
storm_ = get_storm_instance(config)
try:
if ',' in name:
name = " ".join(name.split(","))
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.edit_entry(name, host, user, port, id_file, o)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('update')
def update(name, connection_uri="", id_file="", o=[], config=None):
"""
Enhanced version of the edit command featuring multiple
edits using regular expressions to match entries
"""
storm_ = get_storm_instance(config)
settings = {}
if id_file != "":
settings['identityfile'] = id_file
for option in o:
k, v = option.split("=")
settings[k] = v
try:
storm_.update_entry(name, **settings)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('delete')
def delete(name, config=None):
"""
Deletes a single host.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_entry(name)
print(
get_formatted_message(
'hostname "{0}" deleted successfully.'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('list')
def list(config=None):
"""
Lists all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
result = colored('Listing entries:', 'white', attrs=["bold", ]) + "\n\n"
result_stack = ""
for host in storm_.list_entries(True):
if host.get("type") == 'entry':
if not host.get("host") == "*":
result += " {0} -> {1}@{2}:{3}".format(
colored(host["host"], 'green', attrs=["bold", ]),
host.get("options").get(
"user", get_default("user", storm_.defaults)
),
host.get("options").get(
"hostname", "[hostname_not_specified]"
),
host.get("options").get(
"port", get_default("port", storm_.defaults)
)
)
extra = False
for key, value in six.iteritems(host.get("options")):
if not key in ["user", "hostname", "port"]:
if not extra:
custom_options = colored(
'\n\t[custom options] ', 'white'
)
result += " {0}".format(custom_options)
extra = True
if isinstance(value, collections.Sequence):
if isinstance(value, builtins.list):
value = ",".join(value)
result += "{0}={1} ".format(key, value)
if extra:
result = result[0:-1]
result += "\n\n"
else:
result_stack = colored(
" (*) General options: \n", "green", attrs=["bold",]
)
for key, value in six.iteritems(host.get("options")):
if isinstance(value, type([])):
result_stack += "\t {0}: ".format(
colored(key, "magenta")
)
result_stack += ', '.join(value)
result_stack += "\n"
else:
result_stack += "\t {0}: {1}\n".format(
colored(key, "magenta"),
value,
)
result_stack = result_stack[0:-1] + "\n"
result += result_stack
print(get_formatted_message(result, ""))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('search')
def search(search_text, config=None):
"""
Searches entries by given search text.
"""
storm_ = get_storm_instance(config)
try:
results = storm_.search_host(search_text)
if len(results) == 0:
print ('no results found.')
if len(results) > 0:
message = 'Listing results for {0}:\n'.format(search_text)
message += "".join(results)
print(message)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('delete_all')
@command('backup')
def backup(target_file, config=None):
"""
Backups the main ssh configuration into target file.
"""
storm_ = get_storm_instance(config)
try:
storm_.backup(target_file)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('web')
@arg('port', nargs='?', default=9002, type=int)
@arg('theme', nargs='?', default="modern", choices=['modern', 'black', 'storm'])
@arg('debug', action='store_true', default=False)
def web(port, debug=False, theme="modern", ssh_config=None):
"""Starts the web UI."""
from storm import web as _web
_web.run(port, debug, theme, ssh_config)
if __name__ == '__main__':
sys.exit(main())
|
emre/storm
|
storm/__main__.py
|
backup
|
python
|
def backup(target_file, config=None):
storm_ = get_storm_instance(config)
try:
storm_.backup(target_file)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
|
Backups the main ssh configuration into target file.
|
train
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L295-L304
|
[
"def get_formatted_message(message, format_type):\n\n # required for CLI test suite. see tests.py\n if 'TESTMODE' in os.environ and not isinstance(message, ValueError):\n for color_code in COLOR_CODES:\n message = message.replace(color_code, \"\")\n\n return \"{0} {1}\".format(format_type, message)\n\n format_typed = fixed_width(format_type, 8)\n all_message = \"\"\n message = \" %s\" % message\n\n if format_type == 'error':\n all_message = colored(format_typed, 'white', 'on_red')\n if format_type == 'success':\n all_message = colored(format_typed, 'white', 'on_green')\n\n return all_message + message\n",
"def get_storm_instance(config_file=None):\n return Storm(config_file)\n",
"def backup(self, target_file):\n return copyfile(self.ssh_config.ssh_config_file, target_file)\n"
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
try:
import __builtin__ as builtins
except ImportError:
import builtins
from storm import Storm
from storm.parsers.ssh_uri_parser import parse
from storm.utils import (get_formatted_message, colored)
from storm.kommandr import *
from storm.defaults import get_default
from storm import __version__
import sys
def get_storm_instance(config_file=None):
return Storm(config_file)
@command('version')
def version():
"""
prints the working storm(ssh) version.
"""
print(__version__)
@command('add')
def add(name, connection_uri, id_file="", o=[], config=None):
"""
Adds a new entry to sshconfig.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.add_entry(name, host, user, port, id_file, o)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('clone')
def clone(name, clone_name, config=None):
"""
Clone an entry to the sshconfig.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, clone_name)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(clone_name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('move')
def move(name, entry_name, config=None):
"""
Move an entry to the sshconfig.
"""
storm_ = get_storm_instance(config)
try:
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, entry_name, keep_original=False)
print(
get_formatted_message(
'{0} moved in ssh config. you can '
'connect it by typing "ssh {0}".'.format(
entry_name
),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('edit')
def edit(name, connection_uri, id_file="", o=[], config=None):
"""
Edits the related entry in ssh config.
"""
storm_ = get_storm_instance(config)
try:
if ',' in name:
name = " ".join(name.split(","))
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.edit_entry(name, host, user, port, id_file, o)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('update')
def update(name, connection_uri="", id_file="", o=[], config=None):
"""
Enhanced version of the edit command featuring multiple
edits using regular expressions to match entries
"""
storm_ = get_storm_instance(config)
settings = {}
if id_file != "":
settings['identityfile'] = id_file
for option in o:
k, v = option.split("=")
settings[k] = v
try:
storm_.update_entry(name, **settings)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('delete')
def delete(name, config=None):
"""
Deletes a single host.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_entry(name)
print(
get_formatted_message(
'hostname "{0}" deleted successfully.'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('list')
def list(config=None):
"""
Lists all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
result = colored('Listing entries:', 'white', attrs=["bold", ]) + "\n\n"
result_stack = ""
for host in storm_.list_entries(True):
if host.get("type") == 'entry':
if not host.get("host") == "*":
result += " {0} -> {1}@{2}:{3}".format(
colored(host["host"], 'green', attrs=["bold", ]),
host.get("options").get(
"user", get_default("user", storm_.defaults)
),
host.get("options").get(
"hostname", "[hostname_not_specified]"
),
host.get("options").get(
"port", get_default("port", storm_.defaults)
)
)
extra = False
for key, value in six.iteritems(host.get("options")):
if not key in ["user", "hostname", "port"]:
if not extra:
custom_options = colored(
'\n\t[custom options] ', 'white'
)
result += " {0}".format(custom_options)
extra = True
if isinstance(value, collections.Sequence):
if isinstance(value, builtins.list):
value = ",".join(value)
result += "{0}={1} ".format(key, value)
if extra:
result = result[0:-1]
result += "\n\n"
else:
result_stack = colored(
" (*) General options: \n", "green", attrs=["bold",]
)
for key, value in six.iteritems(host.get("options")):
if isinstance(value, type([])):
result_stack += "\t {0}: ".format(
colored(key, "magenta")
)
result_stack += ', '.join(value)
result_stack += "\n"
else:
result_stack += "\t {0}: {1}\n".format(
colored(key, "magenta"),
value,
)
result_stack = result_stack[0:-1] + "\n"
result += result_stack
print(get_formatted_message(result, ""))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('search')
def search(search_text, config=None):
"""
Searches entries by given search text.
"""
storm_ = get_storm_instance(config)
try:
results = storm_.search_host(search_text)
if len(results) == 0:
print ('no results found.')
if len(results) > 0:
message = 'Listing results for {0}:\n'.format(search_text)
message += "".join(results)
print(message)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('delete_all')
def delete_all(config=None):
"""
Deletes all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_all_entries()
print(get_formatted_message('all entries deleted.', 'success'))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('backup')
@command('web')
@arg('port', nargs='?', default=9002, type=int)
@arg('theme', nargs='?', default="modern", choices=['modern', 'black', 'storm'])
@arg('debug', action='store_true', default=False)
def web(port, debug=False, theme="modern", ssh_config=None):
"""Starts the web UI."""
from storm import web as _web
_web.run(port, debug, theme, ssh_config)
if __name__ == '__main__':
sys.exit(main())
|
emre/storm
|
storm/__main__.py
|
web
|
python
|
def web(port, debug=False, theme="modern", ssh_config=None):
from storm import web as _web
_web.run(port, debug, theme, ssh_config)
|
Starts the web UI.
|
train
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L310-L313
|
[
"def run(port, debug, theme, ssh_config=None):\n global __THEME__\n port = int(port)\n debug = bool(debug)\n __THEME__ = theme\n\n def get_storm():\n return Storm(ssh_config)\n\n app.get_storm = get_storm\n\n app.run(port=port, debug=debug)\n"
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
try:
import __builtin__ as builtins
except ImportError:
import builtins
from storm import Storm
from storm.parsers.ssh_uri_parser import parse
from storm.utils import (get_formatted_message, colored)
from storm.kommandr import *
from storm.defaults import get_default
from storm import __version__
import sys
def get_storm_instance(config_file=None):
return Storm(config_file)
@command('version')
def version():
"""
prints the working storm(ssh) version.
"""
print(__version__)
@command('add')
def add(name, connection_uri, id_file="", o=[], config=None):
"""
Adds a new entry to sshconfig.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.add_entry(name, host, user, port, id_file, o)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('clone')
def clone(name, clone_name, config=None):
"""
Clone an entry to the sshconfig.
"""
storm_ = get_storm_instance(config)
try:
# validate name
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, clone_name)
print(
get_formatted_message(
'{0} added to your ssh config. you can connect '
'it by typing "ssh {0}".'.format(clone_name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('move')
def move(name, entry_name, config=None):
"""
Move an entry to the sshconfig.
"""
storm_ = get_storm_instance(config)
try:
if '@' in name:
raise ValueError('invalid value: "@" cannot be used in name.')
storm_.clone_entry(name, entry_name, keep_original=False)
print(
get_formatted_message(
'{0} moved in ssh config. you can '
'connect it by typing "ssh {0}".'.format(
entry_name
),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('edit')
def edit(name, connection_uri, id_file="", o=[], config=None):
"""
Edits the related entry in ssh config.
"""
storm_ = get_storm_instance(config)
try:
if ',' in name:
name = " ".join(name.split(","))
user, host, port = parse(
connection_uri,
user=get_default("user", storm_.defaults),
port=get_default("port", storm_.defaults)
)
storm_.edit_entry(name, host, user, port, id_file, o)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('update')
def update(name, connection_uri="", id_file="", o=[], config=None):
"""
Enhanced version of the edit command featuring multiple
edits using regular expressions to match entries
"""
storm_ = get_storm_instance(config)
settings = {}
if id_file != "":
settings['identityfile'] = id_file
for option in o:
k, v = option.split("=")
settings[k] = v
try:
storm_.update_entry(name, **settings)
print(get_formatted_message(
'"{0}" updated successfully.'.format(
name
), 'success'))
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('delete')
def delete(name, config=None):
"""
Deletes a single host.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_entry(name)
print(
get_formatted_message(
'hostname "{0}" deleted successfully.'.format(name),
'success')
)
except ValueError as error:
print(get_formatted_message(error, 'error'), file=sys.stderr)
sys.exit(1)
@command('list')
def list(config=None):
"""
Lists all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
result = colored('Listing entries:', 'white', attrs=["bold", ]) + "\n\n"
result_stack = ""
for host in storm_.list_entries(True):
if host.get("type") == 'entry':
if not host.get("host") == "*":
result += " {0} -> {1}@{2}:{3}".format(
colored(host["host"], 'green', attrs=["bold", ]),
host.get("options").get(
"user", get_default("user", storm_.defaults)
),
host.get("options").get(
"hostname", "[hostname_not_specified]"
),
host.get("options").get(
"port", get_default("port", storm_.defaults)
)
)
extra = False
for key, value in six.iteritems(host.get("options")):
if not key in ["user", "hostname", "port"]:
if not extra:
custom_options = colored(
'\n\t[custom options] ', 'white'
)
result += " {0}".format(custom_options)
extra = True
if isinstance(value, collections.Sequence):
if isinstance(value, builtins.list):
value = ",".join(value)
result += "{0}={1} ".format(key, value)
if extra:
result = result[0:-1]
result += "\n\n"
else:
result_stack = colored(
" (*) General options: \n", "green", attrs=["bold",]
)
for key, value in six.iteritems(host.get("options")):
if isinstance(value, type([])):
result_stack += "\t {0}: ".format(
colored(key, "magenta")
)
result_stack += ', '.join(value)
result_stack += "\n"
else:
result_stack += "\t {0}: {1}\n".format(
colored(key, "magenta"),
value,
)
result_stack = result_stack[0:-1] + "\n"
result += result_stack
print(get_formatted_message(result, ""))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('search')
def search(search_text, config=None):
"""
Searches entries by given search text.
"""
storm_ = get_storm_instance(config)
try:
results = storm_.search_host(search_text)
if len(results) == 0:
print ('no results found.')
if len(results) > 0:
message = 'Listing results for {0}:\n'.format(search_text)
message += "".join(results)
print(message)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('delete_all')
def delete_all(config=None):
"""
Deletes all hosts from ssh config.
"""
storm_ = get_storm_instance(config)
try:
storm_.delete_all_entries()
print(get_formatted_message('all entries deleted.', 'success'))
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('backup')
def backup(target_file, config=None):
"""
Backups the main ssh configuration into target file.
"""
storm_ = get_storm_instance(config)
try:
storm_.backup(target_file)
except Exception as error:
print(get_formatted_message(str(error), 'error'), file=sys.stderr)
sys.exit(1)
@command('web')
@arg('port', nargs='?', default=9002, type=int)
@arg('theme', nargs='?', default="modern", choices=['modern', 'black', 'storm'])
@arg('debug', action='store_true', default=False)
if __name__ == '__main__':
sys.exit(main())
|
emre/storm
|
storm/parsers/ssh_config_parser.py
|
StormConfig.parse
|
python
|
def parse(self, file_obj):
order = 1
host = {"host": ['*'], "config": {}, }
for line in file_obj:
line = line.rstrip('\n').lstrip()
if line == '':
self._config.append({
'type': 'empty_line',
'value': line,
'host': '',
'order': order,
})
order += 1
continue
if line.startswith('#'):
self._config.append({
'type': 'comment',
'value': line,
'host': '',
'order': order,
})
order += 1
continue
if '=' in line:
# Ensure ProxyCommand gets properly split
if line.lower().strip().startswith('proxycommand'):
proxy_re = re.compile(r"^(proxycommand)\s*=*\s*(.*)", re.I)
match = proxy_re.match(line)
key, value = match.group(1).lower(), match.group(2)
else:
key, value = line.split('=', 1)
key = key.strip().lower()
else:
# find first whitespace, and split there
i = 0
while (i < len(line)) and not line[i].isspace():
i += 1
if i == len(line):
raise Exception('Unparsable line: %r' % line)
key = line[:i].lower()
value = line[i:].lstrip()
if key == 'host':
self._config.append(host)
value = value.split()
host = {
key: value,
'config': {},
'type': 'entry',
'order': order
}
order += 1
elif key in ['identityfile', 'localforward', 'remoteforward']:
if key in host['config']:
host['config'][key].append(value)
else:
host['config'][key] = [value]
elif key not in host['config']:
host['config'].update({key: value})
self._config.append(host)
|
Read an OpenSSH config from the given file object.
@param file_obj: a file-like object to read the config file from
@type file_obj: file
|
train
|
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/parsers/ssh_config_parser.py#L16-L82
| null |
class StormConfig(SSHConfig):
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.